mirror of https://github.com/encounter/aurora.git
Transition back to webgpu_cpp; initial emscripten support
parent 04590f30cd
commit 893cabe55a
@@ -12,7 +12,6 @@ add_library(aurora STATIC
lib/imgui.cpp
lib/input.cpp
lib/window.cpp
lib/dawn/BackendBinding.cpp
lib/gfx/common.cpp
lib/gfx/texture.cpp
lib/gfx/gx.cpp
@@ -50,7 +49,15 @@ if (NOT TARGET SDL2::SDL2-static)
find_package(SDL2 REQUIRED)
endif ()
target_link_libraries(aurora PUBLIC SDL2::SDL2-static fmt::fmt imgui xxhash)
target_link_libraries(aurora PRIVATE dawn_native dawncpp webgpu_dawn absl::btree absl::flat_hash_map)
if (EMSCRIPTEN)
target_link_options(aurora PUBLIC -sUSE_WEBGPU=1 -sASYNCIFY -sEXIT_RUNTIME)
target_compile_definitions(aurora PRIVATE ENABLE_BACKEND_WEBGPU)
else ()
target_link_libraries(aurora PRIVATE dawn_native dawncpp webgpu_dawn)
target_sources(aurora PRIVATE lib/dawn/BackendBinding.cpp)
target_compile_definitions(aurora PRIVATE WEBGPU_DAWN)
endif ()
target_link_libraries(aurora PRIVATE absl::btree absl::flat_hash_map)
if (DAWN_ENABLE_VULKAN)
target_compile_definitions(aurora PRIVATE DAWN_ENABLE_BACKEND_VULKAN)
target_sources(aurora PRIVATE lib/dawn/VulkanBinding.cpp)
@@ -1,32 +1,37 @@
if (NOT TARGET dawn_native)
if (CMAKE_SYSTEM_NAME STREQUAL Windows)
set(DAWN_ENABLE_DESKTOP_GL ON CACHE BOOL "Enable compilation of the OpenGL backend" FORCE)
if (NOT EMSCRIPTEN)
if (NOT TARGET dawn_native)
if (CMAKE_SYSTEM_NAME STREQUAL Windows)
set(DAWN_ENABLE_DESKTOP_GL ON CACHE BOOL "Enable compilation of the OpenGL backend" FORCE)
endif ()
if (CMAKE_SYSTEM_NAME STREQUAL Linux)
set(DAWN_ENABLE_OPENGLES ON CACHE BOOL "Enable compilation of the OpenGL ES backend" FORCE)
endif ()
add_subdirectory(dawn EXCLUDE_FROM_ALL)
if (DAWN_ENABLE_VULKAN)
target_compile_definitions(dawn_native PRIVATE
DAWN_ENABLE_VULKAN_VALIDATION_LAYERS
DAWN_VK_DATA_DIR="vulkandata")
endif ()
if (MSVC)
target_compile_options(dawn_native PRIVATE /bigobj)
else ()
target_compile_options(SPIRV-Tools-static PRIVATE -Wno-implicit-fallthrough)
target_compile_options(SPIRV-Tools-opt PRIVATE -Wno-implicit-fallthrough)
endif ()
endif ()
if (CMAKE_SYSTEM_NAME STREQUAL Linux)
set(DAWN_ENABLE_OPENGLES ON CACHE BOOL "Enable compilation of the OpenGL ES backend" FORCE)
endif ()
add_subdirectory(dawn EXCLUDE_FROM_ALL)
if (DAWN_ENABLE_VULKAN)
target_compile_definitions(dawn_native PRIVATE
DAWN_ENABLE_VULKAN_VALIDATION_LAYERS
DAWN_VK_DATA_DIR="vulkandata")
endif ()
if (MSVC)
target_compile_options(dawn_native PRIVATE /bigobj)
else ()
target_compile_options(SPIRV-Tools-static PRIVATE -Wno-implicit-fallthrough)
target_compile_options(SPIRV-Tools-opt PRIVATE -Wno-implicit-fallthrough)
endif ()
endif ()

if (NOT TARGET SDL2-static)
if (WIN32)
set(SDL_LIBC ON CACHE BOOL "Use the system C library" FORCE)
endif ()
add_subdirectory(SDL EXCLUDE_FROM_ALL)
if (NOT MSVC)
target_compile_options(SDL2-static PRIVATE -Wno-implicit-fallthrough -Wno-shadow)
if (NOT TARGET SDL2-static)
if (WIN32)
set(SDL_LIBC ON CACHE BOOL "Use the system C library" FORCE)
endif ()
add_subdirectory(SDL EXCLUDE_FROM_ALL)
if (NOT MSVC)
target_compile_options(SDL2-static PRIVATE -Wno-implicit-fallthrough -Wno-shadow)
endif ()
endif ()
else ()
set(ABSL_PROPAGATE_CXX_STD ON)
add_subdirectory(dawn/third_party/abseil-cpp EXCLUDE_FROM_ALL)
endif ()

if (NOT TARGET xxhash)
@@ -48,7 +53,7 @@ if (NOT TARGET imgui)
if (CMAKE_COMPILER_IS_GNUCXX)
# currently explicitly ignored for clang in imgui code, but not gcc (yet)
target_compile_options(imgui PRIVATE -Wno-deprecated-enum-enum-conversion)
endif()
endif ()

# Optional, replaces stb_freetype if available
find_package(Freetype)
@@ -2,10 +2,16 @@
#define DOLPHIN_TYPES_H

#ifdef TARGET_PC
#include <bits/wordsize.h>
#endif

#ifdef __MWERKS__
#include <stdint.h>
typedef int8_t s8;
typedef int16_t s16;
typedef int32_t s32;
typedef int64_t s64;
typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;
#else
typedef signed char s8;
typedef signed short int s16;
typedef signed long s32;
@@ -14,23 +20,6 @@ typedef unsigned char u8;
typedef unsigned short int u16;
typedef unsigned long u32;
typedef unsigned long long int u64;
#else
typedef signed char s8;
typedef signed short int s16;
typedef signed int s32;
#if __WORDSIZE == 64
typedef signed long int s64;
#else
typedef signed long long int s64;
#endif
typedef unsigned char u8;
typedef unsigned short int u16;
typedef unsigned int u32;
#if __WORDSIZE == 64
typedef unsigned long int u64;
#else
typedef unsigned long long int u64;
#endif
#endif

typedef volatile u8 vu8;
@@ -49,31 +38,43 @@ typedef double f64;
typedef volatile f32 vf32;
typedef volatile f64 vf64;

#ifdef TARGET_PC
#if defined(TARGET_PC) && !defined(_WIN32)
#include <stdbool.h>
typedef bool BOOL;
#ifndef FALSE
#define FALSE false
#endif
#ifndef TRUE
#define TRUE true
#endif
#else
typedef int BOOL;
#ifndef FALSE
#define FALSE 0
#endif
#ifndef TRUE
#define TRUE 1
#endif
#endif

#ifdef TARGET_PC
#include <stddef.h>
#else
#ifndef NULL
#define NULL 0
#endif
#endif
#ifndef __cplusplus
#ifndef nullptr
#define nullptr NULL
#endif
#endif

#if defined(__MWERKS__)
#define AT_ADDRESS(addr) : (addr)
#define ATTRIBUTE_ALIGN(num) __attribute__((aligned(num)))
#elif defined(__GNUC__)
#define AT_ADDRESS(addr) // was removed in GCC. define in linker script instead.
#define AT_ADDRESS(addr)
#define ATTRIBUTE_ALIGN(num) __attribute__((aligned(num)))
#elif defined(_MSC_VER)
#define AT_ADDRESS(addr)
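Note on the BOOL hunk above (an illustrative assumption, not part of the commit): on Windows, windows.h already declares BOOL as int, so keeping the bool-based typedef only for non-Windows TARGET_PC builds avoids a conflicting redefinition. Minimal sketch:

// Hypothetical, not from the repository: why the !defined(_WIN32) guard exists.
#if defined(_WIN32)
#include <windows.h>   // windows.h provides: typedef int BOOL;
#else
#include <stdbool.h>
typedef bool BOOL;     // safe where no Windows headers are in play
#endif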
@@ -20,6 +20,9 @@ using webgpu::g_queue;
using webgpu::g_swapChain;

constexpr std::array PreferredBackendOrder{
#ifdef ENABLE_BACKEND_WEBGPU
BACKEND_WEBGPU,
#endif
#ifdef DAWN_ENABLE_BACKEND_D3D12
// BACKEND_D3D12,
#endif
@@ -90,7 +93,7 @@ static AuroraInfo initialize(int argc, char* argv[], const AuroraConfig& config)
}

// Initialize SDL_Renderer for ImGui when we can't use a Dawn backend
if (webgpu::g_backendType == WGPUBackendType_Null) {
if (webgpu::g_backendType == wgpu::BackendType::Null) {
if (!window::create_renderer()) {
Log.report(LOG_FATAL, FMT_STRING("Failed to initialize SDL renderer: {}"), SDL_GetError());
unreachable();
@@ -118,13 +121,14 @@ static AuroraInfo initialize(int argc, char* argv[], const AuroraConfig& config)
};
}

static WGPUTextureView g_currentView = nullptr;
#ifndef EMSCRIPTEN
static wgpu::TextureView g_currentView;
#endif

static void shutdown() noexcept {
if (g_currentView != nullptr) {
wgpuTextureViewRelease(g_currentView);
g_currentView = nullptr;
}
#ifndef EMSCRIPTEN
g_currentView = {};
#endif
imgui::shutdown();
gfx::shutdown();
webgpu::shutdown();
@@ -142,7 +146,8 @@ static const AuroraEvent* update() noexcept {
}

static bool begin_frame() noexcept {
g_currentView = wgpuSwapChainGetCurrentTextureView(g_swapChain);
#ifndef EMSCRIPTEN
g_currentView = g_swapChain.GetCurrentTextureView();
if (!g_currentView) {
ImGui::EndFrame();
// Force swapchain recreation
@@ -150,50 +155,55 @@ static bool begin_frame() noexcept {
webgpu::resize_swapchain(size.fb_width, size.fb_height, true);
return false;
}
#endif
gfx::begin_frame();
return true;
}

static void end_frame() noexcept {
const auto encoderDescriptor = WGPUCommandEncoderDescriptor{
const auto encoderDescriptor = wgpu::CommandEncoderDescriptor{
.label = "Redraw encoder",
};
auto encoder = wgpuDeviceCreateCommandEncoder(g_device, &encoderDescriptor);
auto encoder = g_device.CreateCommandEncoder(&encoderDescriptor);
gfx::end_frame(encoder);
gfx::render(encoder);
{
const std::array attachments{
WGPURenderPassColorAttachment{
wgpu::RenderPassColorAttachment{
#ifdef EMSCRIPTEN
.view = g_swapChain.GetCurrentTextureView(),
#else
.view = g_currentView,
.loadOp = WGPULoadOp_Clear,
.storeOp = WGPUStoreOp_Store,
#endif
.loadOp = wgpu::LoadOp::Clear,
.storeOp = wgpu::StoreOp::Store,
},
};
const WGPURenderPassDescriptor renderPassDescriptor{
const wgpu::RenderPassDescriptor renderPassDescriptor{
.label = "Post render pass",
.colorAttachmentCount = attachments.size(),
.colorAttachments = attachments.data(),
};
auto pass = wgpuCommandEncoderBeginRenderPass(encoder, &renderPassDescriptor);
auto pass = encoder.BeginRenderPass(&renderPassDescriptor);
// Copy EFB -> XFB (swapchain)
wgpuRenderPassEncoderSetPipeline(pass, webgpu::g_CopyPipeline);
wgpuRenderPassEncoderSetBindGroup(pass, 0, webgpu::g_CopyBindGroup, 0, nullptr);
wgpuRenderPassEncoderDraw(pass, 3, 1, 0, 0);
pass.SetPipeline(webgpu::g_CopyPipeline);
pass.SetBindGroup(0, webgpu::g_CopyBindGroup, 0, nullptr);
pass.Draw(3);
if (!g_initialFrame) {
// Render ImGui
imgui::render(pass);
}
wgpuRenderPassEncoderEnd(pass);
wgpuRenderPassEncoderRelease(pass);
pass.End();
}
const WGPUCommandBufferDescriptor cmdBufDescriptor{.label = "Redraw command buffer"};
const auto buffer = wgpuCommandEncoderFinish(encoder, &cmdBufDescriptor);
wgpuQueueSubmit(g_queue, 1, &buffer);
wgpuCommandBufferRelease(buffer);
wgpuCommandEncoderRelease(encoder);
wgpuSwapChainPresent(g_swapChain);
wgpuTextureViewRelease(g_currentView);
g_currentView = nullptr;
const wgpu::CommandBufferDescriptor cmdBufDescriptor{.label = "Redraw command buffer"};
const auto buffer = encoder.Finish(&cmdBufDescriptor);
g_queue.Submit(1, &buffer);
#ifdef WEBGPU_DAWN
g_swapChain.Present();
g_currentView = {};
#else
emscripten_sleep(0);
#endif
if (!g_initialFrame) {
ImGui::EndFrame();
}
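Condensed sketch of the two present paths in end_frame above (only g_swapChain, g_queue and WEBGPU_DAWN come from the diff; the rest is an illustrative assumption): the Dawn build presents explicitly and drops the acquired view, while the Emscripten build just yields so the browser can composite the frame.

// Sketch, not the actual source file:
wgpu::TextureView view = g_swapChain.GetCurrentTextureView(); // valid for this frame only
// ... encode the post pass and submit it via g_queue ...
#ifdef WEBGPU_DAWN
g_swapChain.Present(); // native Dawn: explicit present
view = {};             // release the acquired view
#else
emscripten_sleep(0);   // browser: yield to the event loop (needs -sASYNCIFY) so the frame is presented
#endif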
@ -37,28 +37,28 @@ BackendBinding* CreateVulkanBinding(SDL_Window* window, WGPUDevice device);
|
|||
|
||||
BackendBinding::BackendBinding(SDL_Window* window, WGPUDevice device) : m_window(window), m_device(device) {}
|
||||
|
||||
bool DiscoverAdapter(dawn::native::Instance* instance, SDL_Window* window, WGPUBackendType type) {
|
||||
bool DiscoverAdapter(dawn::native::Instance* instance, SDL_Window* window, wgpu::BackendType type) {
|
||||
switch (type) {
|
||||
#if defined(DAWN_ENABLE_BACKEND_D3D12)
|
||||
case WGPUBackendType_D3D12: {
|
||||
case wgpu::BackendType::D3D12: {
|
||||
dawn::native::d3d12::AdapterDiscoveryOptions options;
|
||||
return instance->DiscoverAdapters(&options);
|
||||
}
|
||||
#endif
|
||||
#if defined(DAWN_ENABLE_BACKEND_METAL)
|
||||
case WGPUBackendType_Metal: {
|
||||
case wgpu::BackendType::Metal: {
|
||||
dawn::native::metal::AdapterDiscoveryOptions options;
|
||||
return instance->DiscoverAdapters(&options);
|
||||
}
|
||||
#endif
|
||||
#if defined(DAWN_ENABLE_BACKEND_VULKAN)
|
||||
case WGPUBackendType_Vulkan: {
|
||||
case wgpu::BackendType::Vulkan: {
|
||||
dawn::native::vulkan::AdapterDiscoveryOptions options;
|
||||
return instance->DiscoverAdapters(&options);
|
||||
}
|
||||
#endif
|
||||
#if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
|
||||
case WGPUBackendType_OpenGL: {
|
||||
case wgpu::BackendType::OpenGL: {
|
||||
SDL_GL_ResetAttributes();
|
||||
SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
|
||||
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 4);
|
||||
|
@ -71,7 +71,7 @@ bool DiscoverAdapter(dawn::native::Instance* instance, SDL_Window* window, WGPUB
|
|||
}
|
||||
#endif
|
||||
#if defined(DAWN_ENABLE_BACKEND_OPENGLES)
|
||||
case WGPUBackendType_OpenGLES: {
|
||||
case wgpu::BackendType::OpenGLES: {
|
||||
SDL_GL_ResetAttributes();
|
||||
SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_ES);
|
||||
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3);
|
||||
|
@ -84,7 +84,7 @@ bool DiscoverAdapter(dawn::native::Instance* instance, SDL_Window* window, WGPUB
|
|||
}
|
||||
#endif
|
||||
#if defined(DAWN_ENABLE_BACKEND_NULL)
|
||||
case WGPUBackendType_Null:
|
||||
case wgpu::BackendType::Null:
|
||||
instance->DiscoverDefaultAdapters();
|
||||
return true;
|
||||
#endif
|
||||
|
@ -93,30 +93,30 @@ bool DiscoverAdapter(dawn::native::Instance* instance, SDL_Window* window, WGPUB
|
|||
}
|
||||
}
|
||||
|
||||
BackendBinding* CreateBinding(WGPUBackendType type, SDL_Window* window, WGPUDevice device) {
|
||||
BackendBinding* CreateBinding(wgpu::BackendType type, SDL_Window* window, WGPUDevice device) {
|
||||
switch (type) {
|
||||
#if defined(DAWN_ENABLE_BACKEND_D3D12)
|
||||
case WGPUBackendType_D3D12:
|
||||
case wgpu::BackendType::D3D12:
|
||||
return CreateD3D12Binding(window, device);
|
||||
#endif
|
||||
#if defined(DAWN_ENABLE_BACKEND_METAL)
|
||||
case WGPUBackendType_Metal:
|
||||
case wgpu::BackendType::Metal:
|
||||
return CreateMetalBinding(window, device);
|
||||
#endif
|
||||
#if defined(DAWN_ENABLE_BACKEND_NULL)
|
||||
case WGPUBackendType_Null:
|
||||
case wgpu::BackendType::Null:
|
||||
return CreateNullBinding(window, device);
|
||||
#endif
|
||||
#if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
|
||||
case WGPUBackendType_OpenGL:
|
||||
case wgpu::BackendType::OpenGL:
|
||||
return CreateOpenGLBinding(window, device);
|
||||
#endif
|
||||
#if defined(DAWN_ENABLE_BACKEND_OPENGLES)
|
||||
case WGPUBackendType_OpenGLES:
|
||||
case wgpu::BackendType::OpenGLES:
|
||||
return CreateOpenGLBinding(window, device);
|
||||
#endif
|
||||
#if defined(DAWN_ENABLE_BACKEND_VULKAN)
|
||||
case WGPUBackendType_Vulkan:
|
||||
case wgpu::BackendType::Vulkan:
|
||||
return CreateVulkanBinding(window, device);
|
||||
#endif
|
||||
default:
|
||||
|
|
|
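The DiscoverAdapter/CreateBinding hunks above are spelling-only changes: wgpu::BackendType is the scoped-enum counterpart of the C enum WGPUBackendType with matching values, so the two convert with a cast. Small illustrative check (not from the commit):

#include <cassert>
#include <webgpu/webgpu_cpp.h>
// Hedged example: the C++ and C enumerators share numeric values.
void backend_type_roundtrip() {
  wgpu::BackendType cpp = wgpu::BackendType::Vulkan;
  WGPUBackendType c = static_cast<WGPUBackendType>(cpp);
  assert(c == WGPUBackendType_Vulkan);
}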
@ -1,7 +1,7 @@
|
|||
#pragma once
|
||||
|
||||
#include <dawn/native/DawnNative.h>
|
||||
#include <webgpu/webgpu.h>
|
||||
#include <webgpu/webgpu_cpp.h>
|
||||
|
||||
struct SDL_Window;
|
||||
|
||||
|
@ -21,7 +21,7 @@ protected:
|
|||
WGPUDevice m_device = nullptr;
|
||||
};
|
||||
|
||||
bool DiscoverAdapter(dawn::native::Instance* instance, SDL_Window* window, WGPUBackendType type);
|
||||
BackendBinding* CreateBinding(WGPUBackendType type, SDL_Window* window, WGPUDevice device);
|
||||
bool DiscoverAdapter(dawn::native::Instance* instance, SDL_Window* window, wgpu::BackendType type);
|
||||
BackendBinding* CreateBinding(wgpu::BackendType type, SDL_Window* window, WGPUDevice device);
|
||||
|
||||
} // namespace aurora::webgpu::utils
|
||||
|
|
|
@ -18,6 +18,7 @@ namespace aurora::gfx {
|
|||
static Module Log("aurora::gfx");
|
||||
|
||||
using webgpu::g_device;
|
||||
using webgpu::g_instance;
|
||||
using webgpu::g_queue;
|
||||
|
||||
#ifdef AURORA_GFX_DEBUG_GROUPS
|
||||
|
@ -90,31 +91,31 @@ namespace aurora {
|
|||
// we create specialized methods to handle them. Note that these are highly dependent on
|
||||
// the structure definition, which could easily change with Dawn updates.
|
||||
template <>
|
||||
inline HashType xxh3_hash(const WGPUBindGroupDescriptor& input, HashType seed) {
|
||||
inline HashType xxh3_hash(const wgpu::BindGroupDescriptor& input, HashType seed) {
|
||||
constexpr auto offset = sizeof(void*) * 2; // skip nextInChain, label
|
||||
const auto hash = xxh3_hash_s(reinterpret_cast<const u8*>(&input) + offset,
|
||||
sizeof(WGPUBindGroupDescriptor) - offset - sizeof(void*) /* skip entries */, seed);
|
||||
return xxh3_hash_s(input.entries, sizeof(WGPUBindGroupEntry) * input.entryCount, hash);
|
||||
sizeof(wgpu::BindGroupDescriptor) - offset - sizeof(void*) /* skip entries */, seed);
|
||||
return xxh3_hash_s(input.entries, sizeof(wgpu::BindGroupEntry) * input.entryCount, hash);
|
||||
}
|
||||
template <>
|
||||
inline HashType xxh3_hash(const WGPUSamplerDescriptor& input, HashType seed) {
|
||||
inline HashType xxh3_hash(const wgpu::SamplerDescriptor& input, HashType seed) {
|
||||
constexpr auto offset = sizeof(void*) * 2; // skip nextInChain, label
|
||||
return xxh3_hash_s(reinterpret_cast<const u8*>(&input) + offset,
|
||||
sizeof(WGPUSamplerDescriptor) - offset - 2 /* skip padding */, seed);
|
||||
sizeof(wgpu::SamplerDescriptor) - offset - 2 /* skip padding */, seed);
|
||||
}
|
||||
} // namespace aurora
|
||||
|
||||
namespace aurora::gfx {
|
||||
using NewPipelineCallback = std::function<WGPURenderPipeline()>;
|
||||
using NewPipelineCallback = std::function<wgpu::RenderPipeline()>;
|
||||
std::mutex g_pipelineMutex;
|
||||
static bool g_hasPipelineThread = false;
|
||||
static std::thread g_pipelineThread;
|
||||
static std::atomic_bool g_pipelineThreadEnd;
|
||||
static std::condition_variable g_pipelineCv;
|
||||
static absl::flat_hash_map<PipelineRef, WGPURenderPipeline> g_pipelines;
|
||||
static absl::flat_hash_map<PipelineRef, wgpu::RenderPipeline> g_pipelines;
|
||||
static std::deque<std::pair<PipelineRef, NewPipelineCallback>> g_queuedPipelines;
|
||||
static absl::flat_hash_map<BindGroupRef, WGPUBindGroup> g_cachedBindGroups;
|
||||
static absl::flat_hash_map<SamplerRef, WGPUSampler> g_cachedSamplers;
|
||||
static absl::flat_hash_map<BindGroupRef, wgpu::BindGroup> g_cachedBindGroups;
|
||||
static absl::flat_hash_map<SamplerRef, wgpu::Sampler> g_cachedSamplers;
|
||||
std::atomic_uint32_t queuedPipelines;
|
||||
std::atomic_uint32_t createdPipelines;
|
||||
|
||||
|
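The xxh3_hash specializations above hash a descriptor's value fields while skipping its leading nextInChain/label pointers (and the trailing entries pointer), then hash the entry array separately. A hedged sketch of the same idea for a generic descriptor (the helper name and layout assumptions are mine, not the library's):

#include <cstddef>
#include <cstdint>
#include <xxhash.h>
// Illustration only: hash everything after the two leading pointer members.
template <typename Desc>
uint64_t hash_descriptor_tail(const Desc& d, uint64_t seed) {
  constexpr size_t skip = sizeof(void*) * 2; // nextInChain + label
  const auto* bytes = reinterpret_cast<const uint8_t*>(&d);
  return XXH3_64bits_withSeed(bytes + skip, sizeof(Desc) - skip, seed);
}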
@ -123,12 +124,12 @@ static ByteBuffer g_uniforms;
|
|||
static ByteBuffer g_indices;
|
||||
static ByteBuffer g_storage;
|
||||
static ByteBuffer g_textureUpload;
|
||||
WGPUBuffer g_vertexBuffer;
|
||||
WGPUBuffer g_uniformBuffer;
|
||||
WGPUBuffer g_indexBuffer;
|
||||
WGPUBuffer g_storageBuffer;
|
||||
static std::array<WGPUBuffer, 3> g_stagingBuffers;
|
||||
static WGPUSupportedLimits g_cachedLimits;
|
||||
wgpu::Buffer g_vertexBuffer;
|
||||
wgpu::Buffer g_uniformBuffer;
|
||||
wgpu::Buffer g_indexBuffer;
|
||||
wgpu::Buffer g_storageBuffer;
|
||||
static std::array<wgpu::Buffer, 3> g_stagingBuffers;
|
||||
static wgpu::SupportedLimits g_cachedLimits;
|
||||
|
||||
static ShaderState g_state;
|
||||
static PipelineRef g_currentPipeline;
|
||||
|
@ -260,16 +261,16 @@ void set_scissor(uint32_t x, uint32_t y, uint32_t w, uint32_t h) noexcept {
|
|||
}
|
||||
}
|
||||
|
||||
static inline bool operator==(const WGPUExtent3D& lhs, const WGPUExtent3D& rhs) {
|
||||
static inline bool operator==(const wgpu::Extent3D& lhs, const wgpu::Extent3D& rhs) {
|
||||
return lhs.width == rhs.width && lhs.height == rhs.height && lhs.depthOrArrayLayers == rhs.depthOrArrayLayers;
|
||||
}
|
||||
static inline bool operator!=(const WGPUExtent3D& lhs, const WGPUExtent3D& rhs) { return !(lhs == rhs); }
|
||||
static inline bool operator!=(const wgpu::Extent3D& lhs, const wgpu::Extent3D& rhs) { return !(lhs == rhs); }
|
||||
|
||||
void resolve_color(const ClipRect& rect, uint32_t bind, GXTexFmt fmt, bool clear_depth) noexcept {
|
||||
if (g_resolvedTextures.size() < bind + 1) {
|
||||
g_resolvedTextures.resize(bind + 1);
|
||||
}
|
||||
const WGPUExtent3D size{
|
||||
const wgpu::Extent3D size{
|
||||
.width = static_cast<uint32_t>(rect.width),
|
||||
.height = static_cast<uint32_t>(rect.height),
|
||||
.depthOrArrayLayers = 1,
|
||||
|
@ -356,7 +357,8 @@ static void pipeline_worker() {
|
|||
|
||||
void initialize() {
|
||||
// No async pipelines for OpenGL (ES)
|
||||
if (webgpu::g_backendType == WGPUBackendType_OpenGL || webgpu::g_backendType == WGPUBackendType_OpenGLES) {
|
||||
if (webgpu::g_backendType == wgpu::BackendType::OpenGL || webgpu::g_backendType == wgpu::BackendType::OpenGLES ||
|
||||
webgpu::g_backendType == wgpu::BackendType::WebGPU) {
|
||||
g_hasPipelineThread = false;
|
||||
} else {
|
||||
g_pipelineThreadEnd = false;
|
||||
|
@ -365,29 +367,30 @@ void initialize() {
|
|||
}
|
||||
|
||||
// For uniform & storage buffer offset alignments
|
||||
wgpuDeviceGetLimits(g_device, &g_cachedLimits);
|
||||
g_device.GetLimits(&g_cachedLimits);
|
||||
|
||||
const auto createBuffer = [](WGPUBuffer& out, WGPUBufferUsageFlags usage, uint64_t size, const char* label) {
|
||||
const auto createBuffer = [](wgpu::Buffer& out, wgpu::BufferUsage usage, uint64_t size, const char* label) {
|
||||
if (size <= 0) {
|
||||
return;
|
||||
}
|
||||
const WGPUBufferDescriptor descriptor{
|
||||
const wgpu::BufferDescriptor descriptor{
|
||||
.label = label,
|
||||
.usage = usage,
|
||||
.size = size,
|
||||
};
|
||||
out = wgpuDeviceCreateBuffer(g_device, &descriptor);
|
||||
out = g_device.CreateBuffer(&descriptor);
|
||||
};
|
||||
createBuffer(g_uniformBuffer, WGPUBufferUsage_Uniform | WGPUBufferUsage_CopyDst, UniformBufferSize,
|
||||
createBuffer(g_uniformBuffer, wgpu::BufferUsage::Uniform | wgpu::BufferUsage::CopyDst, UniformBufferSize,
|
||||
"Shared Uniform Buffer");
|
||||
createBuffer(g_vertexBuffer, WGPUBufferUsage_Vertex | WGPUBufferUsage_CopyDst, VertexBufferSize,
|
||||
createBuffer(g_vertexBuffer, wgpu::BufferUsage::Vertex | wgpu::BufferUsage::CopyDst, VertexBufferSize,
|
||||
"Shared Vertex Buffer");
|
||||
createBuffer(g_indexBuffer, WGPUBufferUsage_Index | WGPUBufferUsage_CopyDst, IndexBufferSize, "Shared Index Buffer");
|
||||
createBuffer(g_storageBuffer, WGPUBufferUsage_Storage | WGPUBufferUsage_CopyDst, StorageBufferSize,
|
||||
createBuffer(g_indexBuffer, wgpu::BufferUsage::Index | wgpu::BufferUsage::CopyDst, IndexBufferSize,
|
||||
"Shared Index Buffer");
|
||||
createBuffer(g_storageBuffer, wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopyDst, StorageBufferSize,
|
||||
"Shared Storage Buffer");
|
||||
for (int i = 0; i < g_stagingBuffers.size(); ++i) {
|
||||
const auto label = fmt::format(FMT_STRING("Staging Buffer {}"), i);
|
||||
createBuffer(g_stagingBuffers[i], WGPUBufferUsage_MapWrite | WGPUBufferUsage_CopySrc, StagingBufferSize,
|
||||
createBuffer(g_stagingBuffers[i], wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc, StagingBufferSize,
|
||||
label.c_str());
|
||||
}
|
||||
map_staging_buffer();
|
||||
|
@ -472,41 +475,15 @@ void shutdown() {
|
|||
|
||||
g_resolvedTextures.clear();
|
||||
g_textureUploads.clear();
|
||||
for (const auto& item : g_cachedBindGroups) {
|
||||
wgpuBindGroupRelease(item.second);
|
||||
}
|
||||
g_cachedBindGroups.clear();
|
||||
for (const auto& item : g_cachedSamplers) {
|
||||
wgpuSamplerRelease(item.second);
|
||||
}
|
||||
g_cachedSamplers.clear();
|
||||
for (const auto& item : g_pipelines) {
|
||||
wgpuRenderPipelineRelease(item.second);
|
||||
}
|
||||
g_pipelines.clear();
|
||||
g_queuedPipelines.clear();
|
||||
if (g_vertexBuffer != nullptr) {
|
||||
wgpuBufferDestroy(g_vertexBuffer);
|
||||
g_vertexBuffer = nullptr;
|
||||
}
|
||||
if (g_uniformBuffer != nullptr) {
|
||||
wgpuBufferDestroy(g_uniformBuffer);
|
||||
g_uniformBuffer = nullptr;
|
||||
}
|
||||
if (g_indexBuffer != nullptr) {
|
||||
wgpuBufferDestroy(g_indexBuffer);
|
||||
g_indexBuffer = nullptr;
|
||||
}
|
||||
if (g_storageBuffer != nullptr) {
|
||||
wgpuBufferDestroy(g_storageBuffer);
|
||||
g_storageBuffer = nullptr;
|
||||
}
|
||||
for (auto& item : g_stagingBuffers) {
|
||||
if (item != nullptr) {
|
||||
wgpuBufferDestroy(item);
|
||||
}
|
||||
item = nullptr;
|
||||
}
|
||||
g_vertexBuffer = {};
|
||||
g_uniformBuffer = {};
|
||||
g_indexBuffer = {};
|
||||
g_storageBuffer = {};
|
||||
g_stagingBuffers.fill({});
|
||||
g_renderPasses.clear();
|
||||
g_currentRenderPass = UINT32_MAX;
|
||||
|
||||
|
@ -520,8 +497,8 @@ static size_t currentStagingBuffer = 0;
|
|||
static bool bufferMapped = false;
|
||||
void map_staging_buffer() {
|
||||
bufferMapped = false;
|
||||
wgpuBufferMapAsync(
|
||||
g_stagingBuffers[currentStagingBuffer], WGPUMapMode_Write, 0, StagingBufferSize,
|
||||
g_stagingBuffers[currentStagingBuffer].MapAsync(
|
||||
wgpu::MapMode::Write, 0, StagingBufferSize,
|
||||
[](WGPUBufferMapAsyncStatus status, void* userdata) {
|
||||
if (status == WGPUBufferMapAsyncStatus_DestroyedBeforeCallback) {
|
||||
return;
|
||||
|
@ -536,7 +513,11 @@ void map_staging_buffer() {
|
|||
|
||||
void begin_frame() {
|
||||
while (!bufferMapped) {
|
||||
wgpuDeviceTick(g_device);
|
||||
#ifdef EMSCRIPTEN
|
||||
emscripten_sleep(0);
|
||||
#else
|
||||
g_device.Tick();
|
||||
#endif
|
||||
}
|
||||
size_t bufferOffset = 0;
|
||||
auto& stagingBuf = g_stagingBuffers[currentStagingBuffer];
|
||||
|
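The loop above busy-waits for the staging buffer's MapAsync callback; the hunk swaps the native device tick for emscripten_sleep(0) because browser callbacks only run once control returns to the event loop. Reduced sketch (assumes the same bufferMapped flag set by the callback):

while (!bufferMapped) {
#ifdef EMSCRIPTEN
  emscripten_sleep(0); // yield so the async map can complete (ASYNCIFY)
#else
  g_device.Tick();     // Dawn: pump device callbacks manually
#endif
}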
@ -544,8 +525,7 @@ void begin_frame() {
|
|||
if (size <= 0) {
|
||||
return;
|
||||
}
|
||||
buf = ByteBuffer{static_cast<u8*>(wgpuBufferGetMappedRange(stagingBuf, bufferOffset, size)),
|
||||
static_cast<size_t>(size)};
|
||||
buf = ByteBuffer{static_cast<u8*>(stagingBuf.GetMappedRange(bufferOffset, size)), static_cast<size_t>(size)};
|
||||
bufferOffset += size;
|
||||
};
|
||||
mapBuffer(g_verts, VertexBufferSize);
|
||||
|
@ -560,23 +540,22 @@ void begin_frame() {
|
|||
g_renderPasses.emplace_back();
|
||||
g_renderPasses[0].clearColor = gx::g_gxState.clearColor;
|
||||
g_currentRenderPass = 0;
|
||||
// push_command(CommandType::SetViewport, Command::Data{.setViewport = g_cachedViewport});
|
||||
// push_command(CommandType::SetScissor, Command::Data{.setScissor = g_cachedScissor});
|
||||
// push_command(CommandType::SetViewport, Command::Data{.setViewport = g_cachedViewport});
|
||||
// push_command(CommandType::SetScissor, Command::Data{.setScissor = g_cachedScissor});
|
||||
}
|
||||
|
||||
void end_frame(WGPUCommandEncoder cmd) {
|
||||
void end_frame(const wgpu::CommandEncoder& cmd) {
|
||||
uint64_t bufferOffset = 0;
|
||||
const auto writeBuffer = [&](ByteBuffer& buf, WGPUBuffer& out, uint64_t size, std::string_view label) {
|
||||
const auto writeBuffer = [&](ByteBuffer& buf, wgpu::Buffer& out, uint64_t size, std::string_view label) {
|
||||
const auto writeSize = buf.size(); // Only need to copy this many bytes
|
||||
if (writeSize > 0) {
|
||||
wgpuCommandEncoderCopyBufferToBuffer(cmd, g_stagingBuffers[currentStagingBuffer], bufferOffset, out, 0,
|
||||
ALIGN(writeSize, 4));
|
||||
cmd.CopyBufferToBuffer(g_stagingBuffers[currentStagingBuffer], bufferOffset, out, 0, ALIGN(writeSize, 4));
|
||||
buf.clear();
|
||||
}
|
||||
bufferOffset += size;
|
||||
return writeSize;
|
||||
};
|
||||
wgpuBufferUnmap(g_stagingBuffers[currentStagingBuffer]);
|
||||
g_stagingBuffers[currentStagingBuffer].Unmap();
|
||||
g_lastVertSize = writeBuffer(g_verts, g_vertexBuffer, VertexBufferSize, "Vertex");
|
||||
g_lastUniformSize = writeBuffer(g_uniforms, g_uniformBuffer, UniformBufferSize, "Uniform");
|
||||
g_lastIndexSize = writeBuffer(g_indices, g_indexBuffer, IndexBufferSize, "Index");
|
||||
|
@ -584,16 +563,16 @@ void end_frame(WGPUCommandEncoder cmd) {
|
|||
{
|
||||
// Perform texture copies
|
||||
for (const auto& item : g_textureUploads) {
|
||||
const WGPUImageCopyBuffer buf{
|
||||
const wgpu::ImageCopyBuffer buf{
|
||||
.layout =
|
||||
WGPUTextureDataLayout{
|
||||
wgpu::TextureDataLayout{
|
||||
.offset = item.layout.offset + bufferOffset,
|
||||
.bytesPerRow = ALIGN(item.layout.bytesPerRow, 256),
|
||||
.rowsPerImage = item.layout.rowsPerImage,
|
||||
},
|
||||
.buffer = g_stagingBuffers[currentStagingBuffer],
|
||||
};
|
||||
wgpuCommandEncoderCopyBufferToTexture(cmd, &buf, &item.tex, &item.size);
|
||||
cmd.CopyBufferToTexture(&buf, &item.tex, &item.size);
|
||||
}
|
||||
g_textureUploads.clear();
|
||||
g_textureUpload.clear();
|
||||
|
@ -603,7 +582,7 @@ void end_frame(WGPUCommandEncoder cmd) {
|
|||
g_currentRenderPass = UINT32_MAX;
|
||||
}
|
||||
|
||||
void render(WGPUCommandEncoder cmd) {
|
||||
void render(wgpu::CommandEncoder& cmd) {
|
||||
for (u32 i = 0; i < g_renderPasses.size(); ++i) {
|
||||
const auto& passInfo = g_renderPasses[i];
|
||||
bool finalPass = i == g_renderPasses.size() - 1;
|
||||
|
@ -612,12 +591,11 @@ void render(WGPUCommandEncoder cmd) {
|
|||
unreachable();
|
||||
}
|
||||
const std::array attachments{
|
||||
WGPURenderPassColorAttachment{
|
||||
wgpu::RenderPassColorAttachment{
|
||||
.view = webgpu::g_frameBuffer.view,
|
||||
.resolveTarget = webgpu::g_graphicsConfig.msaaSamples > 1 ? webgpu::g_frameBufferResolved.view : nullptr,
|
||||
.loadOp = passInfo.clear ? WGPULoadOp_Clear : WGPULoadOp_Load,
|
||||
.storeOp = WGPUStoreOp_Store,
|
||||
.clearColor = {NAN, NAN, NAN, NAN},
|
||||
.loadOp = passInfo.clear ? wgpu::LoadOp::Clear : wgpu::LoadOp::Load,
|
||||
.storeOp = wgpu::StoreOp::Store,
|
||||
.clearValue =
|
||||
{
|
||||
.r = passInfo.clearColor.x(),
|
||||
|
@ -627,29 +605,27 @@ void render(WGPUCommandEncoder cmd) {
|
|||
},
|
||||
},
|
||||
};
|
||||
const WGPURenderPassDepthStencilAttachment depthStencilAttachment{
|
||||
const wgpu::RenderPassDepthStencilAttachment depthStencilAttachment{
|
||||
.view = webgpu::g_depthBuffer.view,
|
||||
.depthLoadOp = passInfo.clear ? WGPULoadOp_Clear : WGPULoadOp_Load,
|
||||
.depthStoreOp = WGPUStoreOp_Store,
|
||||
.clearDepth = NAN,
|
||||
.depthLoadOp = passInfo.clear ? wgpu::LoadOp::Clear : wgpu::LoadOp::Load,
|
||||
.depthStoreOp = wgpu::StoreOp::Store,
|
||||
.depthClearValue = 1.f,
|
||||
};
|
||||
const auto label = fmt::format(FMT_STRING("Render pass {}"), i);
|
||||
const WGPURenderPassDescriptor renderPassDescriptor{
|
||||
const wgpu::RenderPassDescriptor renderPassDescriptor{
|
||||
.label = label.c_str(),
|
||||
.colorAttachmentCount = attachments.size(),
|
||||
.colorAttachments = attachments.data(),
|
||||
.depthStencilAttachment = &depthStencilAttachment,
|
||||
};
|
||||
auto pass = wgpuCommandEncoderBeginRenderPass(cmd, &renderPassDescriptor);
|
||||
auto pass = cmd.BeginRenderPass(&renderPassDescriptor);
|
||||
render_pass(pass, i);
|
||||
wgpuRenderPassEncoderEnd(pass);
|
||||
wgpuRenderPassEncoderRelease(pass);
|
||||
pass.End();
|
||||
|
||||
if (passInfo.resolveTarget != UINT32_MAX) {
|
||||
WGPUImageCopyTexture src{
|
||||
wgpu::ImageCopyTexture src{
|
||||
.origin =
|
||||
WGPUOrigin3D{
|
||||
wgpu::Origin3D{
|
||||
.x = static_cast<uint32_t>(passInfo.resolveRect.x),
|
||||
.y = static_cast<uint32_t>(passInfo.resolveRect.y),
|
||||
},
|
||||
|
@ -660,21 +636,21 @@ void render(WGPUCommandEncoder cmd) {
|
|||
src.texture = webgpu::g_frameBuffer.texture;
|
||||
}
|
||||
auto& target = g_resolvedTextures[passInfo.resolveTarget];
|
||||
const WGPUImageCopyTexture dst{
|
||||
const wgpu::ImageCopyTexture dst{
|
||||
.texture = target->texture,
|
||||
};
|
||||
const WGPUExtent3D size{
|
||||
const wgpu::Extent3D size{
|
||||
.width = static_cast<uint32_t>(passInfo.resolveRect.width),
|
||||
.height = static_cast<uint32_t>(passInfo.resolveRect.height),
|
||||
.depthOrArrayLayers = 1,
|
||||
};
|
||||
wgpuCommandEncoderCopyTextureToTexture(cmd, &src, &dst, &size);
|
||||
cmd.CopyTextureToTexture(&src, &dst, &size);
|
||||
}
|
||||
}
|
||||
g_renderPasses.clear();
|
||||
}
|
||||
|
||||
void render_pass(WGPURenderPassEncoder pass, u32 idx) {
|
||||
void render_pass(const wgpu::RenderPassEncoder& pass, u32 idx) {
|
||||
g_currentPipeline = UINTPTR_MAX;
|
||||
#ifdef AURORA_GFX_DEBUG_GROUPS
|
||||
std::vector<std::string> lastDebugGroupStack;
|
||||
|
@ -691,10 +667,10 @@ void render_pass(WGPURenderPassEncoder pass, u32 idx) {
|
|||
}
|
||||
}
|
||||
for (size_t i = firstDiff; i < lastDebugGroupStack.size(); ++i) {
|
||||
wgpuRenderPassEncoderPopDebugGroup(pass);
|
||||
pass.PopDebugGroup();
|
||||
}
|
||||
for (size_t i = firstDiff; i < cmd.debugGroupStack.size(); ++i) {
|
||||
wgpuRenderPassEncoderPushDebugGroup(pass, cmd.debugGroupStack[i].c_str());
|
||||
pass.PushDebugGroup(cmd.debugGroupStack[i].c_str());
|
||||
}
|
||||
lastDebugGroupStack = cmd.debugGroupStack;
|
||||
}
|
||||
|
@ -702,11 +678,11 @@ void render_pass(WGPURenderPassEncoder pass, u32 idx) {
|
|||
switch (cmd.type) {
|
||||
case CommandType::SetViewport: {
|
||||
const auto& vp = cmd.data.setViewport;
|
||||
wgpuRenderPassEncoderSetViewport(pass, vp.left, vp.top, vp.width, vp.height, vp.znear, vp.zfar);
|
||||
pass.SetViewport(vp.left, vp.top, vp.width, vp.height, vp.znear, vp.zfar);
|
||||
} break;
|
||||
case CommandType::SetScissor: {
|
||||
const auto& sc = cmd.data.setScissor;
|
||||
wgpuRenderPassEncoderSetScissorRect(pass, sc.x, sc.y, sc.w, sc.h);
|
||||
pass.SetScissorRect(sc.x, sc.y, sc.w, sc.h);
|
||||
} break;
|
||||
case CommandType::Draw: {
|
||||
const auto& draw = cmd.data.draw;
|
||||
|
@ -724,12 +700,12 @@ void render_pass(WGPURenderPassEncoder pass, u32 idx) {
|
|||
|
||||
#ifdef AURORA_GFX_DEBUG_GROUPS
|
||||
for (size_t i = 0; i < lastDebugGroupStack.size(); ++i) {
|
||||
wgpuRenderPassEncoderPopDebugGroup(pass);
|
||||
pass.PopDebugGroup();
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
bool bind_pipeline(PipelineRef ref, WGPURenderPassEncoder pass) {
|
||||
bool bind_pipeline(PipelineRef ref, const wgpu::RenderPassEncoder& pass) {
|
||||
if (ref == g_currentPipeline) {
|
||||
return true;
|
||||
}
|
||||
|
@ -738,7 +714,7 @@ bool bind_pipeline(PipelineRef ref, WGPURenderPassEncoder pass) {
|
|||
if (it == g_pipelines.end()) {
|
||||
return false;
|
||||
}
|
||||
wgpuRenderPassEncoderSetPipeline(pass, it->second);
|
||||
pass.SetPipeline(it->second);
|
||||
g_currentPipeline = ref;
|
||||
return true;
|
||||
}
|
||||
|
@ -809,27 +785,38 @@ std::pair<ByteBuffer, Range> map_storage(size_t length) {
|
|||
return {ByteBuffer{g_storage.data() + range.offset, range.size}, range};
|
||||
}
|
||||
|
||||
BindGroupRef bind_group_ref(const WGPUBindGroupDescriptor& descriptor) {
|
||||
// TODO: should we avoid caching bind groups altogether?
|
||||
BindGroupRef bind_group_ref(const wgpu::BindGroupDescriptor& descriptor) {
|
||||
#ifdef EMSCRIPTEN
|
||||
const auto bg = g_device.CreateBindGroup(&descriptor);
|
||||
BindGroupRef id = reinterpret_cast<BindGroupRef>(bg.Get());
|
||||
g_cachedBindGroups.try_emplace(id, bg);
|
||||
#else
|
||||
const auto id = xxh3_hash(descriptor);
|
||||
if (!g_cachedBindGroups.contains(id)) {
|
||||
g_cachedBindGroups.try_emplace(id, wgpuDeviceCreateBindGroup(g_device, &descriptor));
|
||||
g_cachedBindGroups.try_emplace(id, g_device.CreateBindGroup(&descriptor));
|
||||
}
|
||||
#endif
|
||||
return id;
|
||||
}
|
||||
WGPUBindGroup find_bind_group(BindGroupRef id) {
|
||||
const wgpu::BindGroup& find_bind_group(BindGroupRef id) {
|
||||
#ifdef EMSCRIPTEN
|
||||
return g_cachedBindGroups[id];
|
||||
#else
|
||||
const auto it = g_cachedBindGroups.find(id);
|
||||
if (it == g_cachedBindGroups.end()) {
|
||||
Log.report(LOG_FATAL, FMT_STRING("get_bind_group: failed to locate {}"), id);
|
||||
unreachable();
|
||||
}
|
||||
return it->second;
|
||||
#endif
|
||||
}
|
||||
|
||||
WGPUSampler sampler_ref(const WGPUSamplerDescriptor& descriptor) {
|
||||
const wgpu::Sampler& sampler_ref(const wgpu::SamplerDescriptor& descriptor) {
|
||||
const auto id = xxh3_hash(descriptor);
|
||||
auto it = g_cachedSamplers.find(id);
|
||||
if (it == g_cachedSamplers.end()) {
|
||||
it = g_cachedSamplers.try_emplace(id, wgpuDeviceCreateSampler(g_device, &descriptor)).first;
|
||||
it = g_cachedSamplers.try_emplace(id, g_device.CreateSampler(&descriptor)).first;
|
||||
}
|
||||
return it->second;
|
||||
}
|
||||
|
|
|
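Taken together, bind_group_ref and find_bind_group above cache bind groups: natively they are deduplicated by descriptor hash, while the Emscripten path creates them eagerly and keys them by handle. A hypothetical call site (not taken from the repository):

// Illustrative usage of the two helpers from the diff above.
wgpu::BindGroupDescriptor desc{}; // assume layout/entries are filled in elsewhere
BindGroupRef ref = bind_group_ref(desc);
// later, while recording a render pass:
pass.SetBindGroup(0, find_bind_group(ref));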
@ -6,7 +6,7 @@
|
|||
#include <utility>
|
||||
#include <cstring>
|
||||
|
||||
#include <webgpu/webgpu.h>
|
||||
#include <webgpu/webgpu_cpp.h>
|
||||
#include <xxhash_impl.h>
|
||||
|
||||
namespace aurora {
|
||||
|
@ -123,10 +123,10 @@ private:
|
|||
} // namespace aurora
|
||||
|
||||
namespace aurora::gfx {
|
||||
extern WGPUBuffer g_vertexBuffer;
|
||||
extern WGPUBuffer g_uniformBuffer;
|
||||
extern WGPUBuffer g_indexBuffer;
|
||||
extern WGPUBuffer g_storageBuffer;
|
||||
extern wgpu::Buffer g_vertexBuffer;
|
||||
extern wgpu::Buffer g_uniformBuffer;
|
||||
extern wgpu::Buffer g_indexBuffer;
|
||||
extern wgpu::Buffer g_storageBuffer;
|
||||
|
||||
using BindGroupRef = HashType;
|
||||
using PipelineRef = HashType;
|
||||
|
@ -148,9 +148,9 @@ void initialize();
|
|||
void shutdown();
|
||||
|
||||
void begin_frame();
|
||||
void end_frame(WGPUCommandEncoder cmd);
|
||||
void render(WGPUCommandEncoder cmd);
|
||||
void render_pass(WGPURenderPassEncoder pass, uint32_t idx);
|
||||
void end_frame(const wgpu::CommandEncoder& cmd);
|
||||
void render(wgpu::CommandEncoder& cmd);
|
||||
void render_pass(const wgpu::RenderPassEncoder& pass, uint32_t idx);
|
||||
void map_staging_buffer();
|
||||
|
||||
Range push_verts(const uint8_t* data, size_t length);
|
||||
|
@ -192,12 +192,12 @@ void merge_draw_command(DrawData data);
|
|||
|
||||
template <typename PipelineConfig>
|
||||
PipelineRef pipeline_ref(PipelineConfig config);
|
||||
bool bind_pipeline(PipelineRef ref, WGPURenderPassEncoder pass);
|
||||
bool bind_pipeline(PipelineRef ref, const wgpu::RenderPassEncoder& pass);
|
||||
|
||||
BindGroupRef bind_group_ref(const WGPUBindGroupDescriptor& descriptor);
|
||||
WGPUBindGroup find_bind_group(BindGroupRef id);
|
||||
BindGroupRef bind_group_ref(const wgpu::BindGroupDescriptor& descriptor);
|
||||
const wgpu::BindGroup& find_bind_group(BindGroupRef id);
|
||||
|
||||
WGPUSampler sampler_ref(const WGPUSamplerDescriptor& descriptor);
|
||||
const wgpu::Sampler& sampler_ref(const wgpu::SamplerDescriptor& descriptor);
|
||||
|
||||
uint32_t align_uniform(uint32_t value);
|
||||
|
||||
|
|
lib/gfx/gx.cpp
|
@ -23,108 +23,108 @@ GXState g_gxState{};
|
|||
|
||||
const TextureBind& get_texture(GXTexMapID id) noexcept { return g_gxState.textures[static_cast<size_t>(id)]; }
|
||||
|
||||
static inline WGPUBlendFactor to_blend_factor(GXBlendFactor fac, bool isDst) {
|
||||
static inline wgpu::BlendFactor to_blend_factor(GXBlendFactor fac, bool isDst) {
|
||||
switch (fac) {
|
||||
case GX_BL_ZERO:
|
||||
return WGPUBlendFactor_Zero;
|
||||
return wgpu::BlendFactor::Zero;
|
||||
case GX_BL_ONE:
|
||||
return WGPUBlendFactor_One;
|
||||
return wgpu::BlendFactor::One;
|
||||
case GX_BL_SRCCLR: // + GX_BL_DSTCLR
|
||||
if (isDst) {
|
||||
return WGPUBlendFactor_Src;
|
||||
return wgpu::BlendFactor::Src;
|
||||
} else {
|
||||
return WGPUBlendFactor_Dst;
|
||||
return wgpu::BlendFactor::Dst;
|
||||
}
|
||||
case GX_BL_INVSRCCLR: // + GX_BL_INVDSTCLR
|
||||
if (isDst) {
|
||||
return WGPUBlendFactor_OneMinusSrc;
|
||||
return wgpu::BlendFactor::OneMinusSrc;
|
||||
} else {
|
||||
return WGPUBlendFactor_OneMinusDst;
|
||||
return wgpu::BlendFactor::OneMinusDst;
|
||||
}
|
||||
case GX_BL_SRCALPHA:
|
||||
return WGPUBlendFactor_SrcAlpha;
|
||||
return wgpu::BlendFactor::SrcAlpha;
|
||||
case GX_BL_INVSRCALPHA:
|
||||
return WGPUBlendFactor_OneMinusSrcAlpha;
|
||||
return wgpu::BlendFactor::OneMinusSrcAlpha;
|
||||
case GX_BL_DSTALPHA:
|
||||
return WGPUBlendFactor_DstAlpha;
|
||||
return wgpu::BlendFactor::DstAlpha;
|
||||
case GX_BL_INVDSTALPHA:
|
||||
return WGPUBlendFactor_OneMinusDstAlpha;
|
||||
return wgpu::BlendFactor::OneMinusDstAlpha;
|
||||
default:
|
||||
Log.report(LOG_FATAL, FMT_STRING("invalid blend factor {}"), fac);
|
||||
unreachable();
|
||||
}
|
||||
}
|
||||
|
||||
static inline WGPUCompareFunction to_compare_function(GXCompare func) {
|
||||
static inline wgpu::CompareFunction to_compare_function(GXCompare func) {
|
||||
switch (func) {
|
||||
case GX_NEVER:
|
||||
return WGPUCompareFunction_Never;
|
||||
return wgpu::CompareFunction::Never;
|
||||
case GX_LESS:
|
||||
return WGPUCompareFunction_Less;
|
||||
return wgpu::CompareFunction::Less;
|
||||
case GX_EQUAL:
|
||||
return WGPUCompareFunction_Equal;
|
||||
return wgpu::CompareFunction::Equal;
|
||||
case GX_LEQUAL:
|
||||
return WGPUCompareFunction_LessEqual;
|
||||
return wgpu::CompareFunction::LessEqual;
|
||||
case GX_GREATER:
|
||||
return WGPUCompareFunction_Greater;
|
||||
return wgpu::CompareFunction::Greater;
|
||||
case GX_NEQUAL:
|
||||
return WGPUCompareFunction_NotEqual;
|
||||
return wgpu::CompareFunction::NotEqual;
|
||||
case GX_GEQUAL:
|
||||
return WGPUCompareFunction_GreaterEqual;
|
||||
return wgpu::CompareFunction::GreaterEqual;
|
||||
case GX_ALWAYS:
|
||||
return WGPUCompareFunction_Always;
|
||||
return wgpu::CompareFunction::Always;
|
||||
default:
|
||||
Log.report(LOG_FATAL, FMT_STRING("invalid depth fn {}"), func);
|
||||
unreachable();
|
||||
}
|
||||
}
|
||||
|
||||
static inline WGPUBlendState to_blend_state(GXBlendMode mode, GXBlendFactor srcFac, GXBlendFactor dstFac, GXLogicOp op,
|
||||
u32 dstAlpha) {
|
||||
WGPUBlendComponent colorBlendComponent;
|
||||
static inline wgpu::BlendState to_blend_state(GXBlendMode mode, GXBlendFactor srcFac, GXBlendFactor dstFac,
|
||||
GXLogicOp op, u32 dstAlpha) {
|
||||
wgpu::BlendComponent colorBlendComponent;
|
||||
switch (mode) {
|
||||
case GX_BM_NONE:
|
||||
colorBlendComponent = {
|
||||
.operation = WGPUBlendOperation_Add,
|
||||
.srcFactor = WGPUBlendFactor_One,
|
||||
.dstFactor = WGPUBlendFactor_Zero,
|
||||
.operation = wgpu::BlendOperation::Add,
|
||||
.srcFactor = wgpu::BlendFactor::One,
|
||||
.dstFactor = wgpu::BlendFactor::Zero,
|
||||
};
|
||||
break;
|
||||
case GX_BM_BLEND:
|
||||
colorBlendComponent = {
|
||||
.operation = WGPUBlendOperation_Add,
|
||||
.operation = wgpu::BlendOperation::Add,
|
||||
.srcFactor = to_blend_factor(srcFac, false),
|
||||
.dstFactor = to_blend_factor(dstFac, true),
|
||||
};
|
||||
break;
|
||||
case GX_BM_SUBTRACT:
|
||||
colorBlendComponent = {
|
||||
.operation = WGPUBlendOperation_ReverseSubtract,
|
||||
.srcFactor = WGPUBlendFactor_One,
|
||||
.dstFactor = WGPUBlendFactor_One,
|
||||
.operation = wgpu::BlendOperation::ReverseSubtract,
|
||||
.srcFactor = wgpu::BlendFactor::One,
|
||||
.dstFactor = wgpu::BlendFactor::One,
|
||||
};
|
||||
break;
|
||||
case GX_BM_LOGIC:
|
||||
switch (op) {
|
||||
case GX_LO_CLEAR:
|
||||
colorBlendComponent = {
|
||||
.operation = WGPUBlendOperation_Add,
|
||||
.srcFactor = WGPUBlendFactor_Zero,
|
||||
.dstFactor = WGPUBlendFactor_Zero,
|
||||
.operation = wgpu::BlendOperation::Add,
|
||||
.srcFactor = wgpu::BlendFactor::Zero,
|
||||
.dstFactor = wgpu::BlendFactor::Zero,
|
||||
};
|
||||
break;
|
||||
case GX_LO_COPY:
|
||||
colorBlendComponent = {
|
||||
.operation = WGPUBlendOperation_Add,
|
||||
.srcFactor = WGPUBlendFactor_One,
|
||||
.dstFactor = WGPUBlendFactor_Zero,
|
||||
.operation = wgpu::BlendOperation::Add,
|
||||
.srcFactor = wgpu::BlendFactor::One,
|
||||
.dstFactor = wgpu::BlendFactor::Zero,
|
||||
};
|
||||
break;
|
||||
case GX_LO_NOOP:
|
||||
colorBlendComponent = {
|
||||
.operation = WGPUBlendOperation_Add,
|
||||
.srcFactor = WGPUBlendFactor_Zero,
|
||||
.dstFactor = WGPUBlendFactor_One,
|
||||
.operation = wgpu::BlendOperation::Add,
|
||||
.srcFactor = wgpu::BlendFactor::Zero,
|
||||
.dstFactor = wgpu::BlendFactor::One,
|
||||
};
|
||||
break;
|
||||
default:
|
||||
|
@ -136,16 +136,16 @@ static inline WGPUBlendState to_blend_state(GXBlendMode mode, GXBlendFactor srcF
|
|||
Log.report(LOG_FATAL, FMT_STRING("unsupported blend mode {}"), mode);
|
||||
unreachable();
|
||||
}
|
||||
WGPUBlendComponent alphaBlendComponent{
|
||||
.operation = WGPUBlendOperation_Add,
|
||||
.srcFactor = WGPUBlendFactor_One,
|
||||
.dstFactor = WGPUBlendFactor_Zero,
|
||||
wgpu::BlendComponent alphaBlendComponent{
|
||||
.operation = wgpu::BlendOperation::Add,
|
||||
.srcFactor = wgpu::BlendFactor::One,
|
||||
.dstFactor = wgpu::BlendFactor::Zero,
|
||||
};
|
||||
if (dstAlpha != UINT32_MAX) {
|
||||
alphaBlendComponent = WGPUBlendComponent{
|
||||
.operation = WGPUBlendOperation_Add,
|
||||
.srcFactor = WGPUBlendFactor_Constant,
|
||||
.dstFactor = WGPUBlendFactor_Zero,
|
||||
alphaBlendComponent = wgpu::BlendComponent{
|
||||
.operation = wgpu::BlendOperation::Add,
|
||||
.srcFactor = wgpu::BlendFactor::Constant,
|
||||
.dstFactor = wgpu::BlendFactor::Zero,
|
||||
};
|
||||
}
|
||||
return {
|
||||
|
@ -154,36 +154,36 @@ static inline WGPUBlendState to_blend_state(GXBlendMode mode, GXBlendFactor srcF
|
|||
};
|
||||
}
|
||||
|
||||
static inline WGPUColorWriteMaskFlags to_write_mask(bool colorUpdate, bool alphaUpdate) {
|
||||
WGPUColorWriteMaskFlags writeMask = WGPUColorWriteMask_None;
|
||||
static inline wgpu::ColorWriteMask to_write_mask(bool colorUpdate, bool alphaUpdate) {
|
||||
wgpu::ColorWriteMask writeMask = wgpu::ColorWriteMask::None;
|
||||
if (colorUpdate) {
|
||||
writeMask |= WGPUColorWriteMask_Red | WGPUColorWriteMask_Green | WGPUColorWriteMask_Blue;
|
||||
writeMask |= wgpu::ColorWriteMask::Red | wgpu::ColorWriteMask::Green | wgpu::ColorWriteMask::Blue;
|
||||
}
|
||||
if (alphaUpdate) {
|
||||
writeMask |= WGPUColorWriteMask_Alpha;
|
||||
writeMask |= wgpu::ColorWriteMask::Alpha;
|
||||
}
|
||||
return writeMask;
|
||||
}
|
||||
|
||||
static inline WGPUPrimitiveState to_primitive_state(GXPrimitive gx_prim, GXCullMode gx_cullMode) {
|
||||
WGPUPrimitiveTopology primitive = WGPUPrimitiveTopology_TriangleList;
|
||||
static inline wgpu::PrimitiveState to_primitive_state(GXPrimitive gx_prim, GXCullMode gx_cullMode) {
|
||||
wgpu::PrimitiveTopology primitive = wgpu::PrimitiveTopology::TriangleList;
|
||||
switch (gx_prim) {
|
||||
case GX_TRIANGLES:
|
||||
break;
|
||||
case GX_TRIANGLESTRIP:
|
||||
primitive = WGPUPrimitiveTopology_TriangleStrip;
|
||||
primitive = wgpu::PrimitiveTopology::TriangleStrip;
|
||||
break;
|
||||
default:
|
||||
Log.report(LOG_FATAL, FMT_STRING("Unsupported primitive type {}"), gx_prim);
|
||||
unreachable();
|
||||
}
|
||||
WGPUCullMode cullMode = WGPUCullMode_None;
|
||||
wgpu::CullMode cullMode = wgpu::CullMode::None;
|
||||
switch (gx_cullMode) {
|
||||
case GX_CULL_FRONT:
|
||||
cullMode = WGPUCullMode_Front;
|
||||
cullMode = wgpu::CullMode::Front;
|
||||
break;
|
||||
case GX_CULL_BACK:
|
||||
cullMode = WGPUCullMode_Back;
|
||||
cullMode = wgpu::CullMode::Back;
|
||||
break;
|
||||
case GX_CULL_NONE:
|
||||
break;
|
||||
|
@ -193,35 +193,35 @@ static inline WGPUPrimitiveState to_primitive_state(GXPrimitive gx_prim, GXCullM
|
|||
}
|
||||
return {
|
||||
.topology = primitive,
|
||||
.frontFace = WGPUFrontFace_CW,
|
||||
.frontFace = wgpu::FrontFace::CW,
|
||||
.cullMode = cullMode,
|
||||
};
|
||||
}
|
||||
|
||||
WGPURenderPipeline build_pipeline(const PipelineConfig& config, const ShaderInfo& info,
|
||||
ArrayRef<WGPUVertexBufferLayout> vtxBuffers, WGPUShaderModule shader,
|
||||
const char* label) noexcept {
|
||||
const WGPUDepthStencilState depthStencil{
|
||||
wgpu::RenderPipeline build_pipeline(const PipelineConfig& config, const ShaderInfo& info,
|
||||
ArrayRef<wgpu::VertexBufferLayout> vtxBuffers, wgpu::ShaderModule shader,
|
||||
const char* label) noexcept {
|
||||
const wgpu::DepthStencilState depthStencil{
|
||||
.format = g_graphicsConfig.depthFormat,
|
||||
.depthWriteEnabled = config.depthUpdate,
|
||||
.depthCompare = to_compare_function(config.depthFunc),
|
||||
.stencilFront =
|
||||
WGPUStencilFaceState{
|
||||
.compare = WGPUCompareFunction_Always,
|
||||
wgpu::StencilFaceState{
|
||||
.compare = wgpu::CompareFunction::Always,
|
||||
},
|
||||
.stencilBack =
|
||||
WGPUStencilFaceState{
|
||||
.compare = WGPUCompareFunction_Always,
|
||||
wgpu::StencilFaceState{
|
||||
.compare = wgpu::CompareFunction::Always,
|
||||
},
|
||||
};
|
||||
const auto blendState =
|
||||
to_blend_state(config.blendMode, config.blendFacSrc, config.blendFacDst, config.blendOp, config.dstAlpha);
|
||||
const std::array colorTargets{WGPUColorTargetState{
|
||||
.format = g_graphicsConfig.colorFormat,
|
||||
const std::array colorTargets{wgpu::ColorTargetState{
|
||||
.format = g_graphicsConfig.swapChainDescriptor.format,
|
||||
.blend = &blendState,
|
||||
.writeMask = to_write_mask(config.colorUpdate, config.alphaUpdate),
|
||||
}};
|
||||
const WGPUFragmentState fragmentState{
|
||||
const wgpu::FragmentState fragmentState{
|
||||
.module = shader,
|
||||
.entryPoint = "fs_main",
|
||||
.targetCount = colorTargets.size(),
|
||||
|
@ -233,13 +233,13 @@ WGPURenderPipeline build_pipeline(const PipelineConfig& config, const ShaderInfo
|
|||
layouts.samplerLayout,
|
||||
layouts.textureLayout,
|
||||
};
|
||||
const WGPUPipelineLayoutDescriptor pipelineLayoutDescriptor{
|
||||
const wgpu::PipelineLayoutDescriptor pipelineLayoutDescriptor{
|
||||
.label = "GX Pipeline Layout",
|
||||
.bindGroupLayoutCount = static_cast<uint32_t>(info.sampledTextures.any() ? bindGroupLayouts.size() : 1),
|
||||
.bindGroupLayouts = bindGroupLayouts.data(),
|
||||
};
|
||||
auto pipelineLayout = wgpuDeviceCreatePipelineLayout(g_device, &pipelineLayoutDescriptor);
|
||||
const WGPURenderPipelineDescriptor descriptor{
|
||||
auto pipelineLayout = g_device.CreatePipelineLayout(&pipelineLayoutDescriptor);
|
||||
const wgpu::RenderPipelineDescriptor descriptor{
|
||||
.label = label,
|
||||
.layout = pipelineLayout,
|
||||
.vertex =
|
||||
|
@ -252,15 +252,13 @@ WGPURenderPipeline build_pipeline(const PipelineConfig& config, const ShaderInfo
|
|||
.primitive = to_primitive_state(config.primitive, config.cullMode),
|
||||
.depthStencil = &depthStencil,
|
||||
.multisample =
|
||||
WGPUMultisampleState{
|
||||
wgpu::MultisampleState{
|
||||
.count = g_graphicsConfig.msaaSamples,
|
||||
.mask = UINT32_MAX,
|
||||
},
|
||||
.fragment = &fragmentState,
|
||||
};
|
||||
auto pipeline = wgpuDeviceCreateRenderPipeline(g_device, &descriptor);
|
||||
wgpuPipelineLayoutRelease(pipelineLayout);
|
||||
return pipeline;
|
||||
return g_device.CreateRenderPipeline(&descriptor);
|
||||
}
|
||||
|
||||
void populate_pipeline_config(PipelineConfig& config, GXPrimitive primitive) noexcept {
|
||||
|
@ -477,15 +475,15 @@ Range build_uniform(const ShaderInfo& info) noexcept {
|
|||
return range;
|
||||
}
|
||||
|
||||
static absl::flat_hash_map<u32, WGPUBindGroupLayout> sUniformBindGroupLayouts;
|
||||
static absl::flat_hash_map<u32, std::pair<WGPUBindGroupLayout, WGPUBindGroupLayout>> sTextureBindGroupLayouts;
|
||||
static absl::flat_hash_map<u32, wgpu::BindGroupLayout> sUniformBindGroupLayouts;
|
||||
static absl::flat_hash_map<u32, std::pair<wgpu::BindGroupLayout, wgpu::BindGroupLayout>> sTextureBindGroupLayouts;
|
||||
|
||||
GXBindGroups build_bind_groups(const ShaderInfo& info, const ShaderConfig& config,
|
||||
const BindGroupRanges& ranges) noexcept {
|
||||
const auto layouts = build_bind_group_layouts(info, config);
|
||||
|
||||
std::array<WGPUBindGroupEntry, GX_VA_MAX_ATTR + 1> uniformEntries{
|
||||
WGPUBindGroupEntry{
|
||||
std::array<wgpu::BindGroupEntry, GX_VA_MAX_ATTR + 1> uniformEntries{
|
||||
wgpu::BindGroupEntry{
|
||||
.binding = 0,
|
||||
.buffer = g_uniformBuffer,
|
||||
.size = info.uniformSize,
|
||||
|
@ -497,7 +495,7 @@ GXBindGroups build_bind_groups(const ShaderInfo& info, const ShaderConfig& confi
|
|||
if (range.size <= 0) {
|
||||
continue;
|
||||
}
|
||||
uniformEntries[uniformBindIdx] = WGPUBindGroupEntry{
|
||||
uniformEntries[uniformBindIdx] = wgpu::BindGroupEntry{
|
||||
.binding = uniformBindIdx,
|
||||
.buffer = g_storageBuffer,
|
||||
.size = range.size,
|
||||
|
@ -505,8 +503,8 @@ GXBindGroups build_bind_groups(const ShaderInfo& info, const ShaderConfig& confi
|
|||
++uniformBindIdx;
|
||||
}
|
||||
|
||||
std::array<WGPUBindGroupEntry, MaxTextures> samplerEntries;
|
||||
std::array<WGPUBindGroupEntry, MaxTextures * 2> textureEntries;
|
||||
std::array<wgpu::BindGroupEntry, MaxTextures> samplerEntries;
|
||||
std::array<wgpu::BindGroupEntry, MaxTextures * 2> textureEntries;
|
||||
u32 samplerCount = 0;
|
||||
u32 textureCount = 0;
|
||||
for (u32 i = 0; i < info.sampledTextures.size(); ++i) {
|
||||
|
@ -547,19 +545,19 @@ GXBindGroups build_bind_groups(const ShaderInfo& info, const ShaderConfig& confi
|
|||
}
|
||||
}
|
||||
return {
|
||||
.uniformBindGroup = bind_group_ref(WGPUBindGroupDescriptor{
|
||||
.uniformBindGroup = bind_group_ref(wgpu::BindGroupDescriptor{
|
||||
.label = "GX Uniform Bind Group",
|
||||
.layout = layouts.uniformLayout,
|
||||
.entryCount = uniformBindIdx,
|
||||
.entries = uniformEntries.data(),
|
||||
}),
|
||||
.samplerBindGroup = bind_group_ref(WGPUBindGroupDescriptor{
|
||||
.samplerBindGroup = bind_group_ref(wgpu::BindGroupDescriptor{
|
||||
.label = "GX Sampler Bind Group",
|
||||
.layout = layouts.samplerLayout,
|
||||
.entryCount = samplerCount,
|
||||
.entries = samplerEntries.data(),
|
||||
}),
|
||||
.textureBindGroup = bind_group_ref(WGPUBindGroupDescriptor{
|
||||
.textureBindGroup = bind_group_ref(wgpu::BindGroupDescriptor{
|
||||
.label = "GX Texture Bind Group",
|
||||
.layout = layouts.textureLayout,
|
||||
.entryCount = textureCount,
|
||||
|
@ -575,13 +573,13 @@ GXBindGroupLayouts build_bind_group_layouts(const ShaderInfo& info, const Shader
|
|||
if (uniformIt != sUniformBindGroupLayouts.end()) {
|
||||
out.uniformLayout = uniformIt->second;
|
||||
} else {
|
||||
std::array<WGPUBindGroupLayoutEntry, GX_VA_MAX_ATTR + 1> uniformLayoutEntries{
|
||||
WGPUBindGroupLayoutEntry{
|
||||
std::array<wgpu::BindGroupLayoutEntry, GX_VA_MAX_ATTR + 1> uniformLayoutEntries{
|
||||
wgpu::BindGroupLayoutEntry{
|
||||
.binding = 0,
|
||||
.visibility = WGPUShaderStage_Vertex | WGPUShaderStage_Fragment,
|
||||
.visibility = wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment,
|
||||
.buffer =
|
||||
WGPUBufferBindingLayout{
|
||||
.type = WGPUBufferBindingType_Uniform,
|
||||
wgpu::BufferBindingLayout{
|
||||
.type = wgpu::BufferBindingType::Uniform,
|
||||
.hasDynamicOffset = true,
|
||||
.minBindingSize = info.uniformSize,
|
||||
},
|
||||
|
@ -590,24 +588,24 @@ GXBindGroupLayouts build_bind_group_layouts(const ShaderInfo& info, const Shader
|
|||
u32 bindIdx = 1;
|
||||
for (int i = 0; i < GX_VA_MAX_ATTR; ++i) {
|
||||
if (config.attrMapping[i] == static_cast<GXAttr>(i)) {
|
||||
uniformLayoutEntries[bindIdx] = WGPUBindGroupLayoutEntry{
|
||||
uniformLayoutEntries[bindIdx] = wgpu::BindGroupLayoutEntry{
|
||||
.binding = bindIdx,
|
||||
.visibility = WGPUShaderStage_Vertex,
|
||||
.visibility = wgpu::ShaderStage::Vertex,
|
||||
.buffer =
|
||||
WGPUBufferBindingLayout{
|
||||
.type = WGPUBufferBindingType_ReadOnlyStorage,
|
||||
wgpu::BufferBindingLayout{
|
||||
.type = wgpu::BufferBindingType::ReadOnlyStorage,
|
||||
.hasDynamicOffset = true,
|
||||
},
|
||||
};
|
||||
++bindIdx;
|
||||
}
|
||||
}
|
||||
const auto uniformLayoutDescriptor = WGPUBindGroupLayoutDescriptor{
|
||||
const auto uniformLayoutDescriptor = wgpu::BindGroupLayoutDescriptor{
|
||||
.label = "GX Uniform Bind Group Layout",
|
||||
.entryCount = bindIdx,
|
||||
.entries = uniformLayoutEntries.data(),
|
||||
};
|
||||
out.uniformLayout = wgpuDeviceCreateBindGroupLayout(g_device, &uniformLayoutDescriptor);
|
||||
out.uniformLayout = g_device.CreateBindGroupLayout(&uniformLayoutDescriptor);
|
||||
// sUniformBindGroupLayouts.try_emplace(uniformSizeKey, out.uniformLayout);
|
||||
}
|
||||
|
||||
|
@@ -620,8 +618,8 @@ GXBindGroupLayouts build_bind_group_layouts(const ShaderInfo& info, const Shader
// } else {
u32 numSamplers = 0;
u32 numTextures = 0;
std::array<WGPUBindGroupLayoutEntry, MaxTextures> samplerEntries;
std::array<WGPUBindGroupLayoutEntry, MaxTextures * 2> textureEntries;
std::array<wgpu::BindGroupLayoutEntry, MaxTextures> samplerEntries;
std::array<wgpu::BindGroupLayoutEntry, MaxTextures * 2> textureEntries;
for (u32 i = 0; i < info.sampledTextures.size(); ++i) {
if (!info.sampledTextures.test(i)) {
continue;

@@ -631,60 +629,60 @@ GXBindGroupLayouts build_bind_group_layouts(const ShaderInfo& info, const Shader
bool loadAsPalette = is_palette_format(texConfig.loadFmt);
samplerEntries[numSamplers] = {
.binding = numSamplers,
.visibility = WGPUShaderStage_Fragment,
.sampler = {.type = copyAsPalette && loadAsPalette ? WGPUSamplerBindingType_NonFiltering
: WGPUSamplerBindingType_Filtering},
.visibility = wgpu::ShaderStage::Fragment,
.sampler = {.type = copyAsPalette && loadAsPalette ? wgpu::SamplerBindingType::NonFiltering
: wgpu::SamplerBindingType::Filtering},
};
++numSamplers;
if (loadAsPalette) {
textureEntries[numTextures] = {
.binding = numTextures,
.visibility = WGPUShaderStage_Fragment,
.visibility = wgpu::ShaderStage::Fragment,
.texture =
{
.sampleType = copyAsPalette ? WGPUTextureSampleType_Sint : WGPUTextureSampleType_Float,
.viewDimension = WGPUTextureViewDimension_2D,
.sampleType = copyAsPalette ? wgpu::TextureSampleType::Sint : wgpu::TextureSampleType::Float,
.viewDimension = wgpu::TextureViewDimension::e2D,
},
};
++numTextures;
textureEntries[numTextures] = {
.binding = numTextures,
.visibility = WGPUShaderStage_Fragment,
.visibility = wgpu::ShaderStage::Fragment,
.texture =
{
.sampleType = WGPUTextureSampleType_Float,
.viewDimension = WGPUTextureViewDimension_2D,
.sampleType = wgpu::TextureSampleType::Float,
.viewDimension = wgpu::TextureViewDimension::e2D,
},
};
++numTextures;
} else {
textureEntries[numTextures] = {
.binding = numTextures,
.visibility = WGPUShaderStage_Fragment,
.visibility = wgpu::ShaderStage::Fragment,
.texture =
{
.sampleType = WGPUTextureSampleType_Float,
.viewDimension = WGPUTextureViewDimension_2D,
.sampleType = wgpu::TextureSampleType::Float,
.viewDimension = wgpu::TextureViewDimension::e2D,
},
};
++numTextures;
}
}
{
const WGPUBindGroupLayoutDescriptor descriptor{
const wgpu::BindGroupLayoutDescriptor descriptor{
.label = "GX Sampler Bind Group Layout",
.entryCount = numSamplers,
.entries = samplerEntries.data(),
};
out.samplerLayout = wgpuDeviceCreateBindGroupLayout(g_device, &descriptor);
out.samplerLayout = g_device.CreateBindGroupLayout(&descriptor);
}
{
const WGPUBindGroupLayoutDescriptor descriptor{
const wgpu::BindGroupLayoutDescriptor descriptor{
.label = "GX Texture Bind Group Layout",
.entryCount = numTextures,
.entries = textureEntries.data(),
};
out.textureLayout = wgpuDeviceCreateBindGroupLayout(g_device, &descriptor);
out.textureLayout = g_device.CreateBindGroupLayout(&descriptor);
}
// sTextureBindGroupLayouts.try_emplace(textureCount, out.samplerLayout, out.textureLayout);
// }
@@ -692,17 +690,10 @@ GXBindGroupLayouts build_bind_group_layouts(const ShaderInfo& info, const Shader
}
// TODO this is awkward
extern absl::flat_hash_map<ShaderRef, std::pair<WGPUShaderModule, gx::ShaderInfo>> g_gxCachedShaders;
extern absl::flat_hash_map<ShaderRef, std::pair<wgpu::ShaderModule, gx::ShaderInfo>> g_gxCachedShaders;
void shutdown() noexcept {
// TODO we should probably store this all in g_state.gx instead
for (const auto& item : sUniformBindGroupLayouts) {
wgpuBindGroupLayoutRelease(item.second);
}
sUniformBindGroupLayouts.clear();
for (const auto& item : sTextureBindGroupLayouts) {
wgpuBindGroupLayoutRelease(item.second.first);
wgpuBindGroupLayoutRelease(item.second.second);
}
sTextureBindGroupLayouts.clear();
for (auto& item : g_gxState.textures) {
item.texObj.ref.reset();

@@ -710,40 +701,37 @@ void shutdown() noexcept {
for (auto& item : g_gxState.tluts) {
item.ref.reset();
}
for (const auto& item : g_gxCachedShaders) {
wgpuShaderModuleRelease(item.second.first);
}
g_gxCachedShaders.clear();
}
} // namespace gx
static WGPUAddressMode wgpu_address_mode(GXTexWrapMode mode) {
static wgpu::AddressMode wgpu_address_mode(GXTexWrapMode mode) {
switch (mode) {
case GX_CLAMP:
return WGPUAddressMode_ClampToEdge;
return wgpu::AddressMode::ClampToEdge;
case GX_REPEAT:
return WGPUAddressMode_Repeat;
return wgpu::AddressMode::Repeat;
case GX_MIRROR:
return WGPUAddressMode_MirrorRepeat;
return wgpu::AddressMode::MirrorRepeat;
default:
Log.report(LOG_FATAL, FMT_STRING("invalid wrap mode {}"), mode);
unreachable();
}
}
static std::pair<WGPUFilterMode, WGPUFilterMode> wgpu_filter_mode(GXTexFilter filter) {
static std::pair<wgpu::FilterMode, wgpu::FilterMode> wgpu_filter_mode(GXTexFilter filter) {
switch (filter) {
case GX_NEAR:
return {WGPUFilterMode_Nearest, WGPUFilterMode_Linear};
return {wgpu::FilterMode::Nearest, wgpu::FilterMode::Linear};
case GX_LINEAR:
return {WGPUFilterMode_Linear, WGPUFilterMode_Linear};
return {wgpu::FilterMode::Linear, wgpu::FilterMode::Linear};
case GX_NEAR_MIP_NEAR:
return {WGPUFilterMode_Nearest, WGPUFilterMode_Nearest};
return {wgpu::FilterMode::Nearest, wgpu::FilterMode::Nearest};
case GX_LIN_MIP_NEAR:
return {WGPUFilterMode_Linear, WGPUFilterMode_Nearest};
return {wgpu::FilterMode::Linear, wgpu::FilterMode::Nearest};
case GX_NEAR_MIP_LIN:
return {WGPUFilterMode_Nearest, WGPUFilterMode_Linear};
return {wgpu::FilterMode::Nearest, wgpu::FilterMode::Linear};
case GX_LIN_MIP_LIN:
return {WGPUFilterMode_Linear, WGPUFilterMode_Linear};
return {wgpu::FilterMode::Linear, wgpu::FilterMode::Linear};
default:
Log.report(LOG_FATAL, FMT_STRING("invalid filter mode {}"), filter);
unreachable();
@@ -762,16 +750,16 @@ static u16 wgpu_aniso(GXAnisotropy aniso) {
unreachable();
}
}
WGPUSamplerDescriptor TextureBind::get_descriptor() const noexcept {
wgpu::SamplerDescriptor TextureBind::get_descriptor() const noexcept {
if (gx::requires_copy_conversion(texObj) && gx::is_palette_format(texObj.ref->gxFormat)) {
return {
.label = "Generated Non-Filtering Sampler",
.addressModeU = wgpu_address_mode(texObj.wrapS),
.addressModeV = wgpu_address_mode(texObj.wrapT),
.addressModeW = WGPUAddressMode_Repeat,
.magFilter = WGPUFilterMode_Nearest,
.minFilter = WGPUFilterMode_Nearest,
.mipmapFilter = WGPUFilterMode_Nearest,
.addressModeW = wgpu::AddressMode::Repeat,
.magFilter = wgpu::FilterMode::Nearest,
.minFilter = wgpu::FilterMode::Nearest,
.mipmapFilter = wgpu::FilterMode::Nearest,
.lodMinClamp = 0.f,
.lodMaxClamp = 1000.f,
.maxAnisotropy = 1,

@@ -783,7 +771,7 @@ WGPUSamplerDescriptor TextureBind::get_descriptor() const noexcept {
.label = "Generated Filtering Sampler",
.addressModeU = wgpu_address_mode(texObj.wrapS),
.addressModeV = wgpu_address_mode(texObj.wrapT),
.addressModeW = WGPUAddressMode_Repeat,
.addressModeW = wgpu::AddressMode::Repeat,
.magFilter = magFilter,
.minFilter = minFilter,
.mipmapFilter = mipFilter,

@@ -363,9 +363,9 @@ struct PipelineConfig {
static_assert(std::has_unique_object_representations_v<PipelineConfig>);
struct GXBindGroupLayouts {
WGPUBindGroupLayout uniformLayout;
WGPUBindGroupLayout samplerLayout;
WGPUBindGroupLayout textureLayout;
wgpu::BindGroupLayout uniformLayout;
wgpu::BindGroupLayout samplerLayout;
wgpu::BindGroupLayout textureLayout;
};
struct GXBindGroups {
BindGroupRef uniformBindGroup;
@@ -390,11 +390,11 @@ struct BindGroupRanges {
std::array<Range, GX_VA_MAX_ATTR> vaRanges{};
};
void populate_pipeline_config(PipelineConfig& config, GXPrimitive primitive) noexcept;
WGPURenderPipeline build_pipeline(const PipelineConfig& config, const ShaderInfo& info,
ArrayRef<WGPUVertexBufferLayout> vtxBuffers, WGPUShaderModule shader,
const char* label) noexcept;
wgpu::RenderPipeline build_pipeline(const PipelineConfig& config, const ShaderInfo& info,
ArrayRef<wgpu::VertexBufferLayout> vtxBuffers, wgpu::ShaderModule shader,
const char* label) noexcept;
ShaderInfo build_shader_info(const ShaderConfig& config) noexcept;
WGPUShaderModule build_shader(const ShaderConfig& config, const ShaderInfo& info) noexcept;
wgpu::ShaderModule build_shader(const ShaderConfig& config, const ShaderInfo& info) noexcept;
// Range build_vertex_buffer(const GXShaderInfo& info) noexcept;
Range build_uniform(const ShaderInfo& info) noexcept;
GXBindGroupLayouts build_bind_group_layouts(const ShaderInfo& info, const ShaderConfig& config) noexcept;

@@ -6,7 +6,7 @@
#include <absl/container/flat_hash_map.h>
constexpr bool EnableNormalVisualization = false;
constexpr bool EnableDebugPrints = true;
constexpr bool EnableDebugPrints = false;
constexpr bool UsePerPixelLighting = true;
namespace aurora::gfx::gx {

@@ -16,7 +16,7 @@ using namespace std::string_view_literals;
static Module Log("aurora::gfx::gx");
absl::flat_hash_map<ShaderRef, std::pair<WGPUShaderModule, gx::ShaderInfo>> g_gxCachedShaders;
absl::flat_hash_map<ShaderRef, std::pair<wgpu::ShaderModule, gx::ShaderInfo>> g_gxCachedShaders;
#ifndef NDEBUG
static absl::flat_hash_map<ShaderRef, gx::ShaderConfig> g_gxCachedShaderConfigs;
#endif

@@ -701,7 +701,7 @@ ShaderInfo build_shader_info(const ShaderConfig& config) noexcept {
return info;
}
WGPUShaderModule build_shader(const ShaderConfig& config, const ShaderInfo& info) noexcept {
wgpu::ShaderModule build_shader(const ShaderConfig& config, const ShaderInfo& info) noexcept {
const auto hash = xxh3_hash(config);
const auto it = g_gxCachedShaders.find(hash);
if (it != g_gxCachedShaders.end()) {

@@ -1371,16 +1371,14 @@ fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {{{8}{7}
Log.report(LOG_INFO, FMT_STRING("Generated shader: {}"), shaderSource);
}
const WGPUShaderModuleWGSLDescriptor wgslDescriptor{
.chain = {.sType = WGPUSType_ShaderModuleWGSLDescriptor},
.source = shaderSource.c_str(),
};
wgpu::ShaderModuleWGSLDescriptor wgslDescriptor{};
wgslDescriptor.source = shaderSource.c_str();
const auto label = fmt::format(FMT_STRING("GX Shader {:x}"), hash);
const auto shaderDescriptor = WGPUShaderModuleDescriptor{
.nextInChain = &wgslDescriptor.chain,
const auto shaderDescriptor = wgpu::ShaderModuleDescriptor{
.nextInChain = &wgslDescriptor,
.label = label.c_str(),
};
auto shader = wgpuDeviceCreateShaderModule(webgpu::g_device, &shaderDescriptor);
auto shader = webgpu::g_device.CreateShaderModule(&shaderDescriptor);
auto pair = std::make_pair(shader, info);
g_gxCachedShaders.emplace(hash, pair);
@@ -371,11 +371,11 @@ void queue_surface(const u8* dlStart, u32 dlSize) noexcept {
State construct_state() { return {}; }
WGPURenderPipeline create_pipeline(const State& state, [[maybe_unused]] const PipelineConfig& config) {
wgpu::RenderPipeline create_pipeline(const State& state, [[maybe_unused]] const PipelineConfig& config) {
const auto info = build_shader_info(config.shaderConfig); // TODO remove
const auto shader = build_shader(config.shaderConfig, info);
std::array<WGPUVertexAttribute, gx::MaxVtxAttr> vtxAttrs{};
std::array<wgpu::VertexAttribute, gx::MaxVtxAttr> vtxAttrs{};
auto [num4xAttr, rem] = std::div(config.shaderConfig.indexedAttributeCount, 4);
u32 num2xAttr = 0;
if (rem > 2) {

@@ -390,7 +390,7 @@ WGPURenderPipeline create_pipeline(const State& state, [[maybe_unused]] const Pi
// Indexed attributes
for (u32 i = 0; i < num4xAttr; ++i) {
vtxAttrs[shaderLocation] = {
.format = WGPUVertexFormat_Sint16x4,
.format = wgpu::VertexFormat::Sint16x4,
.offset = offset,
.shaderLocation = shaderLocation,
};

@@ -399,7 +399,7 @@ WGPURenderPipeline create_pipeline(const State& state, [[maybe_unused]] const Pi
}
for (u32 i = 0; i < num2xAttr; ++i) {
vtxAttrs[shaderLocation] = {
.format = WGPUVertexFormat_Sint16x2,
.format = wgpu::VertexFormat::Sint16x2,
.offset = offset,
.shaderLocation = shaderLocation,
};

@@ -417,8 +417,8 @@ WGPURenderPipeline create_pipeline(const State& state, [[maybe_unused]] const Pi
switch (attr) {
case GX_VA_POS:
case GX_VA_NRM:
vtxAttrs[shaderLocation] = WGPUVertexAttribute{
.format = WGPUVertexFormat_Float32x3,
vtxAttrs[shaderLocation] = wgpu::VertexAttribute{
.format = wgpu::VertexFormat::Float32x3,
.offset = offset,
.shaderLocation = shaderLocation,
};

@@ -426,8 +426,8 @@ WGPURenderPipeline create_pipeline(const State& state, [[maybe_unused]] const Pi
break;
case GX_VA_CLR0:
case GX_VA_CLR1:
vtxAttrs[shaderLocation] = WGPUVertexAttribute{
.format = WGPUVertexFormat_Float32x4,
vtxAttrs[shaderLocation] = wgpu::VertexAttribute{
.format = wgpu::VertexFormat::Float32x4,
.offset = offset,
.shaderLocation = shaderLocation,
};

@@ -441,8 +441,8 @@ WGPURenderPipeline create_pipeline(const State& state, [[maybe_unused]] const Pi
case GX_VA_TEX5:
case GX_VA_TEX6:
case GX_VA_TEX7:
vtxAttrs[shaderLocation] = WGPUVertexAttribute{
.format = WGPUVertexFormat_Float32x2,
vtxAttrs[shaderLocation] = wgpu::VertexAttribute{
.format = wgpu::VertexFormat::Float32x2,
.offset = offset,
.shaderLocation = shaderLocation,
};

@@ -454,9 +454,9 @@ WGPURenderPipeline create_pipeline(const State& state, [[maybe_unused]] const Pi
++shaderLocation;
}
const std::array vtxBuffers{WGPUVertexBufferLayout{
const std::array vtxBuffers{wgpu::VertexBufferLayout{
.arrayStride = offset,
.stepMode = WGPUVertexStepMode_Vertex,
.stepMode = wgpu::VertexStepMode::Vertex,
.attributeCount = shaderLocation,
.attributes = vtxAttrs.data(),
}};

@@ -464,7 +464,7 @@ WGPURenderPipeline create_pipeline(const State& state, [[maybe_unused]] const Pi
return build_pipeline(config, info, vtxBuffers, shader, "GX Pipeline");
}
void render(const State& state, const DrawData& data, const WGPURenderPassEncoder& pass) {
void render(const State& state, const DrawData& data, const wgpu::RenderPassEncoder& pass) {
if (!bind_pipeline(data.pipeline, pass)) {
return;
}
@@ -479,20 +479,18 @@ void render(const State& state, const DrawData& data, const WGPURenderPassEncode
offsets[bindIdx] = range.offset;
++bindIdx;
}
wgpuRenderPassEncoderSetBindGroup(pass, 0, find_bind_group(data.bindGroups.uniformBindGroup), bindIdx,
offsets.data());
pass.SetBindGroup(0, find_bind_group(data.bindGroups.uniformBindGroup), bindIdx, offsets.data());
if (data.bindGroups.samplerBindGroup && data.bindGroups.textureBindGroup) {
wgpuRenderPassEncoderSetBindGroup(pass, 1, find_bind_group(data.bindGroups.samplerBindGroup), 0, nullptr);
wgpuRenderPassEncoderSetBindGroup(pass, 2, find_bind_group(data.bindGroups.textureBindGroup), 0, nullptr);
pass.SetBindGroup(1, find_bind_group(data.bindGroups.samplerBindGroup));
pass.SetBindGroup(2, find_bind_group(data.bindGroups.textureBindGroup));
}
wgpuRenderPassEncoderSetVertexBuffer(pass, 0, g_vertexBuffer, data.vertRange.offset, data.vertRange.size);
wgpuRenderPassEncoderSetIndexBuffer(pass, g_indexBuffer, WGPUIndexFormat_Uint16, data.idxRange.offset,
data.idxRange.size);
pass.SetVertexBuffer(0, g_vertexBuffer, data.vertRange.offset, data.vertRange.size);
pass.SetIndexBuffer(g_indexBuffer, wgpu::IndexFormat::Uint16, data.idxRange.offset, data.idxRange.size);
if (data.dstAlpha != UINT32_MAX) {
const WGPUColor color{0.f, 0.f, 0.f, data.dstAlpha / 255.f};
wgpuRenderPassEncoderSetBlendConstant(pass, &color);
const wgpu::Color color{0.f, 0.f, 0.f, data.dstAlpha / 255.f};
pass.SetBlendConstant(&color);
}
wgpuRenderPassEncoderDrawIndexed(pass, data.indexCount, 1, 0, 0, 0);
pass.DrawIndexed(data.indexCount);
}
} // namespace aurora::gfx::model

@@ -20,8 +20,8 @@ struct PipelineConfig : gx::PipelineConfig {};
struct State {};
State construct_state();
WGPURenderPipeline create_pipeline(const State& state, [[maybe_unused]] const PipelineConfig& config);
void render(const State& state, const DrawData& data, const WGPURenderPassEncoder& pass);
wgpu::RenderPipeline create_pipeline(const State& state, [[maybe_unused]] const PipelineConfig& config);
void render(const State& state, const DrawData& data, const wgpu::RenderPassEncoder& pass);
void queue_surface(const u8* dlStart, u32 dlSize) noexcept;
} // namespace aurora::gfx::model
@@ -7,21 +7,21 @@ static Module Log("aurora::gfx::stream");
using webgpu::g_device;
WGPURenderPipeline create_pipeline(const State& state, [[maybe_unused]] const PipelineConfig& config) {
wgpu::RenderPipeline create_pipeline(const State& state, [[maybe_unused]] const PipelineConfig& config) {
const auto info = build_shader_info(config.shaderConfig); // TODO remove
const auto shader = build_shader(config.shaderConfig, info);
std::array<WGPUVertexAttribute, 4> attributes{};
attributes[0] = WGPUVertexAttribute{
.format = WGPUVertexFormat_Float32x3,
std::array<wgpu::VertexAttribute, 4> attributes{};
attributes[0] = wgpu::VertexAttribute{
.format = wgpu::VertexFormat::Float32x3,
.offset = 0,
.shaderLocation = 0,
};
uint64_t offset = 12;
uint32_t shaderLocation = 1;
if (config.shaderConfig.vtxAttrs[GX_VA_NRM] == GX_DIRECT) {
attributes[shaderLocation] = WGPUVertexAttribute{
.format = WGPUVertexFormat_Float32x3,
attributes[shaderLocation] = wgpu::VertexAttribute{
.format = wgpu::VertexFormat::Float32x3,
.offset = offset,
.shaderLocation = shaderLocation,
};

@@ -29,8 +29,8 @@ WGPURenderPipeline create_pipeline(const State& state, [[maybe_unused]] const Pi
shaderLocation++;
}
if (config.shaderConfig.vtxAttrs[GX_VA_CLR0] == GX_DIRECT) {
attributes[shaderLocation] = WGPUVertexAttribute{
.format = WGPUVertexFormat_Float32x4,
attributes[shaderLocation] = wgpu::VertexAttribute{
.format = wgpu::VertexFormat::Float32x4,
.offset = offset,
.shaderLocation = shaderLocation,
};

@@ -41,15 +41,15 @@ WGPURenderPipeline create_pipeline(const State& state, [[maybe_unused]] const Pi
if (config.shaderConfig.vtxAttrs[i] != GX_DIRECT) {
continue;
}
attributes[shaderLocation] = WGPUVertexAttribute{
.format = WGPUVertexFormat_Float32x2,
attributes[shaderLocation] = wgpu::VertexAttribute{
.format = wgpu::VertexFormat::Float32x2,
.offset = offset,
.shaderLocation = shaderLocation,
};
offset += 8;
shaderLocation++;
}
const std::array vertexBuffers{WGPUVertexBufferLayout{
const std::array vertexBuffers{wgpu::VertexBufferLayout{
.arrayStride = offset,
.attributeCount = shaderLocation,
.attributes = attributes.data(),

@@ -60,25 +60,23 @@ WGPURenderPipeline create_pipeline(const State& state, [[maybe_unused]] const Pi
State construct_state() { return {}; }
void render(const State& state, const DrawData& data, const WGPURenderPassEncoder& pass) {
void render(const State& state, const DrawData& data, const wgpu::RenderPassEncoder& pass) {
if (!bind_pipeline(data.pipeline, pass)) {
return;
}
const std::array offsets{data.uniformRange.offset};
wgpuRenderPassEncoderSetBindGroup(pass, 0, find_bind_group(data.bindGroups.uniformBindGroup), offsets.size(),
offsets.data());
pass.SetBindGroup(0, find_bind_group(data.bindGroups.uniformBindGroup), offsets.size(), offsets.data());
if (data.bindGroups.samplerBindGroup && data.bindGroups.textureBindGroup) {
wgpuRenderPassEncoderSetBindGroup(pass, 1, find_bind_group(data.bindGroups.samplerBindGroup), 0, nullptr);
wgpuRenderPassEncoderSetBindGroup(pass, 2, find_bind_group(data.bindGroups.textureBindGroup), 0, nullptr);
pass.SetBindGroup(1, find_bind_group(data.bindGroups.samplerBindGroup));
pass.SetBindGroup(2, find_bind_group(data.bindGroups.textureBindGroup));
}
wgpuRenderPassEncoderSetVertexBuffer(pass, 0, g_vertexBuffer, data.vertRange.offset, data.vertRange.size);
wgpuRenderPassEncoderSetIndexBuffer(pass, g_indexBuffer, WGPUIndexFormat_Uint16, data.indexRange.offset,
data.indexRange.size);
pass.SetVertexBuffer(0, g_vertexBuffer, data.vertRange.offset, data.vertRange.size);
pass.SetIndexBuffer(g_indexBuffer, wgpu::IndexFormat::Uint16, data.indexRange.offset, data.indexRange.size);
if (data.dstAlpha != UINT32_MAX) {
const WGPUColor color{0.f, 0.f, 0.f, data.dstAlpha / 255.f};
wgpuRenderPassEncoderSetBlendConstant(pass, &color);
const wgpu::Color color{0.f, 0.f, 0.f, data.dstAlpha / 255.f};
pass.SetBlendConstant(&color);
}
wgpuRenderPassEncoderDrawIndexed(pass, data.indexCount, 1, 0, 0, 0);
pass.DrawIndexed(data.indexCount);
}
} // namespace aurora::gfx::stream
@@ -19,6 +19,6 @@ struct PipelineConfig : public gx::PipelineConfig {};
struct State {};
State construct_state();
WGPURenderPipeline create_pipeline(const State& state, [[maybe_unused]] const PipelineConfig& config);
void render(const State& state, const DrawData& data, const WGPURenderPassEncoder& pass);
wgpu::RenderPipeline create_pipeline(const State& state, [[maybe_unused]] const PipelineConfig& config);
void render(const State& state, const DrawData& data, const wgpu::RenderPassEncoder& pass);
} // namespace aurora::gfx::stream

@@ -19,30 +19,30 @@ struct TextureFormatInfo {
uint8_t blockSize;
bool compressed;
};
static TextureFormatInfo format_info(WGPUTextureFormat format) {
static TextureFormatInfo format_info(wgpu::TextureFormat format) {
switch (format) {
case WGPUTextureFormat_R8Unorm:
case wgpu::TextureFormat::R8Unorm:
return {1, 1, 1, false};
case WGPUTextureFormat_R16Sint:
case wgpu::TextureFormat::R16Sint:
return {1, 1, 2, false};
case WGPUTextureFormat_RGBA8Unorm:
case WGPUTextureFormat_R32Float:
case wgpu::TextureFormat::RGBA8Unorm:
case wgpu::TextureFormat::R32Float:
return {1, 1, 4, false};
case WGPUTextureFormat_BC1RGBAUnorm:
case wgpu::TextureFormat::BC1RGBAUnorm:
return {4, 4, 8, true};
default:
Log.report(LOG_FATAL, FMT_STRING("format_info: unimplemented format {}"), magic_enum::enum_name(format));
unreachable();
}
}
static WGPUExtent3D physical_size(WGPUExtent3D size, TextureFormatInfo info) {
static wgpu::Extent3D physical_size(wgpu::Extent3D size, TextureFormatInfo info) {
const uint32_t width = ((size.width + info.blockWidth - 1) / info.blockWidth) * info.blockWidth;
const uint32_t height = ((size.height + info.blockHeight - 1) / info.blockHeight) * info.blockHeight;
return {width, height, size.depthOrArrayLayers};
}
TextureHandle new_static_texture_2d(uint32_t width, uint32_t height, uint32_t mips, u32 format,
ArrayRef<uint8_t> data, const char* label) noexcept {
TextureHandle new_static_texture_2d(uint32_t width, uint32_t height, uint32_t mips, u32 format, ArrayRef<uint8_t> data,
const char* label) noexcept {
auto handle = new_dynamic_texture_2d(width, height, mips, format, label);
const auto& ref = *handle;

@@ -56,7 +56,7 @@ TextureHandle new_static_texture_2d(uint32_t width, uint32_t height, uint32_t mi
uint32_t offset = 0;
for (uint32_t mip = 0; mip < mips; ++mip) {
const WGPUExtent3D mipSize{
const wgpu::Extent3D mipSize{
.width = std::max(ref.size.width >> mip, 1u),
.height = std::max(ref.size.height >> mip, 1u),
.depthOrArrayLayers = ref.size.depthOrArrayLayers,

@@ -72,19 +72,19 @@ TextureHandle new_static_texture_2d(uint32_t width, uint32_t height, uint32_t mi
offset + dataSize, data.size());
unreachable();
}
const WGPUImageCopyTexture dstView{
const wgpu::ImageCopyTexture dstView{
.texture = ref.texture,
.mipLevel = mip,
};
// const auto range = push_texture_data(data.data() + offset, dataSize, bytesPerRow, heightBlocks);
const WGPUTextureDataLayout dataLayout{
const wgpu::TextureDataLayout dataLayout{
// .offset = range.offset,
.bytesPerRow = bytesPerRow,
.rowsPerImage = heightBlocks,
};
// TODO
// g_textureUploads.emplace_back(dataLayout, std::move(dstView), physicalSize);
wgpuQueueWriteTexture(g_queue, &dstView, data.data() + offset, dataSize, &dataLayout, &physicalSize);
g_queue.WriteTexture(&dstView, data.data() + offset, dataSize, &dataLayout, &physicalSize);
offset += dataSize;
}
if (data.size() != UINT32_MAX && offset < data.size()) {
@@ -97,60 +97,61 @@ TextureHandle new_static_texture_2d(uint32_t width, uint32_t height, uint32_t mi
TextureHandle new_dynamic_texture_2d(uint32_t width, uint32_t height, uint32_t mips, u32 format,
const char* label) noexcept {
const auto wgpuFormat = to_wgpu(format);
const WGPUExtent3D size{
const wgpu::Extent3D size{
.width = width,
.height = height,
.depthOrArrayLayers = 1,
};
const WGPUTextureDescriptor textureDescriptor{
const wgpu::TextureDescriptor textureDescriptor{
.label = label,
.usage = WGPUTextureUsage_TextureBinding | WGPUTextureUsage_CopyDst,
.dimension = WGPUTextureDimension_2D,
.usage = wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopyDst,
.dimension = wgpu::TextureDimension::e2D,
.size = size,
.format = wgpuFormat,
.mipLevelCount = mips,
.sampleCount = 1,
};
const auto viewLabel = fmt::format(FMT_STRING("{} view"), label);
const WGPUTextureViewDescriptor textureViewDescriptor{
const wgpu::TextureViewDescriptor textureViewDescriptor{
.label = viewLabel.c_str(),
.format = wgpuFormat,
.dimension = WGPUTextureViewDimension_2D,
.dimension = wgpu::TextureViewDimension::e2D,
.mipLevelCount = mips,
.arrayLayerCount = WGPU_ARRAY_LAYER_COUNT_UNDEFINED,
};
auto texture = wgpuDeviceCreateTexture(g_device, &textureDescriptor);
auto textureView = wgpuTextureCreateView(texture, &textureViewDescriptor);
return std::make_shared<TextureRef>(texture, textureView, size, wgpuFormat, mips, format, false);
auto texture = g_device.CreateTexture(&textureDescriptor);
auto textureView = texture.CreateView(&textureViewDescriptor);
return std::make_shared<TextureRef>(std::move(texture), std::move(textureView), size, wgpuFormat, mips, format,
false);
}
TextureHandle new_render_texture(uint32_t width, uint32_t height, u32 fmt, const char* label) noexcept {
const auto wgpuFormat = webgpu::g_graphicsConfig.colorFormat;
const WGPUExtent3D size{
const auto wgpuFormat = webgpu::g_graphicsConfig.swapChainDescriptor.format;
const wgpu::Extent3D size{
.width = width,
.height = height,
.depthOrArrayLayers = 1,
};
const WGPUTextureDescriptor textureDescriptor{
const wgpu::TextureDescriptor textureDescriptor{
.label = label,
.usage = WGPUTextureUsage_TextureBinding | WGPUTextureUsage_CopyDst,
.dimension = WGPUTextureDimension_2D,
.usage = wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopyDst,
.dimension = wgpu::TextureDimension::e2D,
.size = size,
.format = wgpuFormat,
.mipLevelCount = 1,
.sampleCount = 1,
};
const auto viewLabel = fmt::format(FMT_STRING("{} view"), label);
const WGPUTextureViewDescriptor textureViewDescriptor{
const wgpu::TextureViewDescriptor textureViewDescriptor{
.label = viewLabel.c_str(),
.format = wgpuFormat,
.dimension = WGPUTextureViewDimension_2D,
.dimension = wgpu::TextureViewDimension::e2D,
.mipLevelCount = WGPU_MIP_LEVEL_COUNT_UNDEFINED,
.arrayLayerCount = WGPU_ARRAY_LAYER_COUNT_UNDEFINED,
};
auto texture = wgpuDeviceCreateTexture(g_device, &textureDescriptor);
auto textureView = wgpuTextureCreateView(texture, &textureViewDescriptor);
return std::make_shared<TextureRef>(texture, textureView, size, wgpuFormat, 1, fmt, true);
auto texture = g_device.CreateTexture(&textureDescriptor);
auto textureView = texture.CreateView(&textureViewDescriptor);
return std::make_shared<TextureRef>(std::move(texture), std::move(textureView), size, wgpuFormat, 1, fmt, true);
}
void write_texture(const TextureRef& ref, ArrayRef<uint8_t> data) noexcept {

@@ -164,7 +165,7 @@ void write_texture(const TextureRef& ref, ArrayRef<uint8_t> data) noexcept {
uint32_t offset = 0;
for (uint32_t mip = 0; mip < ref.mipCount; ++mip) {
const WGPUExtent3D mipSize{
const wgpu::Extent3D mipSize{
.width = std::max(ref.size.width >> mip, 1u),
.height = std::max(ref.size.height >> mip, 1u),
.depthOrArrayLayers = ref.size.depthOrArrayLayers,

@@ -180,26 +181,26 @@ void write_texture(const TextureRef& ref, ArrayRef<uint8_t> data) noexcept {
data.size());
unreachable();
}
// auto dstView = WGPUImageCopyTexture{
// auto dstView = wgpu::ImageCopyTexture{
// .texture = ref.texture,
// .mipLevel = mip,
// };
// const auto range = push_texture_data(data.data() + offset, dataSize, bytesPerRow, heightBlocks);
// const auto dataLayout = WGPUTextureDataLayout{
// const auto dataLayout = wgpu::TextureDataLayout{
// .offset = range.offset,
// .bytesPerRow = bytesPerRow,
// .rowsPerImage = heightBlocks,
// };
// g_textureUploads.emplace_back(dataLayout, std::move(dstView), physicalSize);
const WGPUImageCopyTexture dstView{
const wgpu::ImageCopyTexture dstView{
.texture = ref.texture,
.mipLevel = mip,
};
const WGPUTextureDataLayout dataLayout{
const wgpu::TextureDataLayout dataLayout{
.bytesPerRow = bytesPerRow,
.rowsPerImage = heightBlocks,
};
wgpuQueueWriteTexture(g_queue, &dstView, data.data() + offset, dataSize, &dataLayout, &physicalSize);
g_queue.WriteTexture(&dstView, data.data() + offset, dataSize, &dataLayout, &physicalSize);
offset += dataSize;
}
if (data.size() != UINT32_MAX && offset < data.size()) {
@@ -5,45 +5,40 @@
namespace aurora::gfx {
struct TextureUpload {
WGPUTextureDataLayout layout;
WGPUImageCopyTexture tex;
WGPUExtent3D size;
wgpu::TextureDataLayout layout;
wgpu::ImageCopyTexture tex;
wgpu::Extent3D size;
TextureUpload(WGPUTextureDataLayout layout, WGPUImageCopyTexture tex, WGPUExtent3D size) noexcept
TextureUpload(wgpu::TextureDataLayout layout, wgpu::ImageCopyTexture tex, wgpu::Extent3D size) noexcept
: layout(layout), tex(tex), size(size) {}
};
extern std::vector<TextureUpload> g_textureUploads;
constexpr u32 InvalidTextureFormat = -1;
struct TextureRef {
WGPUTexture texture;
WGPUTextureView view;
WGPUExtent3D size;
WGPUTextureFormat format;
wgpu::Texture texture;
wgpu::TextureView view;
wgpu::Extent3D size;
wgpu::TextureFormat format;
uint32_t mipCount;
u32 gxFormat;
bool isRenderTexture; // :shrug: for now
TextureRef(WGPUTexture texture, WGPUTextureView view, WGPUExtent3D size, WGPUTextureFormat format, uint32_t mipCount,
u32 gxFormat, bool isRenderTexture)
: texture(texture)
, view(view)
TextureRef(wgpu::Texture texture, wgpu::TextureView view, wgpu::Extent3D size, wgpu::TextureFormat format,
uint32_t mipCount, u32 gxFormat, bool isRenderTexture)
: texture(std::move(texture))
, view(std::move(view))
, size(size)
, format(format)
, mipCount(mipCount)
, gxFormat(gxFormat)
, isRenderTexture(isRenderTexture) {}
~TextureRef() {
wgpuTextureViewRelease(view);
wgpuTextureRelease(texture);
}
};
using TextureHandle = std::shared_ptr<TextureRef>;
TextureHandle new_static_texture_2d(uint32_t width, uint32_t height, uint32_t mips, u32 format,
ArrayRef<uint8_t> data, const char* label) noexcept;
TextureHandle new_static_texture_2d(uint32_t width, uint32_t height, uint32_t mips, u32 format, ArrayRef<uint8_t> data,
const char* label) noexcept;
TextureHandle new_dynamic_texture_2d(uint32_t width, uint32_t height, uint32_t mips, u32 format,
const char* label) noexcept;
TextureHandle new_render_texture(uint32_t width, uint32_t height, u32 fmt, const char* label) noexcept;

@@ -84,7 +79,7 @@ struct TextureBind {
TextureBind() noexcept = default;
TextureBind(GXTexObj_ obj) noexcept : texObj(std::move(obj)) {}
void reset() noexcept { texObj.ref.reset(); };
[[nodiscard]] WGPUSamplerDescriptor get_descriptor() const noexcept;
[[nodiscard]] wgpu::SamplerDescriptor get_descriptor() const noexcept;
operator bool() const noexcept { return texObj.ref.operator bool(); }
};
} // namespace aurora::gfx

@@ -597,7 +597,7 @@ ByteBuffer convert_texture(u32 format, uint32_t width, uint32_t height, uint32_t
case GX_TF_RGBA8:
return BuildRGBA8FromGCN(width, height, mips, data);
case GX_TF_CMPR:
if (wgpuDeviceHasFeature(webgpu::g_device, WGPUFeatureName_TextureCompressionBC)) {
if (webgpu::g_device.HasFeature(wgpu::FeatureName::TextureCompressionBC)) {
return BuildDXT1FromGCN(width, height, mips, data);
} else {
return BuildRGBA8FromCMPR(width, height, mips, data);
@@ -5,23 +5,23 @@
#include "../webgpu/gpu.hpp"
namespace aurora::gfx {
static WGPUTextureFormat to_wgpu(u32 format) {
static wgpu::TextureFormat to_wgpu(u32 format) {
switch (format) {
case GX_TF_I4:
case GX_TF_I8:
case GX_TF_R8_PC:
return WGPUTextureFormat_R8Unorm;
return wgpu::TextureFormat::R8Unorm;
case GX_TF_C4:
case GX_TF_C8:
case GX_TF_C14X2:
return WGPUTextureFormat_R16Sint;
return wgpu::TextureFormat::R16Sint;
case GX_TF_CMPR:
if (wgpuDeviceHasFeature(webgpu::g_device, WGPUFeatureName_TextureCompressionBC)) {
return WGPUTextureFormat_BC1RGBAUnorm;
if (webgpu::g_device.HasFeature(wgpu::FeatureName::TextureCompressionBC)) {
return wgpu::TextureFormat::BC1RGBAUnorm;
}
[[fallthrough]];
default:
return WGPUTextureFormat_RGBA8Unorm;
return wgpu::TextureFormat::RGBA8Unorm;
}
}

@@ -18,7 +18,7 @@ static std::string g_imguiLog{};
static bool g_useSdlRenderer = false;
static std::vector<SDL_Texture*> g_sdlTextures;
static std::vector<WGPUTexture> g_wgpuTextures;
static std::vector<wgpu::Texture> g_wgpuTextures;
void create_context() noexcept {
IMGUI_CHECKVERSION();

@@ -41,7 +41,8 @@ void initialize() noexcept {
if (g_useSdlRenderer) {
ImGui_ImplSDLRenderer_Init(renderer);
} else {
ImGui_ImplWGPU_Init(webgpu::g_device, 1, webgpu::g_graphicsConfig.colorFormat);
const auto format = webgpu::g_graphicsConfig.swapChainDescriptor.format;
ImGui_ImplWGPU_Init(webgpu::g_device.Get(), 1, static_cast<WGPUTextureFormat>(format));
}
}

@@ -57,9 +58,6 @@ void shutdown() noexcept {
SDL_DestroyTexture(texture);
}
g_sdlTextures.clear();
for (const auto& texture : g_wgpuTextures) {
wgpuTextureDestroy(texture);
}
g_wgpuTextures.clear();
}

@@ -99,7 +97,7 @@ void new_frame(const AuroraWindowSize& size) noexcept {
ImGui::NewFrame();
}
void render(WGPURenderPassEncoder pass) noexcept {
void render(const wgpu::RenderPassEncoder& pass) noexcept {
ImGui::Render();
auto* data = ImGui::GetDrawData();

@@ -111,7 +109,7 @@ void render(WGPURenderPassEncoder pass) noexcept {
ImGui_ImplSDLRenderer_RenderDrawData(data);
SDL_RenderPresent(renderer);
} else {
ImGui_ImplWGPU_RenderDrawData(data, pass);
ImGui_ImplWGPU_RenderDrawData(data, pass.Get());
}
}
@@ -124,41 +122,41 @@ ImTextureID add_texture(uint32_t width, uint32_t height, const uint8_t* data) no
g_sdlTextures.push_back(texture);
return texture;
}
const auto size = WGPUExtent3D{
const wgpu::Extent3D size{
.width = width,
.height = height,
.depthOrArrayLayers = 1,
};
const auto textureDescriptor = WGPUTextureDescriptor{
const wgpu::TextureDescriptor textureDescriptor{
.label = "imgui texture",
.usage = WGPUTextureUsage_TextureBinding | WGPUTextureUsage_CopyDst,
.dimension = WGPUTextureDimension_2D,
.usage = wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopyDst,
.dimension = wgpu::TextureDimension::e2D,
.size = size,
.format = WGPUTextureFormat_RGBA8Unorm,
.format = wgpu::TextureFormat::RGBA8Unorm,
.mipLevelCount = 1,
.sampleCount = 1,
};
const auto textureViewDescriptor = WGPUTextureViewDescriptor{
const wgpu::TextureViewDescriptor textureViewDescriptor{
.label = "imgui texture view",
.format = WGPUTextureFormat_RGBA8Unorm,
.dimension = WGPUTextureViewDimension_2D,
.format = wgpu::TextureFormat::RGBA8Unorm,
.dimension = wgpu::TextureViewDimension::e2D,
.mipLevelCount = WGPU_MIP_LEVEL_COUNT_UNDEFINED,
.arrayLayerCount = WGPU_ARRAY_LAYER_COUNT_UNDEFINED,
};
auto texture = wgpuDeviceCreateTexture(webgpu::g_device, &textureDescriptor);
auto textureView = wgpuTextureCreateView(texture, &textureViewDescriptor);
auto texture = webgpu::g_device.CreateTexture(&textureDescriptor);
auto textureView = texture.CreateView(&textureViewDescriptor);
{
const auto dstView = WGPUImageCopyTexture{
const wgpu::ImageCopyTexture dstView{
.texture = texture,
};
const auto dataLayout = WGPUTextureDataLayout{
const wgpu::TextureDataLayout dataLayout{
.bytesPerRow = 4 * width,
.rowsPerImage = height,
};
wgpuQueueWriteTexture(webgpu::g_queue, &dstView, data, width * height * 4, &dataLayout, &size);
webgpu::g_queue.WriteTexture(&dstView, data, width * height * 4, &dataLayout, &size);
}
g_wgpuTextures.push_back(texture);
return textureView;
return textureView.Release();
}
} // namespace aurora::imgui

@@ -5,7 +5,10 @@
#include <string_view>
union SDL_Event;
typedef struct WGPURenderPassEncoderImpl* WGPURenderPassEncoder;
namespace wgpu {
class RenderPassEncoder;
} // namespace wgpu
namespace aurora::imgui {
void create_context() noexcept;

@@ -14,5 +17,5 @@ void shutdown() noexcept;
void process_event(const SDL_Event& event) noexcept;
void new_frame(const AuroraWindowSize& size) noexcept;
void render(WGPURenderPassEncoder pass) noexcept;
void render(const wgpu::RenderPassEncoder& pass) noexcept;
} // namespace aurora::imgui
@@ -6,140 +6,149 @@
#include "../internal.hpp"
#include <SDL.h>
#include <dawn/native/DawnNative.h>
#include <magic_enum.hpp>
#include <memory>
#include <algorithm>
#ifdef WEBGPU_DAWN
#include <dawn/native/DawnNative.h>
#include "../dawn/BackendBinding.hpp"
#endif
namespace aurora::webgpu {
static Module Log("aurora::gpu");
WGPUDevice g_device;
WGPUQueue g_queue;
WGPUSwapChain g_swapChain;
WGPUBackendType g_backendType;
wgpu::Device g_device;
wgpu::Queue g_queue;
wgpu::SwapChain g_swapChain;
wgpu::BackendType g_backendType;
GraphicsConfig g_graphicsConfig;
TextureWithSampler g_frameBuffer;
TextureWithSampler g_frameBufferResolved;
TextureWithSampler g_depthBuffer;
// EFB -> XFB copy pipeline
static WGPUBindGroupLayout g_CopyBindGroupLayout;
WGPURenderPipeline g_CopyPipeline;
WGPUBindGroup g_CopyBindGroup;
static wgpu::BindGroupLayout g_CopyBindGroupLayout;
wgpu::RenderPipeline g_CopyPipeline;
wgpu::BindGroup g_CopyBindGroup;
static std::unique_ptr<dawn::native::Instance> g_Instance;
static dawn::native::Adapter g_Adapter;
static WGPUAdapterProperties g_AdapterProperties;
static std::unique_ptr<utils::BackendBinding> g_BackendBinding;
#ifdef WEBGPU_DAWN
static std::unique_ptr<dawn::native::Instance> g_dawnInstance;
static dawn::native::Adapter g_adapter;
static std::unique_ptr<utils::BackendBinding> g_backendBinding;
#else
wgpu::Instance g_instance;
static wgpu::Adapter g_adapter;
#endif
static wgpu::Surface g_surface;
static wgpu::AdapterProperties g_adapterProperties;
TextureWithSampler create_render_texture(bool multisampled) {
const WGPUExtent3D size{
.width = g_graphicsConfig.width,
.height = g_graphicsConfig.height,
const wgpu::Extent3D size{
.width = g_graphicsConfig.swapChainDescriptor.width,
.height = g_graphicsConfig.swapChainDescriptor.height,
.depthOrArrayLayers = 1,
};
const auto format = g_graphicsConfig.colorFormat;
const auto format = g_graphicsConfig.swapChainDescriptor.format;
uint32_t sampleCount = 1;
if (multisampled) {
sampleCount = g_graphicsConfig.msaaSamples;
}
const WGPUTextureDescriptor textureDescriptor{
const wgpu::TextureDescriptor textureDescriptor{
.label = "Render texture",
.usage = WGPUTextureUsage_RenderAttachment | WGPUTextureUsage_TextureBinding | WGPUTextureUsage_CopySrc |
WGPUTextureUsage_CopyDst,
.dimension = WGPUTextureDimension_2D,
.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopySrc |
wgpu::TextureUsage::CopyDst,
.dimension = wgpu::TextureDimension::e2D,
.size = size,
.format = format,
.mipLevelCount = 1,
.sampleCount = sampleCount,
};
auto texture = wgpuDeviceCreateTexture(g_device, &textureDescriptor);
auto texture = g_device.CreateTexture(&textureDescriptor);
const WGPUTextureViewDescriptor viewDescriptor{
.dimension = WGPUTextureViewDimension_2D,
const wgpu::TextureViewDescriptor viewDescriptor{
.label = "Render texture view",
.dimension = wgpu::TextureViewDimension::e2D,
.mipLevelCount = WGPU_MIP_LEVEL_COUNT_UNDEFINED,
.arrayLayerCount = WGPU_ARRAY_LAYER_COUNT_UNDEFINED,
};
auto view = wgpuTextureCreateView(texture, &viewDescriptor);
auto view = texture.CreateView(&viewDescriptor);
const WGPUSamplerDescriptor samplerDescriptor{
const wgpu::SamplerDescriptor samplerDescriptor{
.label = "Render sampler",
.addressModeU = WGPUAddressMode_ClampToEdge,
.addressModeV = WGPUAddressMode_ClampToEdge,
.addressModeW = WGPUAddressMode_ClampToEdge,
.magFilter = WGPUFilterMode_Linear,
.minFilter = WGPUFilterMode_Linear,
.mipmapFilter = WGPUFilterMode_Linear,
.addressModeU = wgpu::AddressMode::ClampToEdge,
.addressModeV = wgpu::AddressMode::ClampToEdge,
.addressModeW = wgpu::AddressMode::ClampToEdge,
.magFilter = wgpu::FilterMode::Linear,
.minFilter = wgpu::FilterMode::Linear,
.mipmapFilter = wgpu::FilterMode::Linear,
.lodMinClamp = 0.f,
.lodMaxClamp = 1000.f,
.maxAnisotropy = 1,
};
auto sampler = wgpuDeviceCreateSampler(g_device, &samplerDescriptor);
auto sampler = g_device.CreateSampler(&samplerDescriptor);
return {
.texture{texture},
.view{view},
.texture = std::move(texture),
.view = std::move(view),
.size = size,
.format = format,
.sampler{sampler},
.sampler = std::move(sampler),
};
}
static TextureWithSampler create_depth_texture() {
const WGPUExtent3D size{
.width = g_graphicsConfig.width,
.height = g_graphicsConfig.height,
const wgpu::Extent3D size{
.width = g_graphicsConfig.swapChainDescriptor.width,
.height = g_graphicsConfig.swapChainDescriptor.height,
.depthOrArrayLayers = 1,
};
const auto format = g_graphicsConfig.depthFormat;
const WGPUTextureDescriptor textureDescriptor{
const wgpu::TextureDescriptor textureDescriptor{
.label = "Depth texture",
.usage = WGPUTextureUsage_RenderAttachment | WGPUTextureUsage_TextureBinding,
.dimension = WGPUTextureDimension_2D,
.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::TextureBinding,
.dimension = wgpu::TextureDimension::e2D,
.size = size,
.format = format,
.mipLevelCount = 1,
.sampleCount = g_graphicsConfig.msaaSamples,
};
auto texture = wgpuDeviceCreateTexture(g_device, &textureDescriptor);
auto texture = g_device.CreateTexture(&textureDescriptor);
const WGPUTextureViewDescriptor viewDescriptor{
.dimension = WGPUTextureViewDimension_2D,
const wgpu::TextureViewDescriptor viewDescriptor{
.label = "Depth texture view",
.dimension = wgpu::TextureViewDimension::e2D,
.mipLevelCount = WGPU_MIP_LEVEL_COUNT_UNDEFINED,
.arrayLayerCount = WGPU_ARRAY_LAYER_COUNT_UNDEFINED,
};
auto view = wgpuTextureCreateView(texture, &viewDescriptor);
auto view = texture.CreateView(&viewDescriptor);
const WGPUSamplerDescriptor samplerDescriptor{
const wgpu::SamplerDescriptor samplerDescriptor{
.label = "Depth sampler",
.addressModeU = WGPUAddressMode_ClampToEdge,
.addressModeV = WGPUAddressMode_ClampToEdge,
.addressModeW = WGPUAddressMode_ClampToEdge,
.magFilter = WGPUFilterMode_Linear,
.minFilter = WGPUFilterMode_Linear,
.mipmapFilter = WGPUFilterMode_Linear,
.addressModeU = wgpu::AddressMode::ClampToEdge,
.addressModeV = wgpu::AddressMode::ClampToEdge,
.addressModeW = wgpu::AddressMode::ClampToEdge,
.magFilter = wgpu::FilterMode::Linear,
.minFilter = wgpu::FilterMode::Linear,
.mipmapFilter = wgpu::FilterMode::Linear,
.lodMinClamp = 0.f,
.lodMaxClamp = 1000.f,
.maxAnisotropy = 1,
};
auto sampler = wgpuDeviceCreateSampler(g_device, &samplerDescriptor);
auto sampler = g_device.CreateSampler(&samplerDescriptor);
return {
.texture{texture},
.view{view},
.texture = std::move(texture),
.view = std::move(view),
.size = size,
.format = format,
.sampler{sampler},
.sampler = std::move(sampler),
};
}
void create_copy_pipeline() {
WGPUShaderModuleWGSLDescriptor sourceDescriptor{
.chain = {.sType = WGPUSType_ShaderModuleWGSLDescriptor},
.source = R"""(
wgpu::ShaderModuleWGSLDescriptor sourceDescriptor{};
sourceDescriptor.source = R"""(
@group(0) @binding(0)
var efb_sampler: sampler;
@group(0) @binding(1)
@ -173,179 +182,237 @@ fn vs_main(@builtin(vertex_index) vtxIdx: u32) -> VertexOutput {
|
|||
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
|
||||
return textureSample(efb_texture, efb_sampler, in.uv);
|
||||
}
|
||||
)""",
|
||||
};
|
||||
const WGPUShaderModuleDescriptor moduleDescriptor{
|
||||
.nextInChain = &sourceDescriptor.chain,
|
||||
)""";
|
||||
const wgpu::ShaderModuleDescriptor moduleDescriptor{
|
||||
.nextInChain = &sourceDescriptor,
|
||||
.label = "XFB Copy Module",
|
||||
};
|
||||
auto module = wgpuDeviceCreateShaderModule(g_device, &moduleDescriptor);
|
||||
const std::array colorTargets{WGPUColorTargetState{
|
||||
.format = g_graphicsConfig.colorFormat,
|
||||
.writeMask = WGPUColorWriteMask_All,
|
||||
auto module = g_device.CreateShaderModule(&moduleDescriptor);
|
||||
const std::array colorTargets{wgpu::ColorTargetState{
|
||||
.format = g_graphicsConfig.swapChainDescriptor.format,
|
||||
.writeMask = wgpu::ColorWriteMask::All,
|
||||
}};
|
||||
const WGPUFragmentState fragmentState{
|
||||
const wgpu::FragmentState fragmentState{
|
||||
.module = module,
|
||||
.entryPoint = "fs_main",
|
||||
.targetCount = colorTargets.size(),
|
||||
.targets = colorTargets.data(),
|
||||
};
|
||||
const std::array bindGroupLayoutEntries{
|
||||
WGPUBindGroupLayoutEntry{
|
||||
wgpu::BindGroupLayoutEntry{
|
||||
.binding = 0,
|
||||
.visibility = WGPUShaderStage_Fragment,
|
||||
.visibility = wgpu::ShaderStage::Fragment,
|
||||
.sampler =
|
||||
WGPUSamplerBindingLayout{
|
||||
.type = WGPUSamplerBindingType_Filtering,
|
||||
wgpu::SamplerBindingLayout{
|
||||
.type = wgpu::SamplerBindingType::Filtering,
|
||||
},
|
||||
},
|
||||
WGPUBindGroupLayoutEntry{
|
||||
wgpu::BindGroupLayoutEntry{
|
||||
.binding = 1,
|
||||
.visibility = WGPUShaderStage_Fragment,
|
||||
.visibility = wgpu::ShaderStage::Fragment,
|
||||
.texture =
|
||||
WGPUTextureBindingLayout{
|
||||
.sampleType = WGPUTextureSampleType_Float,
|
||||
.viewDimension = WGPUTextureViewDimension_2D,
|
||||
wgpu::TextureBindingLayout{
|
||||
.sampleType = wgpu::TextureSampleType::Float,
|
||||
.viewDimension = wgpu::TextureViewDimension::e2D,
|
||||
},
|
||||
},
|
||||
};
|
||||
const WGPUBindGroupLayoutDescriptor bindGroupLayoutDescriptor{
|
||||
const wgpu::BindGroupLayoutDescriptor bindGroupLayoutDescriptor{
|
||||
.entryCount = bindGroupLayoutEntries.size(),
|
||||
.entries = bindGroupLayoutEntries.data(),
|
||||
};
|
||||
g_CopyBindGroupLayout = wgpuDeviceCreateBindGroupLayout(g_device, &bindGroupLayoutDescriptor);
|
||||
const WGPUPipelineLayoutDescriptor layoutDescriptor{
|
||||
g_CopyBindGroupLayout = g_device.CreateBindGroupLayout(&bindGroupLayoutDescriptor);
|
||||
const wgpu::PipelineLayoutDescriptor layoutDescriptor{
|
||||
.bindGroupLayoutCount = 1,
|
||||
.bindGroupLayouts = &g_CopyBindGroupLayout,
|
||||
};
|
||||
auto pipelineLayout = wgpuDeviceCreatePipelineLayout(g_device, &layoutDescriptor);
|
||||
const WGPURenderPipelineDescriptor pipelineDescriptor{
|
||||
auto pipelineLayout = g_device.CreatePipelineLayout(&layoutDescriptor);
|
||||
const wgpu::RenderPipelineDescriptor pipelineDescriptor{
|
||||
.layout = pipelineLayout,
|
||||
.vertex =
|
||||
WGPUVertexState{
|
||||
wgpu::VertexState{
|
||||
.module = module,
|
||||
.entryPoint = "vs_main",
|
||||
},
|
||||
.primitive =
|
||||
WGPUPrimitiveState{
|
||||
.topology = WGPUPrimitiveTopology_TriangleList,
|
||||
wgpu::PrimitiveState{
|
||||
.topology = wgpu::PrimitiveTopology::TriangleList,
|
||||
},
|
||||
.multisample =
|
||||
WGPUMultisampleState{
|
||||
wgpu::MultisampleState{
|
||||
.count = 1,
|
||||
.mask = UINT32_MAX,
|
||||
},
|
||||
.fragment = &fragmentState,
|
||||
};
|
||||
g_CopyPipeline = wgpuDeviceCreateRenderPipeline(g_device, &pipelineDescriptor);
|
||||
wgpuPipelineLayoutRelease(pipelineLayout);
|
||||
g_CopyPipeline = g_device.CreateRenderPipeline(&pipelineDescriptor);
|
||||
}
|
||||
|
||||
void create_copy_bind_group() {
|
||||
const std::array bindGroupEntries{
|
||||
WGPUBindGroupEntry{
|
||||
wgpu::BindGroupEntry{
|
||||
.binding = 0,
|
||||
.sampler = g_graphicsConfig.msaaSamples > 1 ? g_frameBufferResolved.sampler : g_frameBuffer.sampler,
|
||||
},
|
||||
WGPUBindGroupEntry{
|
||||
wgpu::BindGroupEntry{
|
||||
.binding = 1,
|
||||
.textureView = g_graphicsConfig.msaaSamples > 1 ? g_frameBufferResolved.view : g_frameBuffer.view,
|
||||
},
|
||||
};
|
||||
const WGPUBindGroupDescriptor bindGroupDescriptor{
|
||||
const wgpu::BindGroupDescriptor bindGroupDescriptor{
|
||||
.layout = g_CopyBindGroupLayout,
|
||||
.entryCount = bindGroupEntries.size(),
|
||||
.entries = bindGroupEntries.data(),
|
||||
};
|
||||
g_CopyBindGroup = wgpuDeviceCreateBindGroup(g_device, &bindGroupDescriptor);
|
||||
g_CopyBindGroup = g_device.CreateBindGroup(&bindGroupDescriptor);
|
||||
}
|
||||
|
||||
static void error_callback(WGPUErrorType type, char const* message, void* userdata) {
|
||||
Log.report(LOG_FATAL, FMT_STRING("Dawn error {}: {}"), magic_enum::enum_name(static_cast<WGPUErrorType>(type)),
|
||||
Log.report(LOG_FATAL, FMT_STRING("WebGPU error {}: {}"), magic_enum::enum_name(static_cast<WGPUErrorType>(type)),
|
||||
message);
|
||||
}
|
||||
|
||||
#ifndef WEBGPU_DAWN
|
||||
static void adapter_callback(WGPURequestAdapterStatus status, WGPUAdapter adapter, char const* message,
|
||||
void* userdata) {
|
||||
if (status == WGPURequestAdapterStatus_Success) {
|
||||
g_adapter = wgpu::Adapter::Acquire(adapter);
|
||||
} else {
|
||||
Log.report(LOG_WARNING, FMT_STRING("Adapter request failed with message: {}"), message);
|
||||
}
|
||||
*static_cast<bool*>(userdata) = true;
|
||||
}
|
||||
#endif
|
||||
|
||||
static void device_callback(WGPURequestDeviceStatus status, WGPUDevice device, char const* message, void* userdata) {
|
||||
if (status == WGPURequestDeviceStatus_Success) {
|
||||
g_device = device;
|
||||
g_device = wgpu::Device::Acquire(device);
|
||||
} else {
|
||||
Log.report(LOG_WARNING, FMT_STRING("Device request failed with message: {}"), message);
|
||||
}
|
||||
*static_cast<bool*>(userdata) = true;
|
||||
}

static WGPUBackendType to_wgpu_backend(AuroraBackend backend) {
static wgpu::BackendType to_wgpu_backend(AuroraBackend backend) {
switch (backend) {
case BACKEND_WEBGPU:
return WGPUBackendType_WebGPU;
return wgpu::BackendType::WebGPU;
case BACKEND_D3D12:
return WGPUBackendType_D3D12;
return wgpu::BackendType::D3D12;
case BACKEND_METAL:
return WGPUBackendType_Metal;
return wgpu::BackendType::Metal;
case BACKEND_VULKAN:
return WGPUBackendType_Vulkan;
return wgpu::BackendType::Vulkan;
case BACKEND_OPENGL:
return WGPUBackendType_OpenGL;
return wgpu::BackendType::OpenGL;
case BACKEND_OPENGLES:
return WGPUBackendType_OpenGLES;
return wgpu::BackendType::OpenGLES;
default:
return WGPUBackendType_Null;
return wgpu::BackendType::Null;
}
}

bool initialize(AuroraBackend auroraBackend) {
if (!g_Instance) {
#ifdef WEBGPU_DAWN
if (!g_dawnInstance) {
Log.report(LOG_INFO, FMT_STRING("Creating Dawn instance"));
g_Instance = std::make_unique<dawn::native::Instance>();
g_dawnInstance = std::make_unique<dawn::native::Instance>();
}
WGPUBackendType backend = to_wgpu_backend(auroraBackend);
#else
if (!g_instance) {
const wgpu::InstanceDescriptor instanceDescriptor{};
g_instance = {}; // TODO use wgpuCreateInstance when supported
}
#endif
wgpu::BackendType backend = to_wgpu_backend(auroraBackend);
#ifdef EMSCRIPTEN
if (backend != wgpu::BackendType::WebGPU) {
Log.report(LOG_WARNING, FMT_STRING("Backend type {} unsupported"), magic_enum::enum_name(backend));
return false;
}
#endif
Log.report(LOG_INFO, FMT_STRING("Attempting to initialize {}"), magic_enum::enum_name(backend));
#if 0
// D3D12's debug layer is very slow
g_Instance->EnableBackendValidation(backend != WGPUBackendType::D3D12);
g_dawnInstance->EnableBackendValidation(backend != WGPUBackendType::D3D12);
#endif

#ifdef WEBGPU_DAWN
SDL_Window* window = window::get_sdl_window();
if (!utils::DiscoverAdapter(g_Instance.get(), window, backend)) {
if (!utils::DiscoverAdapter(g_dawnInstance.get(), window, backend)) {
return false;
}

{
std::vector<dawn::native::Adapter> adapters = g_Instance->GetAdapters();
std::vector<dawn::native::Adapter> adapters = g_dawnInstance->GetAdapters();
std::sort(adapters.begin(), adapters.end(), [&](const auto& a, const auto& b) {
WGPUAdapterProperties propertiesA;
WGPUAdapterProperties propertiesB;
wgpu::AdapterProperties propertiesA;
wgpu::AdapterProperties propertiesB;
a.GetProperties(&propertiesA);
b.GetProperties(&propertiesB);
constexpr std::array PreferredTypeOrder{
WGPUAdapterType_DiscreteGPU,
WGPUAdapterType_IntegratedGPU,
WGPUAdapterType_CPU,
wgpu::AdapterType::DiscreteGPU,
wgpu::AdapterType::IntegratedGPU,
wgpu::AdapterType::CPU,
};
const auto typeItA = std::find(PreferredTypeOrder.begin(), PreferredTypeOrder.end(), propertiesA.adapterType);
const auto typeItB = std::find(PreferredTypeOrder.begin(), PreferredTypeOrder.end(), propertiesB.adapterType);
return typeItA < typeItB;
});
const auto adapterIt = std::find_if(adapters.begin(), adapters.end(), [=](const auto& adapter) -> bool {
WGPUAdapterProperties properties;
wgpu::AdapterProperties properties;
adapter.GetProperties(&properties);
return properties.backendType == backend;
});
if (adapterIt == adapters.end()) {
return false;
}
g_Adapter = *adapterIt;
g_adapter = *adapterIt;
}
g_Adapter.GetProperties(&g_AdapterProperties);
g_backendType = g_AdapterProperties.backendType;
#else
const WGPUSurfaceDescriptorFromCanvasHTMLSelector canvasDescriptor{
.chain = {.sType = WGPUSType_SurfaceDescriptorFromCanvasHTMLSelector},
.selector = "#canvas",
};
const WGPUSurfaceDescriptor surfaceDescriptor{
.nextInChain = &canvasDescriptor.chain,
.label = "Surface",
};
g_surface = wgpu::Surface::Acquire(wgpuInstanceCreateSurface(g_instance.Get(), &surfaceDescriptor));
if (!g_surface) {
Log.report(LOG_FATAL, FMT_STRING("Failed to initialize surface"));
}
const WGPURequestAdapterOptions options{
.compatibleSurface = g_surface.Get(),
.powerPreference = WGPUPowerPreference_HighPerformance,
.forceFallbackAdapter = false,
};
bool adapterCallbackReceived = false;
wgpuInstanceRequestAdapter(g_instance.Get(), &options, adapter_callback, &adapterCallbackReceived);
while (!adapterCallbackReceived) {
emscripten_log(EM_LOG_CONSOLE, "Waiting for adapter...\n");
emscripten_sleep(100);
}
#endif
g_adapter.GetProperties(&g_adapterProperties);
g_backendType = g_adapterProperties.backendType;
const auto backendName = magic_enum::enum_name(g_backendType);
const char* adapterName = g_adapterProperties.name;
if (adapterName == nullptr) {
adapterName = "Unknown";
}
const char* driverDescription = g_adapterProperties.driverDescription;
if (driverDescription == nullptr) {
driverDescription = "Unknown";
}
Log.report(LOG_INFO, FMT_STRING("Graphics adapter information\n API: {}\n Device: {} ({})\n Driver: {}"),
backendName, g_AdapterProperties.name, magic_enum::enum_name(g_AdapterProperties.adapterType),
g_AdapterProperties.driverDescription);
backendName, adapterName, magic_enum::enum_name(g_adapterProperties.adapterType), driverDescription);

{
// TODO: emscripten doesn't implement wgpuAdapterGetLimits
#ifdef WEBGPU_DAWN
WGPUSupportedLimits supportedLimits{};
g_Adapter.GetLimits(&supportedLimits);
const WGPURequiredLimits requiredLimits{
g_adapter.GetLimits(&supportedLimits);
const wgpu::RequiredLimits requiredLimits{
.limits =
{
// Use "best" supported alignments

@ -357,13 +424,27 @@ bool initialize(AuroraBackend auroraBackend) {
: supportedLimits.limits.minStorageBufferOffsetAlignment,
},
};
std::vector<WGPUFeatureName> features;
const auto supportedFeatures = g_Adapter.GetSupportedFeatures();
#endif
std::vector<wgpu::FeatureName> features;
#ifdef WEBGPU_DAWN
const auto supportedFeatures = g_adapter.GetSupportedFeatures();
for (const auto* const feature : supportedFeatures) {
if (strcmp(feature, "texture-compression-bc") == 0) {
features.push_back(WGPUFeatureName_TextureCompressionBC);
features.push_back(wgpu::FeatureName::TextureCompressionBC);
}
}
#else
std::vector<wgpu::FeatureName> supportedFeatures;
size_t featureCount = g_adapter.EnumerateFeatures(nullptr);
supportedFeatures.resize(featureCount);
g_adapter.EnumerateFeatures(supportedFeatures.data());
for (const auto& feature : supportedFeatures) {
if (feature == wgpu::FeatureName::TextureCompressionBC) {
features.push_back(wgpu::FeatureName::TextureCompressionBC);
}
}
#endif
#ifdef WEBGPU_DAWN
const std::array enableToggles {
/* clang-format off */
#if _WIN32

@ -377,87 +458,110 @@ bool initialize(AuroraBackend auroraBackend) {
"disable_symbol_renaming",
/* clang-format on */
};
const WGPUDawnTogglesDeviceDescriptor togglesDescriptor{
.chain = {.sType = WGPUSType_DawnTogglesDeviceDescriptor},
.forceEnabledTogglesCount = enableToggles.size(),
.forceEnabledToggles = enableToggles.data(),
};
const WGPUDeviceDescriptor deviceDescriptor{
.nextInChain = &togglesDescriptor.chain,
wgpu::DawnTogglesDeviceDescriptor togglesDescriptor{};
togglesDescriptor.forceEnabledTogglesCount = enableToggles.size();
togglesDescriptor.forceEnabledToggles = enableToggles.data();
#endif
const wgpu::DeviceDescriptor deviceDescriptor{
#ifdef WEBGPU_DAWN
.nextInChain = &togglesDescriptor,
#endif
.requiredFeaturesCount = static_cast<uint32_t>(features.size()),
.requiredFeatures = features.data(),
#ifdef WEBGPU_DAWN
.requiredLimits = &requiredLimits,
#endif
};
bool deviceCallbackReceived = false;
g_Adapter.RequestDevice(&deviceDescriptor, &device_callback, &deviceCallbackReceived);
// while (!deviceCallbackReceived) {
// TODO wgpuInstanceProcessEvents
// }
g_adapter.RequestDevice(&deviceDescriptor, device_callback, &deviceCallbackReceived);
#ifdef EMSCRIPTEN
while (!deviceCallbackReceived) {
emscripten_log(EM_LOG_CONSOLE, "Waiting for device...\n");
emscripten_sleep(100);
}
#endif
if (!g_device) {
return false;
}
wgpuDeviceSetUncapturedErrorCallback(g_device, &error_callback, nullptr);
g_device.SetUncapturedErrorCallback(&error_callback, nullptr);
}
wgpuDeviceSetDeviceLostCallback(g_device, nullptr, nullptr);
g_queue = wgpuDeviceGetQueue(g_device);
g_device.SetDeviceLostCallback(nullptr, nullptr);
g_queue = g_device.GetQueue();

g_BackendBinding = std::unique_ptr<utils::BackendBinding>(utils::CreateBinding(g_backendType, window, g_device));
if (!g_BackendBinding) {
#if WEBGPU_DAWN
g_backendBinding =
std::unique_ptr<utils::BackendBinding>(utils::CreateBinding(g_backendType, window, g_device.Get()));
if (!g_backendBinding) {
return false;
}

auto swapChainFormat = static_cast<WGPUTextureFormat>(g_BackendBinding->GetPreferredSwapChainTextureFormat());
if (swapChainFormat == WGPUTextureFormat_RGBA8UnormSrgb) {
swapChainFormat = WGPUTextureFormat_RGBA8Unorm;
} else if (swapChainFormat == WGPUTextureFormat_BGRA8UnormSrgb) {
swapChainFormat = WGPUTextureFormat_BGRA8Unorm;
auto swapChainFormat = static_cast<wgpu::TextureFormat>(g_backendBinding->GetPreferredSwapChainTextureFormat());
#else
auto swapChainFormat = g_surface.GetPreferredFormat(g_adapter);
#endif
if (swapChainFormat == wgpu::TextureFormat::RGBA8UnormSrgb) {
swapChainFormat = wgpu::TextureFormat::RGBA8Unorm;
} else if (swapChainFormat == wgpu::TextureFormat::BGRA8UnormSrgb) {
swapChainFormat = wgpu::TextureFormat::BGRA8Unorm;
}
Log.report(LOG_INFO, FMT_STRING("Using swapchain format {}"), magic_enum::enum_name(swapChainFormat));
{
const WGPUSwapChainDescriptor descriptor{
.format = swapChainFormat,
.implementation = g_BackendBinding->GetSwapChainImplementation(),
};
g_swapChain = wgpuDeviceCreateSwapChain(g_device, nullptr, &descriptor);
}
{
const auto size = window::get_window_size();
g_graphicsConfig = GraphicsConfig{
.width = size.fb_width,
.height = size.fb_height,
.colorFormat = swapChainFormat,
.depthFormat = WGPUTextureFormat_Depth32Float,
.msaaSamples = g_config.msaa,
.textureAnisotropy = g_config.maxTextureAnisotropy,
};
create_copy_pipeline();
resize_swapchain(size.fb_width, size.fb_height, true);
// g_windowSize = size;
}
const auto size = window::get_window_size();
g_graphicsConfig = GraphicsConfig{
.swapChainDescriptor =
wgpu::SwapChainDescriptor{
.usage = wgpu::TextureUsage::RenderAttachment,
.format = swapChainFormat,
.width = size.fb_width,
.height = size.fb_height,
.presentMode = wgpu::PresentMode::Fifo,
#ifdef WEBGPU_DAWN
.implementation = g_backendBinding->GetSwapChainImplementation(),
#endif
},
.depthFormat = wgpu::TextureFormat::Depth32Float,
.msaaSamples = g_config.msaa,
.textureAnisotropy = g_config.maxTextureAnisotropy,
};
create_copy_pipeline();
resize_swapchain(size.fb_width, size.fb_height, true);
return true;
}
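The busy-wait loops above (polling the adapter/device flag and calling emscripten_sleep) only work when the Emscripten build is linked with Asyncify, which suspends the native call stack so the browser event loop can deliver the WebGPU callback. A minimal standalone sketch of the same idea; assumes linking with -sASYNCIFY, and the helper name is illustrative.

#ifdef EMSCRIPTEN
#include <emscripten.h>

// Sketch: spin until an async WebGPU callback flips `done`, yielding to the
// browser each iteration so the callback actually gets a chance to run.
static void wait_for(const bool& done) {
  while (!done) {
    emscripten_log(EM_LOG_CONSOLE, "Waiting...\n");
    emscripten_sleep(10);  // unwinds/resumes the stack via Asyncify
  }
}
#endif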

void shutdown() {
wgpuBindGroupLayoutRelease(g_CopyBindGroupLayout);
wgpuRenderPipelineRelease(g_CopyPipeline);
wgpuBindGroupRelease(g_CopyBindGroup);
g_CopyBindGroupLayout = {};
g_CopyPipeline = {};
g_CopyBindGroup = {};
g_frameBuffer = {};
g_frameBufferResolved = {};
g_depthBuffer = {};
wgpuSwapChainRelease(g_swapChain);
wgpuQueueRelease(g_queue);
g_BackendBinding.reset();
wgpuDeviceDestroy(g_device);
g_Instance.reset();
wgpuSwapChainRelease(g_swapChain.Release());
wgpuQueueRelease(g_queue.Release());
wgpuDeviceDestroy(g_device.Release());
g_adapter = {};
#ifdef WEBGPU_DAWN
g_backendBinding.reset();
g_dawnInstance.reset();
#else
g_surface = {};
g_instance = {};
#endif
}
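shutdown() now leans on webgpu_cpp's reference-counted wrappers: assigning {} (or letting a wrapper go out of scope) drops this code's reference, so most of the explicit wgpu*Release calls from the old version disappear. A small sketch of the idiom, with illustrative names:

#include <webgpu/webgpu_cpp.h>

// Sketch: clearing a webgpu_cpp wrapper is enough to release the held handle.
void release_example(wgpu::BindGroup& bindGroup, wgpu::RenderPipeline& pipeline) {
  bindGroup = {};  // equivalent to wgpuBindGroupRelease on the held handle
  pipeline = {};   // assignment/destructor handles the release internally
}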

void resize_swapchain(uint32_t width, uint32_t height, bool force) {
if (!force && g_graphicsConfig.width == width && g_graphicsConfig.height == height) {
if (!force && g_graphicsConfig.swapChainDescriptor.width == width &&
g_graphicsConfig.swapChainDescriptor.height == height) {
return;
}
g_graphicsConfig.width = width;
g_graphicsConfig.height = height;
wgpuSwapChainConfigure(g_swapChain, g_graphicsConfig.colorFormat, WGPUTextureUsage_RenderAttachment, width, height);
g_graphicsConfig.swapChainDescriptor.width = width;
g_graphicsConfig.swapChainDescriptor.height = height;
#ifdef WEBGPU_DAWN
if (!g_swapChain) {
g_swapChain = g_device.CreateSwapChain(g_surface, &g_graphicsConfig.swapChainDescriptor);
}
g_swapChain.Configure(g_graphicsConfig.swapChainDescriptor.format, g_graphicsConfig.swapChainDescriptor.usage, width,
height);
#else
g_swapChain = g_device.CreateSwapChain(g_surface, &g_graphicsConfig.swapChainDescriptor);
#endif
g_frameBuffer = create_render_texture(true);
g_frameBufferResolved = create_render_texture(false);
g_depthBuffer = create_depth_texture();
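A hedged usage sketch of how resize_swapchain is typically driven from the windowing layer; the SDL event handling below is illustrative and not part of this diff (the real code derives the framebuffer size from window::get_window_size()).

#include <SDL.h>
#include <cstdint>

namespace aurora::webgpu {
void resize_swapchain(uint32_t width, uint32_t height, bool force);
}

// Sketch: forward size changes to the swapchain; `force` is only used for the
// initial configuration in initialize() above.
void handle_window_event(const SDL_WindowEvent& event) {
  if (event.event == SDL_WINDOWEVENT_SIZE_CHANGED) {
    aurora::webgpu::resize_swapchain(static_cast<uint32_t>(event.data1),
                                     static_cast<uint32_t>(event.data2), false);
  }
}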

@ -11,75 +11,30 @@ struct SDL_Window;

namespace aurora::webgpu {
struct GraphicsConfig {
uint32_t width;
uint32_t height;
WGPUTextureFormat colorFormat;
WGPUTextureFormat depthFormat;
wgpu::SwapChainDescriptor swapChainDescriptor;
wgpu::TextureFormat depthFormat;
uint32_t msaaSamples;
uint16_t textureAnisotropy;
};
struct TextureWithSampler {
wgpu::Texture texture;
wgpu::TextureView view;
WGPUExtent3D size;
WGPUTextureFormat format;
wgpu::Extent3D size;
wgpu::TextureFormat format;
wgpu::Sampler sampler;

// TextureWithSampler() = default;
// TextureWithSampler(WGPUTexture texture, WGPUTextureView view, WGPUExtent3D size, WGPUTextureFormat format,
// WGPUSampler sampler) noexcept
// : texture(texture), view(view), size(size), format(format), sampler(sampler) {}
// TextureWithSampler(const TextureWithSampler& rhs) noexcept
// : texture(rhs.texture), view(rhs.view), size(rhs.size), format(rhs.format), sampler(rhs.sampler) {
// wgpuTextureReference(texture);
// wgpuTextureViewReference(view);
// wgpuSamplerReference(sampler);
// }
// TextureWithSampler(TextureWithSampler&& rhs) noexcept
// : texture(rhs.texture), view(rhs.view), size(rhs.size), format(rhs.format), sampler(rhs.sampler) {
// rhs.texture = nullptr;
// rhs.view = nullptr;
// rhs.sampler = nullptr;
// }
// ~TextureWithSampler() { reset(); }
// TextureWithSampler& operator=(const TextureWithSampler& rhs) noexcept {
// reset();
// texture = rhs.texture;
// view = rhs.view;
// size = rhs.size;
// format = rhs.format;
// sampler = rhs.sampler;
// wgpuTextureReference(texture);
// wgpuTextureViewReference(view);
// wgpuSamplerReference(sampler);
// return *this;
// }
// void reset() {
// if (texture != nullptr) {
// wgpuTextureRelease(texture);
// texture = nullptr;
// }
// if (view != nullptr) {
// wgpuTextureViewRelease(view);
// view = nullptr;
// }
// if (sampler != nullptr) {
// wgpuSamplerRelease(sampler);
// sampler = nullptr;
// }
// }
};

extern WGPUDevice g_device;
extern WGPUQueue g_queue;
extern WGPUSwapChain g_swapChain;
extern WGPUBackendType g_backendType;
extern wgpu::Device g_device;
extern wgpu::Queue g_queue;
extern wgpu::SwapChain g_swapChain;
extern wgpu::BackendType g_backendType;
extern GraphicsConfig g_graphicsConfig;
extern TextureWithSampler g_frameBuffer;
extern TextureWithSampler g_frameBufferResolved;
extern TextureWithSampler g_depthBuffer;
extern WGPURenderPipeline g_CopyPipeline;
extern WGPUBindGroup g_CopyBindGroup;
extern wgpu::RenderPipeline g_CopyPipeline;
extern wgpu::BindGroup g_CopyBindGroup;
extern wgpu::Instance g_instance;

bool initialize(AuroraBackend backend);
void shutdown();

@ -1,102 +1,4 @@
#include <webgpu/webgpu.h>

namespace wgpu {
template <typename Derived, typename CType>
struct ObjectBase {
ObjectBase() = default;
ObjectBase(CType handle) : mHandle(handle) {}
~ObjectBase() { Reset(); }

ObjectBase(ObjectBase const& other) : ObjectBase(other.Get()) {}
Derived& operator=(ObjectBase const& other) {
if (&other != this) {
if (mHandle) {
Derived::WGPURelease(mHandle);
}
mHandle = other.mHandle;
if (mHandle) {
Derived::WGPUReference(mHandle);
}
}
return static_cast<Derived&>(*this);
}

ObjectBase(ObjectBase&& other) noexcept {
mHandle = other.mHandle;
other.mHandle = 0;
}
Derived& operator=(ObjectBase&& other) noexcept {
if (&other != this) {
if (mHandle) {
Derived::WGPURelease(mHandle);
}
mHandle = other.mHandle;
other.mHandle = nullptr;
}
return static_cast<Derived&>(*this);
}

ObjectBase(std::nullptr_t) {}
Derived& operator=(std::nullptr_t) {
if (mHandle != nullptr) {
Derived::WGPURelease(mHandle);
mHandle = nullptr;
}
return static_cast<Derived&>(*this);
}

bool operator==(std::nullptr_t) const { return mHandle == nullptr; }
bool operator!=(std::nullptr_t) const { return mHandle != nullptr; }

explicit operator bool() const { return mHandle != nullptr; }
operator CType() { return mHandle; }
[[nodiscard]] CType Get() const { return mHandle; }
CType Release() {
CType result = mHandle;
mHandle = 0;
return result;
}
void Reset() {
if (mHandle) {
Derived::WGPURelease(mHandle);
mHandle = nullptr;
}
}

protected:
CType mHandle = nullptr;
};

class Texture : public ObjectBase<Texture, WGPUTexture> {
public:
using ObjectBase::ObjectBase;
using ObjectBase::operator=;

private:
friend ObjectBase<Texture, WGPUTexture>;
static void WGPUReference(WGPUTexture handle) { wgpuTextureReference(handle); }
static void WGPURelease(WGPUTexture handle) { wgpuTextureRelease(handle); }
};

class TextureView : public ObjectBase<TextureView, WGPUTextureView> {
public:
using ObjectBase::ObjectBase;
using ObjectBase::operator=;

private:
friend ObjectBase<TextureView, WGPUTextureView>;
static void WGPUReference(WGPUTextureView handle) { wgpuTextureViewReference(handle); }
static void WGPURelease(WGPUTextureView handle) { wgpuTextureViewRelease(handle); }
};

class Sampler : public ObjectBase<Sampler, WGPUSampler> {
public:
using ObjectBase::ObjectBase;
using ObjectBase::operator=;

private:
friend ObjectBase<Sampler, WGPUSampler>;
static void WGPUReference(WGPUSampler handle) { wgpuSamplerReference(handle); }
static void WGPURelease(WGPUSampler handle) { wgpuSamplerRelease(handle); }
};
} // namespace wgpu
#include <webgpu/webgpu_cpp.h>
#ifdef EMSCRIPTEN
#include <emscripten.h>
#endif
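The hand-rolled ObjectBase/Texture/TextureView/Sampler wrappers deleted above are redundant once webgpu_cpp is included, since it ships equivalent reference-counted types. A small sketch of the replacement semantics; the function name is illustrative and rawTexture is assumed to come from a C-API call that transfers a reference.

#include <webgpu/webgpu_cpp.h>

// Sketch: webgpu_cpp wrappers provide what the removed ObjectBase did.
void wrapper_example(WGPUTexture rawTexture) {
  // Acquire() adopts an existing reference without adding a new one,
  // matching the old ObjectBase(CType) constructor.
  wgpu::Texture texture = wgpu::Texture::Acquire(rawTexture);
  wgpu::Texture copy = texture;    // copies add a reference
  WGPUTexture raw = texture.Get(); // borrow the underlying handle
  (void)raw;
}  // both wrappers release their references here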

@ -197,7 +197,7 @@ void show_window() {
}

bool initialize() {
if (SDL_Init(SDL_INIT_EVERYTHING) < 0) {
if (SDL_Init(SDL_INIT_EVERYTHING & ~SDL_INIT_HAPTIC) < 0) {
Log.report(LOG_FATAL, FMT_STRING("Error initializing SDL: {}"), SDL_GetError());
unreachable();
}
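The SDL_Init change masks the haptic subsystem out of SDL_INIT_EVERYTHING. A tiny self-contained sketch of how such a mask behaves; this is illustrative only and not part of the commit.

#include <SDL.h>
#include <cstdio>

// Sketch: initialize every SDL subsystem except haptic, then verify that
// haptic really was skipped. Error handling is reduced to a printout here.
int main() {
  if (SDL_Init(SDL_INIT_EVERYTHING & ~SDL_INIT_HAPTIC) < 0) {
    std::printf("SDL_Init failed: %s\n", SDL_GetError());
    return 1;
  }
  std::printf("haptic initialized: %s\n",
              SDL_WasInit(SDL_INIT_HAPTIC) ? "yes" : "no");
  SDL_Quit();
  return 0;
}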