Transition back to webgpu_cpp; initial emscripten support

Luke Street 2022-08-02 16:37:56 -04:00
parent 04590f30cd
commit 893cabe55a
25 changed files with 799 additions and 849 deletions
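The core of this commit is the move from the C header (webgpu/webgpu.h) back to the C++ wrapper (webgpu/webgpu_cpp.h): raw handles plus explicit wgpu*Release calls become RAII objects in the wgpu:: namespace. A minimal sketch of the same operation in both styles, assuming a valid device and queue are in scope (illustrative function names, not part of the commit):

#include <webgpu/webgpu.h>      // C API: raw handles, manual release
#include <webgpu/webgpu_cpp.h>  // C++ wrapper: RAII types in the wgpu:: namespace

// C API: every created handle must be released explicitly.
void encode_c(WGPUDevice device, WGPUQueue queue) {
  WGPUCommandEncoderDescriptor desc{.label = "example encoder"};
  WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, &desc);
  WGPUCommandBuffer buffer = wgpuCommandEncoderFinish(encoder, nullptr);
  wgpuQueueSubmit(queue, 1, &buffer);
  wgpuCommandBufferRelease(buffer);
  wgpuCommandEncoderRelease(encoder);
}

// webgpu_cpp: the wrappers drop their references when they go out of scope.
void encode_cpp(const wgpu::Device& device, const wgpu::Queue& queue) {
  wgpu::CommandEncoderDescriptor desc{.label = "example encoder"};
  wgpu::CommandEncoder encoder = device.CreateCommandEncoder(&desc);
  wgpu::CommandBuffer buffer = encoder.Finish();
  queue.Submit(1, &buffer);
}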


@@ -12,7 +12,6 @@ add_library(aurora STATIC
   lib/imgui.cpp
   lib/input.cpp
   lib/window.cpp
-  lib/dawn/BackendBinding.cpp
   lib/gfx/common.cpp
   lib/gfx/texture.cpp
   lib/gfx/gx.cpp
@@ -50,7 +49,15 @@ if (NOT TARGET SDL2::SDL2-static)
   find_package(SDL2 REQUIRED)
 endif ()
 target_link_libraries(aurora PUBLIC SDL2::SDL2-static fmt::fmt imgui xxhash)
-target_link_libraries(aurora PRIVATE dawn_native dawncpp webgpu_dawn absl::btree absl::flat_hash_map)
+if (EMSCRIPTEN)
+  target_link_options(aurora PUBLIC -sUSE_WEBGPU=1 -sASYNCIFY -sEXIT_RUNTIME)
+  target_compile_definitions(aurora PRIVATE ENABLE_BACKEND_WEBGPU)
+else ()
+  target_link_libraries(aurora PRIVATE dawn_native dawncpp webgpu_dawn)
+  target_sources(aurora PRIVATE lib/dawn/BackendBinding.cpp)
+  target_compile_definitions(aurora PRIVATE WEBGPU_DAWN)
+endif ()
+target_link_libraries(aurora PRIVATE absl::btree absl::flat_hash_map)
 if (DAWN_ENABLE_VULKAN)
   target_compile_definitions(aurora PRIVATE DAWN_ENABLE_BACKEND_VULKAN)
   target_sources(aurora PRIVATE lib/dawn/VulkanBinding.cpp)
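Under Emscripten the browser supplies the WebGPU implementation: -sUSE_WEBGPU=1 exposes it, -sASYNCIFY allows the synchronous-looking waits (emscripten_sleep) used later in the frame loop, and -sEXIT_RUNTIME lets the app shut down cleanly. The two new compile definitions are what the source changes below key off. A hedged sketch of how that split typically shows up in code (the real call sites are in the frame-loop diff further down):

#include <webgpu/webgpu_cpp.h>
#ifdef __EMSCRIPTEN__
#include <emscripten.h>
#endif

// Hedged sketch: WEBGPU_DAWN selects the native path, otherwise the browser presents.
void finish_frame(const wgpu::SwapChain& swapChain) {
#ifdef WEBGPU_DAWN
  swapChain.Present();  // native Dawn: the application presents the swapchain
#else
  emscripten_sleep(0);  // browser WebGPU: yield so the browser can present
#endif
}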

extern/CMakeLists.txt (vendored)

@@ -1,32 +1,37 @@
-if (NOT TARGET dawn_native)
-  if (CMAKE_SYSTEM_NAME STREQUAL Windows)
-    set(DAWN_ENABLE_DESKTOP_GL ON CACHE BOOL "Enable compilation of the OpenGL backend" FORCE)
-  endif ()
-  if (CMAKE_SYSTEM_NAME STREQUAL Linux)
-    set(DAWN_ENABLE_OPENGLES ON CACHE BOOL "Enable compilation of the OpenGL ES backend" FORCE)
-  endif ()
-  add_subdirectory(dawn EXCLUDE_FROM_ALL)
-  if (DAWN_ENABLE_VULKAN)
-    target_compile_definitions(dawn_native PRIVATE
-        DAWN_ENABLE_VULKAN_VALIDATION_LAYERS
-        DAWN_VK_DATA_DIR="vulkandata")
-  endif ()
-  if (MSVC)
-    target_compile_options(dawn_native PRIVATE /bigobj)
-  else ()
-    target_compile_options(SPIRV-Tools-static PRIVATE -Wno-implicit-fallthrough)
-    target_compile_options(SPIRV-Tools-opt PRIVATE -Wno-implicit-fallthrough)
-  endif ()
-endif ()
-if (NOT TARGET SDL2-static)
-  if (WIN32)
-    set(SDL_LIBC ON CACHE BOOL "Use the system C library" FORCE)
-  endif ()
-  add_subdirectory(SDL EXCLUDE_FROM_ALL)
-  if (NOT MSVC)
-    target_compile_options(SDL2-static PRIVATE -Wno-implicit-fallthrough -Wno-shadow)
-  endif ()
-endif ()
+if (NOT EMSCRIPTEN)
+  if (NOT TARGET dawn_native)
+    if (CMAKE_SYSTEM_NAME STREQUAL Windows)
+      set(DAWN_ENABLE_DESKTOP_GL ON CACHE BOOL "Enable compilation of the OpenGL backend" FORCE)
+    endif ()
+    if (CMAKE_SYSTEM_NAME STREQUAL Linux)
+      set(DAWN_ENABLE_OPENGLES ON CACHE BOOL "Enable compilation of the OpenGL ES backend" FORCE)
+    endif ()
+    add_subdirectory(dawn EXCLUDE_FROM_ALL)
+    if (DAWN_ENABLE_VULKAN)
+      target_compile_definitions(dawn_native PRIVATE
+          DAWN_ENABLE_VULKAN_VALIDATION_LAYERS
+          DAWN_VK_DATA_DIR="vulkandata")
+    endif ()
+    if (MSVC)
+      target_compile_options(dawn_native PRIVATE /bigobj)
+    else ()
+      target_compile_options(SPIRV-Tools-static PRIVATE -Wno-implicit-fallthrough)
+      target_compile_options(SPIRV-Tools-opt PRIVATE -Wno-implicit-fallthrough)
+    endif ()
+  endif ()
+  if (NOT TARGET SDL2-static)
+    if (WIN32)
+      set(SDL_LIBC ON CACHE BOOL "Use the system C library" FORCE)
+    endif ()
+    add_subdirectory(SDL EXCLUDE_FROM_ALL)
+    if (NOT MSVC)
+      target_compile_options(SDL2-static PRIVATE -Wno-implicit-fallthrough -Wno-shadow)
+    endif ()
+  endif ()
+else ()
+  set(ABSL_PROPAGATE_CXX_STD ON)
+  add_subdirectory(dawn/third_party/abseil-cpp EXCLUDE_FROM_ALL)
+endif ()
 
 if (NOT TARGET xxhash)
@@ -48,7 +53,7 @@ if (NOT TARGET imgui)
   if (CMAKE_COMPILER_IS_GNUCXX)
     # currently explicitly ignored for clang in imgui code, but not gcc (yet)
     target_compile_options(imgui PRIVATE -Wno-deprecated-enum-enum-conversion)
-  endif()
+  endif ()
   # Optional, replaces stb_freetype if available
   find_package(Freetype)


@@ -2,10 +2,16 @@
 #define DOLPHIN_TYPES_H
 
 #ifdef TARGET_PC
-#include <bits/wordsize.h>
-#endif
-
-#ifdef __MWERKS__
+#include <stdint.h>
+typedef int8_t s8;
+typedef int16_t s16;
+typedef int32_t s32;
+typedef int64_t s64;
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+#else
 typedef signed char s8;
 typedef signed short int s16;
 typedef signed long s32;
@@ -14,23 +20,6 @@ typedef unsigned char u8;
 typedef unsigned short int u16;
 typedef unsigned long u32;
 typedef unsigned long long int u64;
-#else
-typedef signed char s8;
-typedef signed short int s16;
-typedef signed int s32;
-#if __WORDSIZE == 64
-typedef signed long int s64;
-#else
-typedef signed long long int s64;
-#endif
-typedef unsigned char u8;
-typedef unsigned short int u16;
-typedef unsigned int u32;
-#if __WORDSIZE == 64
-typedef unsigned long int u64;
-#else
-typedef unsigned long long int u64;
-#endif
 #endif
 
 typedef volatile u8 vu8;
@@ -49,31 +38,43 @@ typedef double f64;
 typedef volatile f32 vf32;
 typedef volatile f64 vf64;
 
-#ifdef TARGET_PC
+#if defined(TARGET_PC) && !defined(_WIN32)
 #include <stdbool.h>
 typedef bool BOOL;
+#ifndef FALSE
 #define FALSE false
+#endif
+#ifndef TRUE
 #define TRUE true
+#endif
 #else
 typedef int BOOL;
+#ifndef FALSE
 #define FALSE 0
+#endif
+#ifndef TRUE
 #define TRUE 1
+#endif
 #endif
 
 #ifdef TARGET_PC
 #include <stddef.h>
 #else
+#ifndef NULL
 #define NULL 0
 #endif
+#endif
 
 #ifndef __cplusplus
+#ifndef nullptr
 #define nullptr NULL
 #endif
+#endif
 
 #if defined(__MWERKS__)
 #define AT_ADDRESS(addr) : (addr)
 #define ATTRIBUTE_ALIGN(num) __attribute__((aligned(num)))
 #elif defined(__GNUC__)
-#define AT_ADDRESS(addr) // was removed in GCC. define in linker script instead.
+#define AT_ADDRESS(addr)
 #define ATTRIBUTE_ALIGN(num) __attribute__((aligned(num)))
 #elif defined(_MSC_VER)
 #define AT_ADDRESS(addr)
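<bits/wordsize.h> is a glibc-internal header that does not exist under Emscripten or MSVC; switching the TARGET_PC branch to <stdint.h> makes every typedef width explicit instead of deriving it from __WORDSIZE. A hedged compile-time sanity check of the new typedefs (not part of the commit, just an illustration of what the change guarantees):

#include <stdint.h>

typedef int8_t s8;
typedef int16_t s16;
typedef int32_t s32;
typedef int64_t s64;
typedef uint32_t u32;

// <stdint.h> pins these sizes on every platform, including wasm32.
static_assert(sizeof(s8) == 1, "s8 must be 1 byte");
static_assert(sizeof(s16) == 2, "s16 must be 2 bytes");
static_assert(sizeof(s32) == 4, "s32 must be 4 bytes");
static_assert(sizeof(s64) == 8, "s64 must be 8 bytes");
static_assert(sizeof(u32) == 4, "u32 must be 4 bytes");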


@@ -20,6 +20,9 @@ using webgpu::g_queue;
 using webgpu::g_swapChain;
 
 constexpr std::array PreferredBackendOrder{
+#ifdef ENABLE_BACKEND_WEBGPU
+    BACKEND_WEBGPU,
+#endif
 #ifdef DAWN_ENABLE_BACKEND_D3D12
 // BACKEND_D3D12,
 #endif
@@ -90,7 +93,7 @@ static AuroraInfo initialize(int argc, char* argv[], const AuroraConfig& config)
   }
 
   // Initialize SDL_Renderer for ImGui when we can't use a Dawn backend
-  if (webgpu::g_backendType == WGPUBackendType_Null) {
+  if (webgpu::g_backendType == wgpu::BackendType::Null) {
     if (!window::create_renderer()) {
       Log.report(LOG_FATAL, FMT_STRING("Failed to initialize SDL renderer: {}"), SDL_GetError());
       unreachable();
@@ -118,13 +121,14 @@ static AuroraInfo initialize(int argc, char* argv[], const AuroraConfig& config)
   };
 }
 
-static WGPUTextureView g_currentView = nullptr;
+#ifndef EMSCRIPTEN
+static wgpu::TextureView g_currentView;
+#endif
 
 static void shutdown() noexcept {
-  if (g_currentView != nullptr) {
-    wgpuTextureViewRelease(g_currentView);
-    g_currentView = nullptr;
-  }
+#ifndef EMSCRIPTEN
+  g_currentView = {};
+#endif
   imgui::shutdown();
   gfx::shutdown();
   webgpu::shutdown();
@@ -142,7 +146,8 @@ static const AuroraEvent* update() noexcept {
 }
 
 static bool begin_frame() noexcept {
-  g_currentView = wgpuSwapChainGetCurrentTextureView(g_swapChain);
+#ifndef EMSCRIPTEN
+  g_currentView = g_swapChain.GetCurrentTextureView();
   if (!g_currentView) {
     ImGui::EndFrame();
     // Force swapchain recreation
@@ -150,50 +155,55 @@ static bool begin_frame() noexcept {
     webgpu::resize_swapchain(size.fb_width, size.fb_height, true);
     return false;
   }
+#endif
   gfx::begin_frame();
   return true;
 }
 
 static void end_frame() noexcept {
-  const auto encoderDescriptor = WGPUCommandEncoderDescriptor{
+  const auto encoderDescriptor = wgpu::CommandEncoderDescriptor{
       .label = "Redraw encoder",
   };
-  auto encoder = wgpuDeviceCreateCommandEncoder(g_device, &encoderDescriptor);
+  auto encoder = g_device.CreateCommandEncoder(&encoderDescriptor);
   gfx::end_frame(encoder);
   gfx::render(encoder);
   {
     const std::array attachments{
-        WGPURenderPassColorAttachment{
+        wgpu::RenderPassColorAttachment{
+#ifdef EMSCRIPTEN
+            .view = g_swapChain.GetCurrentTextureView(),
+#else
             .view = g_currentView,
-            .loadOp = WGPULoadOp_Clear,
-            .storeOp = WGPUStoreOp_Store,
+#endif
+            .loadOp = wgpu::LoadOp::Clear,
+            .storeOp = wgpu::StoreOp::Store,
         },
     };
-    const WGPURenderPassDescriptor renderPassDescriptor{
+    const wgpu::RenderPassDescriptor renderPassDescriptor{
         .label = "Post render pass",
         .colorAttachmentCount = attachments.size(),
        .colorAttachments = attachments.data(),
    };
-    auto pass = wgpuCommandEncoderBeginRenderPass(encoder, &renderPassDescriptor);
+    auto pass = encoder.BeginRenderPass(&renderPassDescriptor);
     // Copy EFB -> XFB (swapchain)
-    wgpuRenderPassEncoderSetPipeline(pass, webgpu::g_CopyPipeline);
-    wgpuRenderPassEncoderSetBindGroup(pass, 0, webgpu::g_CopyBindGroup, 0, nullptr);
-    wgpuRenderPassEncoderDraw(pass, 3, 1, 0, 0);
+    pass.SetPipeline(webgpu::g_CopyPipeline);
+    pass.SetBindGroup(0, webgpu::g_CopyBindGroup, 0, nullptr);
+    pass.Draw(3);
     if (!g_initialFrame) {
       // Render ImGui
       imgui::render(pass);
     }
-    wgpuRenderPassEncoderEnd(pass);
-    wgpuRenderPassEncoderRelease(pass);
+    pass.End();
   }
-  const WGPUCommandBufferDescriptor cmdBufDescriptor{.label = "Redraw command buffer"};
-  const auto buffer = wgpuCommandEncoderFinish(encoder, &cmdBufDescriptor);
-  wgpuQueueSubmit(g_queue, 1, &buffer);
-  wgpuCommandBufferRelease(buffer);
-  wgpuCommandEncoderRelease(encoder);
-  wgpuSwapChainPresent(g_swapChain);
-  wgpuTextureViewRelease(g_currentView);
-  g_currentView = nullptr;
+  const wgpu::CommandBufferDescriptor cmdBufDescriptor{.label = "Redraw command buffer"};
+  const auto buffer = encoder.Finish(&cmdBufDescriptor);
+  g_queue.Submit(1, &buffer);
+#ifdef WEBGPU_DAWN
+  g_swapChain.Present();
+  g_currentView = {};
+#else
+  emscripten_sleep(0);
+#endif
 
   if (!g_initialFrame) {
     ImGui::EndFrame();
   }
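With webgpu_cpp, end_frame() becomes a straight sequence of member calls and every explicit Release disappears. A condensed, hedged sketch of the native (WEBGPU_DAWN) frame path shown above, using hypothetical globals standing in for aurora's webgpu::g_device/g_queue/g_swapChain:

#include <webgpu/webgpu_cpp.h>

extern wgpu::Device g_device;      // assumption: provided elsewhere by the app
extern wgpu::Queue g_queue;
extern wgpu::SwapChain g_swapChain;

void present_one_frame() {
  // Acquire the swapchain view; the real code recreates the swapchain on failure.
  wgpu::TextureView view = g_swapChain.GetCurrentTextureView();
  if (!view) {
    return;
  }
  wgpu::CommandEncoderDescriptor encDesc{.label = "Redraw encoder"};
  wgpu::CommandEncoder encoder = g_device.CreateCommandEncoder(&encDesc);

  wgpu::RenderPassColorAttachment attachment{
      .view = view,
      .loadOp = wgpu::LoadOp::Clear,
      .storeOp = wgpu::StoreOp::Store,
  };
  wgpu::RenderPassDescriptor passDesc{
      .label = "Post render pass",
      .colorAttachmentCount = 1,
      .colorAttachments = &attachment,
  };
  wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&passDesc);
  // ... draw calls go here (aurora draws the EFB->XFB copy and ImGui) ...
  pass.End();

  wgpu::CommandBuffer buffer = encoder.Finish();
  g_queue.Submit(1, &buffer);
  g_swapChain.Present();  // native only; under Emscripten the browser presents
  // No wgpu*Release calls: the wrappers drop their references on scope exit.
}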


@ -37,28 +37,28 @@ BackendBinding* CreateVulkanBinding(SDL_Window* window, WGPUDevice device);
BackendBinding::BackendBinding(SDL_Window* window, WGPUDevice device) : m_window(window), m_device(device) {} BackendBinding::BackendBinding(SDL_Window* window, WGPUDevice device) : m_window(window), m_device(device) {}
bool DiscoverAdapter(dawn::native::Instance* instance, SDL_Window* window, WGPUBackendType type) { bool DiscoverAdapter(dawn::native::Instance* instance, SDL_Window* window, wgpu::BackendType type) {
switch (type) { switch (type) {
#if defined(DAWN_ENABLE_BACKEND_D3D12) #if defined(DAWN_ENABLE_BACKEND_D3D12)
case WGPUBackendType_D3D12: { case wgpu::BackendType::D3D12: {
dawn::native::d3d12::AdapterDiscoveryOptions options; dawn::native::d3d12::AdapterDiscoveryOptions options;
return instance->DiscoverAdapters(&options); return instance->DiscoverAdapters(&options);
} }
#endif #endif
#if defined(DAWN_ENABLE_BACKEND_METAL) #if defined(DAWN_ENABLE_BACKEND_METAL)
case WGPUBackendType_Metal: { case wgpu::BackendType::Metal: {
dawn::native::metal::AdapterDiscoveryOptions options; dawn::native::metal::AdapterDiscoveryOptions options;
return instance->DiscoverAdapters(&options); return instance->DiscoverAdapters(&options);
} }
#endif #endif
#if defined(DAWN_ENABLE_BACKEND_VULKAN) #if defined(DAWN_ENABLE_BACKEND_VULKAN)
case WGPUBackendType_Vulkan: { case wgpu::BackendType::Vulkan: {
dawn::native::vulkan::AdapterDiscoveryOptions options; dawn::native::vulkan::AdapterDiscoveryOptions options;
return instance->DiscoverAdapters(&options); return instance->DiscoverAdapters(&options);
} }
#endif #endif
#if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL) #if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
case WGPUBackendType_OpenGL: { case wgpu::BackendType::OpenGL: {
SDL_GL_ResetAttributes(); SDL_GL_ResetAttributes();
SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE); SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 4); SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 4);
@ -71,7 +71,7 @@ bool DiscoverAdapter(dawn::native::Instance* instance, SDL_Window* window, WGPUB
} }
#endif #endif
#if defined(DAWN_ENABLE_BACKEND_OPENGLES) #if defined(DAWN_ENABLE_BACKEND_OPENGLES)
case WGPUBackendType_OpenGLES: { case wgpu::BackendType::OpenGLES: {
SDL_GL_ResetAttributes(); SDL_GL_ResetAttributes();
SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_ES); SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_ES);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3); SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3);
@ -84,7 +84,7 @@ bool DiscoverAdapter(dawn::native::Instance* instance, SDL_Window* window, WGPUB
} }
#endif #endif
#if defined(DAWN_ENABLE_BACKEND_NULL) #if defined(DAWN_ENABLE_BACKEND_NULL)
case WGPUBackendType_Null: case wgpu::BackendType::Null:
instance->DiscoverDefaultAdapters(); instance->DiscoverDefaultAdapters();
return true; return true;
#endif #endif
@ -93,30 +93,30 @@ bool DiscoverAdapter(dawn::native::Instance* instance, SDL_Window* window, WGPUB
} }
} }
BackendBinding* CreateBinding(WGPUBackendType type, SDL_Window* window, WGPUDevice device) { BackendBinding* CreateBinding(wgpu::BackendType type, SDL_Window* window, WGPUDevice device) {
switch (type) { switch (type) {
#if defined(DAWN_ENABLE_BACKEND_D3D12) #if defined(DAWN_ENABLE_BACKEND_D3D12)
case WGPUBackendType_D3D12: case wgpu::BackendType::D3D12:
return CreateD3D12Binding(window, device); return CreateD3D12Binding(window, device);
#endif #endif
#if defined(DAWN_ENABLE_BACKEND_METAL) #if defined(DAWN_ENABLE_BACKEND_METAL)
case WGPUBackendType_Metal: case wgpu::BackendType::Metal:
return CreateMetalBinding(window, device); return CreateMetalBinding(window, device);
#endif #endif
#if defined(DAWN_ENABLE_BACKEND_NULL) #if defined(DAWN_ENABLE_BACKEND_NULL)
case WGPUBackendType_Null: case wgpu::BackendType::Null:
return CreateNullBinding(window, device); return CreateNullBinding(window, device);
#endif #endif
#if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL) #if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
case WGPUBackendType_OpenGL: case wgpu::BackendType::OpenGL:
return CreateOpenGLBinding(window, device); return CreateOpenGLBinding(window, device);
#endif #endif
#if defined(DAWN_ENABLE_BACKEND_OPENGLES) #if defined(DAWN_ENABLE_BACKEND_OPENGLES)
case WGPUBackendType_OpenGLES: case wgpu::BackendType::OpenGLES:
return CreateOpenGLBinding(window, device); return CreateOpenGLBinding(window, device);
#endif #endif
#if defined(DAWN_ENABLE_BACKEND_VULKAN) #if defined(DAWN_ENABLE_BACKEND_VULKAN)
case WGPUBackendType_Vulkan: case wgpu::BackendType::Vulkan:
return CreateVulkanBinding(window, device); return CreateVulkanBinding(window, device);
#endif #endif
default: default:
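DiscoverAdapter and CreateBinding now take wgpu::BackendType, so callers pass the scoped enum instead of WGPUBackendType_* constants. A hedged usage sketch, mirroring the declarations from BackendBinding.hpp below and assuming an already-created dawn::native::Instance, SDL_Window, and WGPUDevice:

#include <dawn/native/DawnNative.h>
#include <webgpu/webgpu_cpp.h>

struct SDL_Window;

namespace aurora::webgpu::utils {
class BackendBinding;
bool DiscoverAdapter(dawn::native::Instance* instance, SDL_Window* window, wgpu::BackendType type);
BackendBinding* CreateBinding(wgpu::BackendType type, SDL_Window* window, WGPUDevice device);
} // namespace aurora::webgpu::utils

// Hedged sketch: discover Vulkan adapters, then create the swapchain binding.
aurora::webgpu::utils::BackendBinding* setup_vulkan(dawn::native::Instance* instance, SDL_Window* window,
                                                    WGPUDevice device) {
  constexpr wgpu::BackendType type = wgpu::BackendType::Vulkan;
  if (!aurora::webgpu::utils::DiscoverAdapter(instance, window, type)) {
    return nullptr;  // backend not compiled in, or no adapter found
  }
  return aurora::webgpu::utils::CreateBinding(type, window, device);
}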


@ -1,7 +1,7 @@
#pragma once #pragma once
#include <dawn/native/DawnNative.h> #include <dawn/native/DawnNative.h>
#include <webgpu/webgpu.h> #include <webgpu/webgpu_cpp.h>
struct SDL_Window; struct SDL_Window;
@ -21,7 +21,7 @@ protected:
WGPUDevice m_device = nullptr; WGPUDevice m_device = nullptr;
}; };
bool DiscoverAdapter(dawn::native::Instance* instance, SDL_Window* window, WGPUBackendType type); bool DiscoverAdapter(dawn::native::Instance* instance, SDL_Window* window, wgpu::BackendType type);
BackendBinding* CreateBinding(WGPUBackendType type, SDL_Window* window, WGPUDevice device); BackendBinding* CreateBinding(wgpu::BackendType type, SDL_Window* window, WGPUDevice device);
} // namespace aurora::webgpu::utils } // namespace aurora::webgpu::utils


@ -18,6 +18,7 @@ namespace aurora::gfx {
static Module Log("aurora::gfx"); static Module Log("aurora::gfx");
using webgpu::g_device; using webgpu::g_device;
using webgpu::g_instance;
using webgpu::g_queue; using webgpu::g_queue;
#ifdef AURORA_GFX_DEBUG_GROUPS #ifdef AURORA_GFX_DEBUG_GROUPS
@ -90,31 +91,31 @@ namespace aurora {
// we create specialized methods to handle them. Note that these are highly dependent on // we create specialized methods to handle them. Note that these are highly dependent on
// the structure definition, which could easily change with Dawn updates. // the structure definition, which could easily change with Dawn updates.
template <> template <>
inline HashType xxh3_hash(const WGPUBindGroupDescriptor& input, HashType seed) { inline HashType xxh3_hash(const wgpu::BindGroupDescriptor& input, HashType seed) {
constexpr auto offset = sizeof(void*) * 2; // skip nextInChain, label constexpr auto offset = sizeof(void*) * 2; // skip nextInChain, label
const auto hash = xxh3_hash_s(reinterpret_cast<const u8*>(&input) + offset, const auto hash = xxh3_hash_s(reinterpret_cast<const u8*>(&input) + offset,
sizeof(WGPUBindGroupDescriptor) - offset - sizeof(void*) /* skip entries */, seed); sizeof(wgpu::BindGroupDescriptor) - offset - sizeof(void*) /* skip entries */, seed);
return xxh3_hash_s(input.entries, sizeof(WGPUBindGroupEntry) * input.entryCount, hash); return xxh3_hash_s(input.entries, sizeof(wgpu::BindGroupEntry) * input.entryCount, hash);
} }
template <> template <>
inline HashType xxh3_hash(const WGPUSamplerDescriptor& input, HashType seed) { inline HashType xxh3_hash(const wgpu::SamplerDescriptor& input, HashType seed) {
constexpr auto offset = sizeof(void*) * 2; // skip nextInChain, label constexpr auto offset = sizeof(void*) * 2; // skip nextInChain, label
return xxh3_hash_s(reinterpret_cast<const u8*>(&input) + offset, return xxh3_hash_s(reinterpret_cast<const u8*>(&input) + offset,
sizeof(WGPUSamplerDescriptor) - offset - 2 /* skip padding */, seed); sizeof(wgpu::SamplerDescriptor) - offset - 2 /* skip padding */, seed);
} }
} // namespace aurora } // namespace aurora
namespace aurora::gfx { namespace aurora::gfx {
using NewPipelineCallback = std::function<WGPURenderPipeline()>; using NewPipelineCallback = std::function<wgpu::RenderPipeline()>;
std::mutex g_pipelineMutex; std::mutex g_pipelineMutex;
static bool g_hasPipelineThread = false; static bool g_hasPipelineThread = false;
static std::thread g_pipelineThread; static std::thread g_pipelineThread;
static std::atomic_bool g_pipelineThreadEnd; static std::atomic_bool g_pipelineThreadEnd;
static std::condition_variable g_pipelineCv; static std::condition_variable g_pipelineCv;
static absl::flat_hash_map<PipelineRef, WGPURenderPipeline> g_pipelines; static absl::flat_hash_map<PipelineRef, wgpu::RenderPipeline> g_pipelines;
static std::deque<std::pair<PipelineRef, NewPipelineCallback>> g_queuedPipelines; static std::deque<std::pair<PipelineRef, NewPipelineCallback>> g_queuedPipelines;
static absl::flat_hash_map<BindGroupRef, WGPUBindGroup> g_cachedBindGroups; static absl::flat_hash_map<BindGroupRef, wgpu::BindGroup> g_cachedBindGroups;
static absl::flat_hash_map<SamplerRef, WGPUSampler> g_cachedSamplers; static absl::flat_hash_map<SamplerRef, wgpu::Sampler> g_cachedSamplers;
std::atomic_uint32_t queuedPipelines; std::atomic_uint32_t queuedPipelines;
std::atomic_uint32_t createdPipelines; std::atomic_uint32_t createdPipelines;
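The xxh3_hash specializations in the hunk above hash wgpu::BindGroupDescriptor and wgpu::SamplerDescriptor by raw bytes, skipping nextInChain and label. That only works because the webgpu_cpp structs mirror the layout of their C counterparts. A hedged sketch of the kind of compile-time guard that documents this assumption (not part of the commit):

#include <webgpu/webgpu.h>
#include <webgpu/webgpu_cpp.h>

// The byte-wise descriptor hashing relies on the C++ wrappers being
// layout-compatible with the C structs they wrap.
static_assert(sizeof(wgpu::BindGroupDescriptor) == sizeof(WGPUBindGroupDescriptor),
              "C++ wrapper must mirror the C struct layout");
static_assert(sizeof(wgpu::SamplerDescriptor) == sizeof(WGPUSamplerDescriptor),
              "C++ wrapper must mirror the C struct layout");
static_assert(sizeof(wgpu::BindGroupEntry) == sizeof(WGPUBindGroupEntry),
              "entries are hashed as a contiguous array of C-layout structs");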
@ -123,12 +124,12 @@ static ByteBuffer g_uniforms;
static ByteBuffer g_indices; static ByteBuffer g_indices;
static ByteBuffer g_storage; static ByteBuffer g_storage;
static ByteBuffer g_textureUpload; static ByteBuffer g_textureUpload;
WGPUBuffer g_vertexBuffer; wgpu::Buffer g_vertexBuffer;
WGPUBuffer g_uniformBuffer; wgpu::Buffer g_uniformBuffer;
WGPUBuffer g_indexBuffer; wgpu::Buffer g_indexBuffer;
WGPUBuffer g_storageBuffer; wgpu::Buffer g_storageBuffer;
static std::array<WGPUBuffer, 3> g_stagingBuffers; static std::array<wgpu::Buffer, 3> g_stagingBuffers;
static WGPUSupportedLimits g_cachedLimits; static wgpu::SupportedLimits g_cachedLimits;
static ShaderState g_state; static ShaderState g_state;
static PipelineRef g_currentPipeline; static PipelineRef g_currentPipeline;
@ -260,16 +261,16 @@ void set_scissor(uint32_t x, uint32_t y, uint32_t w, uint32_t h) noexcept {
} }
} }
static inline bool operator==(const WGPUExtent3D& lhs, const WGPUExtent3D& rhs) { static inline bool operator==(const wgpu::Extent3D& lhs, const wgpu::Extent3D& rhs) {
return lhs.width == rhs.width && lhs.height == rhs.height && lhs.depthOrArrayLayers == rhs.depthOrArrayLayers; return lhs.width == rhs.width && lhs.height == rhs.height && lhs.depthOrArrayLayers == rhs.depthOrArrayLayers;
} }
static inline bool operator!=(const WGPUExtent3D& lhs, const WGPUExtent3D& rhs) { return !(lhs == rhs); } static inline bool operator!=(const wgpu::Extent3D& lhs, const wgpu::Extent3D& rhs) { return !(lhs == rhs); }
void resolve_color(const ClipRect& rect, uint32_t bind, GXTexFmt fmt, bool clear_depth) noexcept { void resolve_color(const ClipRect& rect, uint32_t bind, GXTexFmt fmt, bool clear_depth) noexcept {
if (g_resolvedTextures.size() < bind + 1) { if (g_resolvedTextures.size() < bind + 1) {
g_resolvedTextures.resize(bind + 1); g_resolvedTextures.resize(bind + 1);
} }
const WGPUExtent3D size{ const wgpu::Extent3D size{
.width = static_cast<uint32_t>(rect.width), .width = static_cast<uint32_t>(rect.width),
.height = static_cast<uint32_t>(rect.height), .height = static_cast<uint32_t>(rect.height),
.depthOrArrayLayers = 1, .depthOrArrayLayers = 1,
@ -356,7 +357,8 @@ static void pipeline_worker() {
void initialize() { void initialize() {
// No async pipelines for OpenGL (ES) // No async pipelines for OpenGL (ES)
if (webgpu::g_backendType == WGPUBackendType_OpenGL || webgpu::g_backendType == WGPUBackendType_OpenGLES) { if (webgpu::g_backendType == wgpu::BackendType::OpenGL || webgpu::g_backendType == wgpu::BackendType::OpenGLES ||
webgpu::g_backendType == wgpu::BackendType::WebGPU) {
g_hasPipelineThread = false; g_hasPipelineThread = false;
} else { } else {
g_pipelineThreadEnd = false; g_pipelineThreadEnd = false;
@ -365,29 +367,30 @@ void initialize() {
} }
// For uniform & storage buffer offset alignments // For uniform & storage buffer offset alignments
wgpuDeviceGetLimits(g_device, &g_cachedLimits); g_device.GetLimits(&g_cachedLimits);
const auto createBuffer = [](WGPUBuffer& out, WGPUBufferUsageFlags usage, uint64_t size, const char* label) { const auto createBuffer = [](wgpu::Buffer& out, wgpu::BufferUsage usage, uint64_t size, const char* label) {
if (size <= 0) { if (size <= 0) {
return; return;
} }
const WGPUBufferDescriptor descriptor{ const wgpu::BufferDescriptor descriptor{
.label = label, .label = label,
.usage = usage, .usage = usage,
.size = size, .size = size,
}; };
out = wgpuDeviceCreateBuffer(g_device, &descriptor); out = g_device.CreateBuffer(&descriptor);
}; };
createBuffer(g_uniformBuffer, WGPUBufferUsage_Uniform | WGPUBufferUsage_CopyDst, UniformBufferSize, createBuffer(g_uniformBuffer, wgpu::BufferUsage::Uniform | wgpu::BufferUsage::CopyDst, UniformBufferSize,
"Shared Uniform Buffer"); "Shared Uniform Buffer");
createBuffer(g_vertexBuffer, WGPUBufferUsage_Vertex | WGPUBufferUsage_CopyDst, VertexBufferSize, createBuffer(g_vertexBuffer, wgpu::BufferUsage::Vertex | wgpu::BufferUsage::CopyDst, VertexBufferSize,
"Shared Vertex Buffer"); "Shared Vertex Buffer");
createBuffer(g_indexBuffer, WGPUBufferUsage_Index | WGPUBufferUsage_CopyDst, IndexBufferSize, "Shared Index Buffer"); createBuffer(g_indexBuffer, wgpu::BufferUsage::Index | wgpu::BufferUsage::CopyDst, IndexBufferSize,
createBuffer(g_storageBuffer, WGPUBufferUsage_Storage | WGPUBufferUsage_CopyDst, StorageBufferSize, "Shared Index Buffer");
createBuffer(g_storageBuffer, wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopyDst, StorageBufferSize,
"Shared Storage Buffer"); "Shared Storage Buffer");
for (int i = 0; i < g_stagingBuffers.size(); ++i) { for (int i = 0; i < g_stagingBuffers.size(); ++i) {
const auto label = fmt::format(FMT_STRING("Staging Buffer {}"), i); const auto label = fmt::format(FMT_STRING("Staging Buffer {}"), i);
createBuffer(g_stagingBuffers[i], WGPUBufferUsage_MapWrite | WGPUBufferUsage_CopySrc, StagingBufferSize, createBuffer(g_stagingBuffers[i], wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc, StagingBufferSize,
label.c_str()); label.c_str());
} }
map_staging_buffer(); map_staging_buffer();
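Buffer creation keeps the same shape, just through wgpu::Device::CreateBuffer, with usage flags expressed as the wgpu::BufferUsage enum class combined via the overloaded | operator. A hedged, standalone version of the createBuffer helper pattern used above:

#include <cstdint>
#include <webgpu/webgpu_cpp.h>

// Hedged sketch of the createBuffer lambda above.
wgpu::Buffer create_buffer(const wgpu::Device& device, wgpu::BufferUsage usage, uint64_t size, const char* label) {
  if (size == 0) {
    return {};  // empty wrapper; no GPU object created
  }
  const wgpu::BufferDescriptor descriptor{
      .label = label,
      .usage = usage,
      .size = size,
  };
  return device.CreateBuffer(&descriptor);
}

// Example: a 1 MiB uniform buffer that the shared staging buffer copies into.
// wgpu::Buffer uniforms = create_buffer(device, wgpu::BufferUsage::Uniform | wgpu::BufferUsage::CopyDst,
//                                       1024 * 1024, "Example Uniform Buffer");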
@@ -472,41 +475,15 @@ void shutdown() {
   g_resolvedTextures.clear();
   g_textureUploads.clear();
-  for (const auto& item : g_cachedBindGroups) {
-    wgpuBindGroupRelease(item.second);
-  }
   g_cachedBindGroups.clear();
-  for (const auto& item : g_cachedSamplers) {
-    wgpuSamplerRelease(item.second);
-  }
   g_cachedSamplers.clear();
-  for (const auto& item : g_pipelines) {
-    wgpuRenderPipelineRelease(item.second);
-  }
   g_pipelines.clear();
   g_queuedPipelines.clear();
-  if (g_vertexBuffer != nullptr) {
-    wgpuBufferDestroy(g_vertexBuffer);
-    g_vertexBuffer = nullptr;
-  }
-  if (g_uniformBuffer != nullptr) {
-    wgpuBufferDestroy(g_uniformBuffer);
-    g_uniformBuffer = nullptr;
-  }
-  if (g_indexBuffer != nullptr) {
-    wgpuBufferDestroy(g_indexBuffer);
-    g_indexBuffer = nullptr;
-  }
-  if (g_storageBuffer != nullptr) {
-    wgpuBufferDestroy(g_storageBuffer);
-    g_storageBuffer = nullptr;
-  }
-  for (auto& item : g_stagingBuffers) {
-    if (item != nullptr) {
-      wgpuBufferDestroy(item);
-    }
-    item = nullptr;
-  }
+  g_vertexBuffer = {};
+  g_uniformBuffer = {};
+  g_indexBuffer = {};
+  g_storageBuffer = {};
+  g_stagingBuffers.fill({});
 
   g_renderPasses.clear();
   g_currentRenderPass = UINT32_MAX;
@ -520,8 +497,8 @@ static size_t currentStagingBuffer = 0;
static bool bufferMapped = false; static bool bufferMapped = false;
void map_staging_buffer() { void map_staging_buffer() {
bufferMapped = false; bufferMapped = false;
wgpuBufferMapAsync( g_stagingBuffers[currentStagingBuffer].MapAsync(
g_stagingBuffers[currentStagingBuffer], WGPUMapMode_Write, 0, StagingBufferSize, wgpu::MapMode::Write, 0, StagingBufferSize,
[](WGPUBufferMapAsyncStatus status, void* userdata) { [](WGPUBufferMapAsyncStatus status, void* userdata) {
if (status == WGPUBufferMapAsyncStatus_DestroyedBeforeCallback) { if (status == WGPUBufferMapAsyncStatus_DestroyedBeforeCallback) {
return; return;
@ -536,7 +513,11 @@ void map_staging_buffer() {
void begin_frame() { void begin_frame() {
while (!bufferMapped) { while (!bufferMapped) {
wgpuDeviceTick(g_device); #ifdef EMSCRIPTEN
emscripten_sleep(0);
#else
g_device.Tick();
#endif
} }
size_t bufferOffset = 0; size_t bufferOffset = 0;
auto& stagingBuf = g_stagingBuffers[currentStagingBuffer]; auto& stagingBuf = g_stagingBuffers[currentStagingBuffer];
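begin_frame() has to wait until the staging-buffer mapping completes; natively that means pumping Dawn with Device::Tick(), while under Emscripten (with -sASYNCIFY) the wait yields to the browser via emscripten_sleep(0) so the map callback can fire. A hedged sketch of that map-and-wait pattern in isolation:

#include <webgpu/webgpu_cpp.h>
#ifdef __EMSCRIPTEN__
#include <emscripten.h>
#endif

// Hedged sketch of the pattern above, for an arbitrary write-mappable staging buffer.
void map_and_wait(const wgpu::Device& device, const wgpu::Buffer& staging, size_t size) {
  bool mapped = false;
  staging.MapAsync(
      wgpu::MapMode::Write, 0, size,
      [](WGPUBufferMapAsyncStatus status, void* userdata) {
        // Callback still receives the C status enum; the real code aborts on failure,
        // here any terminal status just ends the poll loop.
        (void)status;
        *static_cast<bool*>(userdata) = true;
      },
      &mapped);
  while (!mapped) {
#ifdef __EMSCRIPTEN__
    emscripten_sleep(0);  // requires -sASYNCIFY; lets the browser run the callback
#else
    device.Tick();  // Dawn extension: process pending callbacks
#endif
  }
}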
@ -544,8 +525,7 @@ void begin_frame() {
if (size <= 0) { if (size <= 0) {
return; return;
} }
buf = ByteBuffer{static_cast<u8*>(wgpuBufferGetMappedRange(stagingBuf, bufferOffset, size)), buf = ByteBuffer{static_cast<u8*>(stagingBuf.GetMappedRange(bufferOffset, size)), static_cast<size_t>(size)};
static_cast<size_t>(size)};
bufferOffset += size; bufferOffset += size;
}; };
mapBuffer(g_verts, VertexBufferSize); mapBuffer(g_verts, VertexBufferSize);
@ -560,23 +540,22 @@ void begin_frame() {
g_renderPasses.emplace_back(); g_renderPasses.emplace_back();
g_renderPasses[0].clearColor = gx::g_gxState.clearColor; g_renderPasses[0].clearColor = gx::g_gxState.clearColor;
g_currentRenderPass = 0; g_currentRenderPass = 0;
// push_command(CommandType::SetViewport, Command::Data{.setViewport = g_cachedViewport}); // push_command(CommandType::SetViewport, Command::Data{.setViewport = g_cachedViewport});
// push_command(CommandType::SetScissor, Command::Data{.setScissor = g_cachedScissor}); // push_command(CommandType::SetScissor, Command::Data{.setScissor = g_cachedScissor});
} }
void end_frame(WGPUCommandEncoder cmd) { void end_frame(const wgpu::CommandEncoder& cmd) {
uint64_t bufferOffset = 0; uint64_t bufferOffset = 0;
const auto writeBuffer = [&](ByteBuffer& buf, WGPUBuffer& out, uint64_t size, std::string_view label) { const auto writeBuffer = [&](ByteBuffer& buf, wgpu::Buffer& out, uint64_t size, std::string_view label) {
const auto writeSize = buf.size(); // Only need to copy this many bytes const auto writeSize = buf.size(); // Only need to copy this many bytes
if (writeSize > 0) { if (writeSize > 0) {
wgpuCommandEncoderCopyBufferToBuffer(cmd, g_stagingBuffers[currentStagingBuffer], bufferOffset, out, 0, cmd.CopyBufferToBuffer(g_stagingBuffers[currentStagingBuffer], bufferOffset, out, 0, ALIGN(writeSize, 4));
ALIGN(writeSize, 4));
buf.clear(); buf.clear();
} }
bufferOffset += size; bufferOffset += size;
return writeSize; return writeSize;
}; };
wgpuBufferUnmap(g_stagingBuffers[currentStagingBuffer]); g_stagingBuffers[currentStagingBuffer].Unmap();
g_lastVertSize = writeBuffer(g_verts, g_vertexBuffer, VertexBufferSize, "Vertex"); g_lastVertSize = writeBuffer(g_verts, g_vertexBuffer, VertexBufferSize, "Vertex");
g_lastUniformSize = writeBuffer(g_uniforms, g_uniformBuffer, UniformBufferSize, "Uniform"); g_lastUniformSize = writeBuffer(g_uniforms, g_uniformBuffer, UniformBufferSize, "Uniform");
g_lastIndexSize = writeBuffer(g_indices, g_indexBuffer, IndexBufferSize, "Index"); g_lastIndexSize = writeBuffer(g_indices, g_indexBuffer, IndexBufferSize, "Index");
@ -584,16 +563,16 @@ void end_frame(WGPUCommandEncoder cmd) {
{ {
// Perform texture copies // Perform texture copies
for (const auto& item : g_textureUploads) { for (const auto& item : g_textureUploads) {
const WGPUImageCopyBuffer buf{ const wgpu::ImageCopyBuffer buf{
.layout = .layout =
WGPUTextureDataLayout{ wgpu::TextureDataLayout{
.offset = item.layout.offset + bufferOffset, .offset = item.layout.offset + bufferOffset,
.bytesPerRow = ALIGN(item.layout.bytesPerRow, 256), .bytesPerRow = ALIGN(item.layout.bytesPerRow, 256),
.rowsPerImage = item.layout.rowsPerImage, .rowsPerImage = item.layout.rowsPerImage,
}, },
.buffer = g_stagingBuffers[currentStagingBuffer], .buffer = g_stagingBuffers[currentStagingBuffer],
}; };
wgpuCommandEncoderCopyBufferToTexture(cmd, &buf, &item.tex, &item.size); cmd.CopyBufferToTexture(&buf, &item.tex, &item.size);
} }
g_textureUploads.clear(); g_textureUploads.clear();
g_textureUpload.clear(); g_textureUpload.clear();
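The staging upload path above depends on two WebGPU alignment rules: CopyBufferToBuffer sizes must be multiples of 4, and CopyBufferToTexture requires bytesPerRow aligned to 256, which is why both calls wrap their sizes in ALIGN(). A hedged sketch of that rounding helper with worked values:

#include <cstdint>

// Round a copy size or row pitch up to the alignment WebGPU requires
// (4 bytes for buffer-to-buffer copy sizes, 256 bytes for bytesPerRow).
constexpr uint64_t align_up(uint64_t value, uint64_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

static_assert(align_up(13, 4) == 16);
static_assert(align_up(640 * 4, 256) == 2560);  // a 640-pixel RGBA8 row is already aligned
static_assert(align_up(100 * 4, 256) == 512);   // a 100-pixel RGBA8 row pads up to 512 bytes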
@ -603,7 +582,7 @@ void end_frame(WGPUCommandEncoder cmd) {
g_currentRenderPass = UINT32_MAX; g_currentRenderPass = UINT32_MAX;
} }
void render(WGPUCommandEncoder cmd) { void render(wgpu::CommandEncoder& cmd) {
for (u32 i = 0; i < g_renderPasses.size(); ++i) { for (u32 i = 0; i < g_renderPasses.size(); ++i) {
const auto& passInfo = g_renderPasses[i]; const auto& passInfo = g_renderPasses[i];
bool finalPass = i == g_renderPasses.size() - 1; bool finalPass = i == g_renderPasses.size() - 1;
@ -612,12 +591,11 @@ void render(WGPUCommandEncoder cmd) {
unreachable(); unreachable();
} }
const std::array attachments{ const std::array attachments{
WGPURenderPassColorAttachment{ wgpu::RenderPassColorAttachment{
.view = webgpu::g_frameBuffer.view, .view = webgpu::g_frameBuffer.view,
.resolveTarget = webgpu::g_graphicsConfig.msaaSamples > 1 ? webgpu::g_frameBufferResolved.view : nullptr, .resolveTarget = webgpu::g_graphicsConfig.msaaSamples > 1 ? webgpu::g_frameBufferResolved.view : nullptr,
.loadOp = passInfo.clear ? WGPULoadOp_Clear : WGPULoadOp_Load, .loadOp = passInfo.clear ? wgpu::LoadOp::Clear : wgpu::LoadOp::Load,
.storeOp = WGPUStoreOp_Store, .storeOp = wgpu::StoreOp::Store,
.clearColor = {NAN, NAN, NAN, NAN},
.clearValue = .clearValue =
{ {
.r = passInfo.clearColor.x(), .r = passInfo.clearColor.x(),
@ -627,29 +605,27 @@ void render(WGPUCommandEncoder cmd) {
}, },
}, },
}; };
const WGPURenderPassDepthStencilAttachment depthStencilAttachment{ const wgpu::RenderPassDepthStencilAttachment depthStencilAttachment{
.view = webgpu::g_depthBuffer.view, .view = webgpu::g_depthBuffer.view,
.depthLoadOp = passInfo.clear ? WGPULoadOp_Clear : WGPULoadOp_Load, .depthLoadOp = passInfo.clear ? wgpu::LoadOp::Clear : wgpu::LoadOp::Load,
.depthStoreOp = WGPUStoreOp_Store, .depthStoreOp = wgpu::StoreOp::Store,
.clearDepth = NAN,
.depthClearValue = 1.f, .depthClearValue = 1.f,
}; };
const auto label = fmt::format(FMT_STRING("Render pass {}"), i); const auto label = fmt::format(FMT_STRING("Render pass {}"), i);
const WGPURenderPassDescriptor renderPassDescriptor{ const wgpu::RenderPassDescriptor renderPassDescriptor{
.label = label.c_str(), .label = label.c_str(),
.colorAttachmentCount = attachments.size(), .colorAttachmentCount = attachments.size(),
.colorAttachments = attachments.data(), .colorAttachments = attachments.data(),
.depthStencilAttachment = &depthStencilAttachment, .depthStencilAttachment = &depthStencilAttachment,
}; };
auto pass = wgpuCommandEncoderBeginRenderPass(cmd, &renderPassDescriptor); auto pass = cmd.BeginRenderPass(&renderPassDescriptor);
render_pass(pass, i); render_pass(pass, i);
wgpuRenderPassEncoderEnd(pass); pass.End();
wgpuRenderPassEncoderRelease(pass);
if (passInfo.resolveTarget != UINT32_MAX) { if (passInfo.resolveTarget != UINT32_MAX) {
WGPUImageCopyTexture src{ wgpu::ImageCopyTexture src{
.origin = .origin =
WGPUOrigin3D{ wgpu::Origin3D{
.x = static_cast<uint32_t>(passInfo.resolveRect.x), .x = static_cast<uint32_t>(passInfo.resolveRect.x),
.y = static_cast<uint32_t>(passInfo.resolveRect.y), .y = static_cast<uint32_t>(passInfo.resolveRect.y),
}, },
@ -660,21 +636,21 @@ void render(WGPUCommandEncoder cmd) {
src.texture = webgpu::g_frameBuffer.texture; src.texture = webgpu::g_frameBuffer.texture;
} }
auto& target = g_resolvedTextures[passInfo.resolveTarget]; auto& target = g_resolvedTextures[passInfo.resolveTarget];
const WGPUImageCopyTexture dst{ const wgpu::ImageCopyTexture dst{
.texture = target->texture, .texture = target->texture,
}; };
const WGPUExtent3D size{ const wgpu::Extent3D size{
.width = static_cast<uint32_t>(passInfo.resolveRect.width), .width = static_cast<uint32_t>(passInfo.resolveRect.width),
.height = static_cast<uint32_t>(passInfo.resolveRect.height), .height = static_cast<uint32_t>(passInfo.resolveRect.height),
.depthOrArrayLayers = 1, .depthOrArrayLayers = 1,
}; };
wgpuCommandEncoderCopyTextureToTexture(cmd, &src, &dst, &size); cmd.CopyTextureToTexture(&src, &dst, &size);
} }
} }
g_renderPasses.clear(); g_renderPasses.clear();
} }
void render_pass(WGPURenderPassEncoder pass, u32 idx) { void render_pass(const wgpu::RenderPassEncoder& pass, u32 idx) {
g_currentPipeline = UINTPTR_MAX; g_currentPipeline = UINTPTR_MAX;
#ifdef AURORA_GFX_DEBUG_GROUPS #ifdef AURORA_GFX_DEBUG_GROUPS
std::vector<std::string> lastDebugGroupStack; std::vector<std::string> lastDebugGroupStack;
@ -691,10 +667,10 @@ void render_pass(WGPURenderPassEncoder pass, u32 idx) {
} }
} }
for (size_t i = firstDiff; i < lastDebugGroupStack.size(); ++i) { for (size_t i = firstDiff; i < lastDebugGroupStack.size(); ++i) {
wgpuRenderPassEncoderPopDebugGroup(pass); pass.PopDebugGroup();
} }
for (size_t i = firstDiff; i < cmd.debugGroupStack.size(); ++i) { for (size_t i = firstDiff; i < cmd.debugGroupStack.size(); ++i) {
wgpuRenderPassEncoderPushDebugGroup(pass, cmd.debugGroupStack[i].c_str()); pass.PushDebugGroup(cmd.debugGroupStack[i].c_str());
} }
lastDebugGroupStack = cmd.debugGroupStack; lastDebugGroupStack = cmd.debugGroupStack;
} }
@ -702,11 +678,11 @@ void render_pass(WGPURenderPassEncoder pass, u32 idx) {
switch (cmd.type) { switch (cmd.type) {
case CommandType::SetViewport: { case CommandType::SetViewport: {
const auto& vp = cmd.data.setViewport; const auto& vp = cmd.data.setViewport;
wgpuRenderPassEncoderSetViewport(pass, vp.left, vp.top, vp.width, vp.height, vp.znear, vp.zfar); pass.SetViewport(vp.left, vp.top, vp.width, vp.height, vp.znear, vp.zfar);
} break; } break;
case CommandType::SetScissor: { case CommandType::SetScissor: {
const auto& sc = cmd.data.setScissor; const auto& sc = cmd.data.setScissor;
wgpuRenderPassEncoderSetScissorRect(pass, sc.x, sc.y, sc.w, sc.h); pass.SetScissorRect(sc.x, sc.y, sc.w, sc.h);
} break; } break;
case CommandType::Draw: { case CommandType::Draw: {
const auto& draw = cmd.data.draw; const auto& draw = cmd.data.draw;
@ -724,12 +700,12 @@ void render_pass(WGPURenderPassEncoder pass, u32 idx) {
#ifdef AURORA_GFX_DEBUG_GROUPS #ifdef AURORA_GFX_DEBUG_GROUPS
for (size_t i = 0; i < lastDebugGroupStack.size(); ++i) { for (size_t i = 0; i < lastDebugGroupStack.size(); ++i) {
wgpuRenderPassEncoderPopDebugGroup(pass); pass.PopDebugGroup();
} }
#endif #endif
} }
bool bind_pipeline(PipelineRef ref, WGPURenderPassEncoder pass) { bool bind_pipeline(PipelineRef ref, const wgpu::RenderPassEncoder& pass) {
if (ref == g_currentPipeline) { if (ref == g_currentPipeline) {
return true; return true;
} }
@ -738,7 +714,7 @@ bool bind_pipeline(PipelineRef ref, WGPURenderPassEncoder pass) {
if (it == g_pipelines.end()) { if (it == g_pipelines.end()) {
return false; return false;
} }
wgpuRenderPassEncoderSetPipeline(pass, it->second); pass.SetPipeline(it->second);
g_currentPipeline = ref; g_currentPipeline = ref;
return true; return true;
} }
@@ -809,27 +785,38 @@ std::pair<ByteBuffer, Range> map_storage(size_t length) {
   return {ByteBuffer{g_storage.data() + range.offset, range.size}, range};
 }
 
-BindGroupRef bind_group_ref(const WGPUBindGroupDescriptor& descriptor) {
+// TODO: should we avoid caching bind groups altogether?
+BindGroupRef bind_group_ref(const wgpu::BindGroupDescriptor& descriptor) {
+#ifdef EMSCRIPTEN
+  const auto bg = g_device.CreateBindGroup(&descriptor);
+  BindGroupRef id = reinterpret_cast<BindGroupRef>(bg.Get());
+  g_cachedBindGroups.try_emplace(id, bg);
+#else
   const auto id = xxh3_hash(descriptor);
   if (!g_cachedBindGroups.contains(id)) {
-    g_cachedBindGroups.try_emplace(id, wgpuDeviceCreateBindGroup(g_device, &descriptor));
+    g_cachedBindGroups.try_emplace(id, g_device.CreateBindGroup(&descriptor));
   }
+#endif
   return id;
 }
 
-WGPUBindGroup find_bind_group(BindGroupRef id) {
+const wgpu::BindGroup& find_bind_group(BindGroupRef id) {
+#ifdef EMSCRIPTEN
+  return g_cachedBindGroups[id];
+#else
   const auto it = g_cachedBindGroups.find(id);
   if (it == g_cachedBindGroups.end()) {
     Log.report(LOG_FATAL, FMT_STRING("get_bind_group: failed to locate {}"), id);
     unreachable();
   }
   return it->second;
+#endif
 }
 
-WGPUSampler sampler_ref(const WGPUSamplerDescriptor& descriptor) {
+const wgpu::Sampler& sampler_ref(const wgpu::SamplerDescriptor& descriptor) {
   const auto id = xxh3_hash(descriptor);
   auto it = g_cachedSamplers.find(id);
   if (it == g_cachedSamplers.end()) {
-    it = g_cachedSamplers.try_emplace(id, wgpuDeviceCreateSampler(g_device, &descriptor)).first;
+    it = g_cachedSamplers.try_emplace(id, g_device.CreateSampler(&descriptor)).first;
   }
   return it->second;
 }
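Under Emscripten the code skips hashing entirely and keys the bind-group cache by the underlying handle pointer (bg.Get()), keeping the handle alive by storing the wrapper in the map. A hedged sketch of that pointer-keyed cache in isolation (BindGroupRef's underlying type is an assumption here):

#include <cstdint>
#include <absl/container/flat_hash_map.h>
#include <webgpu/webgpu_cpp.h>

using BindGroupRef = uint64_t;  // assumption: mirrors aurora's hash-based ref type

// The stored wgpu::BindGroup keeps a reference, so the raw handle used as the key
// stays valid for the lifetime of the cache.
absl::flat_hash_map<BindGroupRef, wgpu::BindGroup> g_exampleCache;

BindGroupRef cache_bind_group(const wgpu::Device& device, const wgpu::BindGroupDescriptor& descriptor) {
  const wgpu::BindGroup bg = device.CreateBindGroup(&descriptor);
  const auto id = reinterpret_cast<BindGroupRef>(bg.Get());  // handle pointer doubles as the key
  g_exampleCache.try_emplace(id, bg);
  return id;
}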


@ -6,7 +6,7 @@
#include <utility> #include <utility>
#include <cstring> #include <cstring>
#include <webgpu/webgpu.h> #include <webgpu/webgpu_cpp.h>
#include <xxhash_impl.h> #include <xxhash_impl.h>
namespace aurora { namespace aurora {
@ -123,10 +123,10 @@ private:
} // namespace aurora } // namespace aurora
namespace aurora::gfx { namespace aurora::gfx {
extern WGPUBuffer g_vertexBuffer; extern wgpu::Buffer g_vertexBuffer;
extern WGPUBuffer g_uniformBuffer; extern wgpu::Buffer g_uniformBuffer;
extern WGPUBuffer g_indexBuffer; extern wgpu::Buffer g_indexBuffer;
extern WGPUBuffer g_storageBuffer; extern wgpu::Buffer g_storageBuffer;
using BindGroupRef = HashType; using BindGroupRef = HashType;
using PipelineRef = HashType; using PipelineRef = HashType;
@ -148,9 +148,9 @@ void initialize();
void shutdown(); void shutdown();
void begin_frame(); void begin_frame();
void end_frame(WGPUCommandEncoder cmd); void end_frame(const wgpu::CommandEncoder& cmd);
void render(WGPUCommandEncoder cmd); void render(wgpu::CommandEncoder& cmd);
void render_pass(WGPURenderPassEncoder pass, uint32_t idx); void render_pass(const wgpu::RenderPassEncoder& pass, uint32_t idx);
void map_staging_buffer(); void map_staging_buffer();
Range push_verts(const uint8_t* data, size_t length); Range push_verts(const uint8_t* data, size_t length);
@ -192,12 +192,12 @@ void merge_draw_command(DrawData data);
template <typename PipelineConfig> template <typename PipelineConfig>
PipelineRef pipeline_ref(PipelineConfig config); PipelineRef pipeline_ref(PipelineConfig config);
bool bind_pipeline(PipelineRef ref, WGPURenderPassEncoder pass); bool bind_pipeline(PipelineRef ref, const wgpu::RenderPassEncoder& pass);
BindGroupRef bind_group_ref(const WGPUBindGroupDescriptor& descriptor); BindGroupRef bind_group_ref(const wgpu::BindGroupDescriptor& descriptor);
WGPUBindGroup find_bind_group(BindGroupRef id); const wgpu::BindGroup& find_bind_group(BindGroupRef id);
WGPUSampler sampler_ref(const WGPUSamplerDescriptor& descriptor); const wgpu::Sampler& sampler_ref(const wgpu::SamplerDescriptor& descriptor);
uint32_t align_uniform(uint32_t value); uint32_t align_uniform(uint32_t value);


@ -23,108 +23,108 @@ GXState g_gxState{};
const TextureBind& get_texture(GXTexMapID id) noexcept { return g_gxState.textures[static_cast<size_t>(id)]; } const TextureBind& get_texture(GXTexMapID id) noexcept { return g_gxState.textures[static_cast<size_t>(id)]; }
static inline WGPUBlendFactor to_blend_factor(GXBlendFactor fac, bool isDst) { static inline wgpu::BlendFactor to_blend_factor(GXBlendFactor fac, bool isDst) {
switch (fac) { switch (fac) {
case GX_BL_ZERO: case GX_BL_ZERO:
return WGPUBlendFactor_Zero; return wgpu::BlendFactor::Zero;
case GX_BL_ONE: case GX_BL_ONE:
return WGPUBlendFactor_One; return wgpu::BlendFactor::One;
case GX_BL_SRCCLR: // + GX_BL_DSTCLR case GX_BL_SRCCLR: // + GX_BL_DSTCLR
if (isDst) { if (isDst) {
return WGPUBlendFactor_Src; return wgpu::BlendFactor::Src;
} else { } else {
return WGPUBlendFactor_Dst; return wgpu::BlendFactor::Dst;
} }
case GX_BL_INVSRCCLR: // + GX_BL_INVDSTCLR case GX_BL_INVSRCCLR: // + GX_BL_INVDSTCLR
if (isDst) { if (isDst) {
return WGPUBlendFactor_OneMinusSrc; return wgpu::BlendFactor::OneMinusSrc;
} else { } else {
return WGPUBlendFactor_OneMinusDst; return wgpu::BlendFactor::OneMinusDst;
} }
case GX_BL_SRCALPHA: case GX_BL_SRCALPHA:
return WGPUBlendFactor_SrcAlpha; return wgpu::BlendFactor::SrcAlpha;
case GX_BL_INVSRCALPHA: case GX_BL_INVSRCALPHA:
return WGPUBlendFactor_OneMinusSrcAlpha; return wgpu::BlendFactor::OneMinusSrcAlpha;
case GX_BL_DSTALPHA: case GX_BL_DSTALPHA:
return WGPUBlendFactor_DstAlpha; return wgpu::BlendFactor::DstAlpha;
case GX_BL_INVDSTALPHA: case GX_BL_INVDSTALPHA:
return WGPUBlendFactor_OneMinusDstAlpha; return wgpu::BlendFactor::OneMinusDstAlpha;
default: default:
Log.report(LOG_FATAL, FMT_STRING("invalid blend factor {}"), fac); Log.report(LOG_FATAL, FMT_STRING("invalid blend factor {}"), fac);
unreachable(); unreachable();
} }
} }
static inline WGPUCompareFunction to_compare_function(GXCompare func) { static inline wgpu::CompareFunction to_compare_function(GXCompare func) {
switch (func) { switch (func) {
case GX_NEVER: case GX_NEVER:
return WGPUCompareFunction_Never; return wgpu::CompareFunction::Never;
case GX_LESS: case GX_LESS:
return WGPUCompareFunction_Less; return wgpu::CompareFunction::Less;
case GX_EQUAL: case GX_EQUAL:
return WGPUCompareFunction_Equal; return wgpu::CompareFunction::Equal;
case GX_LEQUAL: case GX_LEQUAL:
return WGPUCompareFunction_LessEqual; return wgpu::CompareFunction::LessEqual;
case GX_GREATER: case GX_GREATER:
return WGPUCompareFunction_Greater; return wgpu::CompareFunction::Greater;
case GX_NEQUAL: case GX_NEQUAL:
return WGPUCompareFunction_NotEqual; return wgpu::CompareFunction::NotEqual;
case GX_GEQUAL: case GX_GEQUAL:
return WGPUCompareFunction_GreaterEqual; return wgpu::CompareFunction::GreaterEqual;
case GX_ALWAYS: case GX_ALWAYS:
return WGPUCompareFunction_Always; return wgpu::CompareFunction::Always;
default: default:
Log.report(LOG_FATAL, FMT_STRING("invalid depth fn {}"), func); Log.report(LOG_FATAL, FMT_STRING("invalid depth fn {}"), func);
unreachable(); unreachable();
} }
} }
static inline WGPUBlendState to_blend_state(GXBlendMode mode, GXBlendFactor srcFac, GXBlendFactor dstFac, GXLogicOp op, static inline wgpu::BlendState to_blend_state(GXBlendMode mode, GXBlendFactor srcFac, GXBlendFactor dstFac,
u32 dstAlpha) { GXLogicOp op, u32 dstAlpha) {
WGPUBlendComponent colorBlendComponent; wgpu::BlendComponent colorBlendComponent;
switch (mode) { switch (mode) {
case GX_BM_NONE: case GX_BM_NONE:
colorBlendComponent = { colorBlendComponent = {
.operation = WGPUBlendOperation_Add, .operation = wgpu::BlendOperation::Add,
.srcFactor = WGPUBlendFactor_One, .srcFactor = wgpu::BlendFactor::One,
.dstFactor = WGPUBlendFactor_Zero, .dstFactor = wgpu::BlendFactor::Zero,
}; };
break; break;
case GX_BM_BLEND: case GX_BM_BLEND:
colorBlendComponent = { colorBlendComponent = {
.operation = WGPUBlendOperation_Add, .operation = wgpu::BlendOperation::Add,
.srcFactor = to_blend_factor(srcFac, false), .srcFactor = to_blend_factor(srcFac, false),
.dstFactor = to_blend_factor(dstFac, true), .dstFactor = to_blend_factor(dstFac, true),
}; };
break; break;
case GX_BM_SUBTRACT: case GX_BM_SUBTRACT:
colorBlendComponent = { colorBlendComponent = {
.operation = WGPUBlendOperation_ReverseSubtract, .operation = wgpu::BlendOperation::ReverseSubtract,
.srcFactor = WGPUBlendFactor_One, .srcFactor = wgpu::BlendFactor::One,
.dstFactor = WGPUBlendFactor_One, .dstFactor = wgpu::BlendFactor::One,
}; };
break; break;
case GX_BM_LOGIC: case GX_BM_LOGIC:
switch (op) { switch (op) {
case GX_LO_CLEAR: case GX_LO_CLEAR:
colorBlendComponent = { colorBlendComponent = {
.operation = WGPUBlendOperation_Add, .operation = wgpu::BlendOperation::Add,
.srcFactor = WGPUBlendFactor_Zero, .srcFactor = wgpu::BlendFactor::Zero,
.dstFactor = WGPUBlendFactor_Zero, .dstFactor = wgpu::BlendFactor::Zero,
}; };
break; break;
case GX_LO_COPY: case GX_LO_COPY:
colorBlendComponent = { colorBlendComponent = {
.operation = WGPUBlendOperation_Add, .operation = wgpu::BlendOperation::Add,
.srcFactor = WGPUBlendFactor_One, .srcFactor = wgpu::BlendFactor::One,
.dstFactor = WGPUBlendFactor_Zero, .dstFactor = wgpu::BlendFactor::Zero,
}; };
break; break;
case GX_LO_NOOP: case GX_LO_NOOP:
colorBlendComponent = { colorBlendComponent = {
.operation = WGPUBlendOperation_Add, .operation = wgpu::BlendOperation::Add,
.srcFactor = WGPUBlendFactor_Zero, .srcFactor = wgpu::BlendFactor::Zero,
.dstFactor = WGPUBlendFactor_One, .dstFactor = wgpu::BlendFactor::One,
}; };
break; break;
default: default:
@ -136,16 +136,16 @@ static inline WGPUBlendState to_blend_state(GXBlendMode mode, GXBlendFactor srcF
Log.report(LOG_FATAL, FMT_STRING("unsupported blend mode {}"), mode); Log.report(LOG_FATAL, FMT_STRING("unsupported blend mode {}"), mode);
unreachable(); unreachable();
} }
WGPUBlendComponent alphaBlendComponent{ wgpu::BlendComponent alphaBlendComponent{
.operation = WGPUBlendOperation_Add, .operation = wgpu::BlendOperation::Add,
.srcFactor = WGPUBlendFactor_One, .srcFactor = wgpu::BlendFactor::One,
.dstFactor = WGPUBlendFactor_Zero, .dstFactor = wgpu::BlendFactor::Zero,
}; };
if (dstAlpha != UINT32_MAX) { if (dstAlpha != UINT32_MAX) {
alphaBlendComponent = WGPUBlendComponent{ alphaBlendComponent = wgpu::BlendComponent{
.operation = WGPUBlendOperation_Add, .operation = wgpu::BlendOperation::Add,
.srcFactor = WGPUBlendFactor_Constant, .srcFactor = wgpu::BlendFactor::Constant,
.dstFactor = WGPUBlendFactor_Zero, .dstFactor = wgpu::BlendFactor::Zero,
}; };
} }
return { return {
@ -154,36 +154,36 @@ static inline WGPUBlendState to_blend_state(GXBlendMode mode, GXBlendFactor srcF
}; };
} }
static inline WGPUColorWriteMaskFlags to_write_mask(bool colorUpdate, bool alphaUpdate) { static inline wgpu::ColorWriteMask to_write_mask(bool colorUpdate, bool alphaUpdate) {
WGPUColorWriteMaskFlags writeMask = WGPUColorWriteMask_None; wgpu::ColorWriteMask writeMask = wgpu::ColorWriteMask::None;
if (colorUpdate) { if (colorUpdate) {
writeMask |= WGPUColorWriteMask_Red | WGPUColorWriteMask_Green | WGPUColorWriteMask_Blue; writeMask |= wgpu::ColorWriteMask::Red | wgpu::ColorWriteMask::Green | wgpu::ColorWriteMask::Blue;
} }
if (alphaUpdate) { if (alphaUpdate) {
writeMask |= WGPUColorWriteMask_Alpha; writeMask |= wgpu::ColorWriteMask::Alpha;
} }
return writeMask; return writeMask;
} }
static inline WGPUPrimitiveState to_primitive_state(GXPrimitive gx_prim, GXCullMode gx_cullMode) { static inline wgpu::PrimitiveState to_primitive_state(GXPrimitive gx_prim, GXCullMode gx_cullMode) {
WGPUPrimitiveTopology primitive = WGPUPrimitiveTopology_TriangleList; wgpu::PrimitiveTopology primitive = wgpu::PrimitiveTopology::TriangleList;
switch (gx_prim) { switch (gx_prim) {
case GX_TRIANGLES: case GX_TRIANGLES:
break; break;
case GX_TRIANGLESTRIP: case GX_TRIANGLESTRIP:
primitive = WGPUPrimitiveTopology_TriangleStrip; primitive = wgpu::PrimitiveTopology::TriangleStrip;
break; break;
default: default:
Log.report(LOG_FATAL, FMT_STRING("Unsupported primitive type {}"), gx_prim); Log.report(LOG_FATAL, FMT_STRING("Unsupported primitive type {}"), gx_prim);
unreachable(); unreachable();
} }
WGPUCullMode cullMode = WGPUCullMode_None; wgpu::CullMode cullMode = wgpu::CullMode::None;
switch (gx_cullMode) { switch (gx_cullMode) {
case GX_CULL_FRONT: case GX_CULL_FRONT:
cullMode = WGPUCullMode_Front; cullMode = wgpu::CullMode::Front;
break; break;
case GX_CULL_BACK: case GX_CULL_BACK:
cullMode = WGPUCullMode_Back; cullMode = wgpu::CullMode::Back;
break; break;
case GX_CULL_NONE: case GX_CULL_NONE:
break; break;
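to_write_mask in the hunk above now builds a wgpu::ColorWriteMask. webgpu_cpp declares bitwise operators for its flag enums (ColorWriteMask, BufferUsage, ShaderStage, and so on), so the |= composition keeps working with the scoped enum just as it did with the old WGPUColorWriteMaskFlags integers. A hedged sketch:

#include <webgpu/webgpu_cpp.h>

// Hedged sketch of the to_write_mask conversion shown above.
wgpu::ColorWriteMask example_write_mask(bool colorUpdate, bool alphaUpdate) {
  wgpu::ColorWriteMask mask = wgpu::ColorWriteMask::None;
  if (colorUpdate) {
    mask |= wgpu::ColorWriteMask::Red | wgpu::ColorWriteMask::Green | wgpu::ColorWriteMask::Blue;
  }
  if (alphaUpdate) {
    mask |= wgpu::ColorWriteMask::Alpha;
  }
  return mask;  // (true, true) yields the same bits as ColorWriteMask::All
}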
@ -193,35 +193,35 @@ static inline WGPUPrimitiveState to_primitive_state(GXPrimitive gx_prim, GXCullM
} }
return { return {
.topology = primitive, .topology = primitive,
.frontFace = WGPUFrontFace_CW, .frontFace = wgpu::FrontFace::CW,
.cullMode = cullMode, .cullMode = cullMode,
}; };
} }
WGPURenderPipeline build_pipeline(const PipelineConfig& config, const ShaderInfo& info, wgpu::RenderPipeline build_pipeline(const PipelineConfig& config, const ShaderInfo& info,
ArrayRef<WGPUVertexBufferLayout> vtxBuffers, WGPUShaderModule shader, ArrayRef<wgpu::VertexBufferLayout> vtxBuffers, wgpu::ShaderModule shader,
const char* label) noexcept { const char* label) noexcept {
const WGPUDepthStencilState depthStencil{ const wgpu::DepthStencilState depthStencil{
.format = g_graphicsConfig.depthFormat, .format = g_graphicsConfig.depthFormat,
.depthWriteEnabled = config.depthUpdate, .depthWriteEnabled = config.depthUpdate,
.depthCompare = to_compare_function(config.depthFunc), .depthCompare = to_compare_function(config.depthFunc),
.stencilFront = .stencilFront =
WGPUStencilFaceState{ wgpu::StencilFaceState{
.compare = WGPUCompareFunction_Always, .compare = wgpu::CompareFunction::Always,
}, },
.stencilBack = .stencilBack =
WGPUStencilFaceState{ wgpu::StencilFaceState{
.compare = WGPUCompareFunction_Always, .compare = wgpu::CompareFunction::Always,
}, },
}; };
const auto blendState = const auto blendState =
to_blend_state(config.blendMode, config.blendFacSrc, config.blendFacDst, config.blendOp, config.dstAlpha); to_blend_state(config.blendMode, config.blendFacSrc, config.blendFacDst, config.blendOp, config.dstAlpha);
const std::array colorTargets{WGPUColorTargetState{ const std::array colorTargets{wgpu::ColorTargetState{
.format = g_graphicsConfig.colorFormat, .format = g_graphicsConfig.swapChainDescriptor.format,
.blend = &blendState, .blend = &blendState,
.writeMask = to_write_mask(config.colorUpdate, config.alphaUpdate), .writeMask = to_write_mask(config.colorUpdate, config.alphaUpdate),
}}; }};
const WGPUFragmentState fragmentState{ const wgpu::FragmentState fragmentState{
.module = shader, .module = shader,
.entryPoint = "fs_main", .entryPoint = "fs_main",
.targetCount = colorTargets.size(), .targetCount = colorTargets.size(),
@ -233,13 +233,13 @@ WGPURenderPipeline build_pipeline(const PipelineConfig& config, const ShaderInfo
layouts.samplerLayout, layouts.samplerLayout,
layouts.textureLayout, layouts.textureLayout,
}; };
const WGPUPipelineLayoutDescriptor pipelineLayoutDescriptor{ const wgpu::PipelineLayoutDescriptor pipelineLayoutDescriptor{
.label = "GX Pipeline Layout", .label = "GX Pipeline Layout",
.bindGroupLayoutCount = static_cast<uint32_t>(info.sampledTextures.any() ? bindGroupLayouts.size() : 1), .bindGroupLayoutCount = static_cast<uint32_t>(info.sampledTextures.any() ? bindGroupLayouts.size() : 1),
.bindGroupLayouts = bindGroupLayouts.data(), .bindGroupLayouts = bindGroupLayouts.data(),
}; };
auto pipelineLayout = wgpuDeviceCreatePipelineLayout(g_device, &pipelineLayoutDescriptor); auto pipelineLayout = g_device.CreatePipelineLayout(&pipelineLayoutDescriptor);
const WGPURenderPipelineDescriptor descriptor{ const wgpu::RenderPipelineDescriptor descriptor{
.label = label, .label = label,
.layout = pipelineLayout, .layout = pipelineLayout,
.vertex = .vertex =
@ -252,15 +252,13 @@ WGPURenderPipeline build_pipeline(const PipelineConfig& config, const ShaderInfo
.primitive = to_primitive_state(config.primitive, config.cullMode), .primitive = to_primitive_state(config.primitive, config.cullMode),
.depthStencil = &depthStencil, .depthStencil = &depthStencil,
.multisample = .multisample =
WGPUMultisampleState{ wgpu::MultisampleState{
.count = g_graphicsConfig.msaaSamples, .count = g_graphicsConfig.msaaSamples,
.mask = UINT32_MAX, .mask = UINT32_MAX,
}, },
.fragment = &fragmentState, .fragment = &fragmentState,
}; };
auto pipeline = wgpuDeviceCreateRenderPipeline(g_device, &descriptor); return g_device.CreateRenderPipeline(&descriptor);
wgpuPipelineLayoutRelease(pipelineLayout);
return pipeline;
} }
void populate_pipeline_config(PipelineConfig& config, GXPrimitive primitive) noexcept { void populate_pipeline_config(PipelineConfig& config, GXPrimitive primitive) noexcept {
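build_pipeline no longer calls wgpuPipelineLayoutRelease: the wgpu::PipelineLayout created for the descriptor is held by a local and dropped automatically after CreateRenderPipeline returns, since the pipeline keeps its own reference. A hedged sketch of that RAII idiom (illustrative function, not aurora's actual build_pipeline):

#include <webgpu/webgpu_cpp.h>

wgpu::RenderPipeline create_pipeline_example(const wgpu::Device& device,
                                             const wgpu::RenderPipelineDescriptor& descriptor,
                                             const wgpu::PipelineLayoutDescriptor& layoutDescriptor) {
  // The layout lives only for this scope; the created pipeline holds its own reference.
  wgpu::PipelineLayout layout = device.CreatePipelineLayout(&layoutDescriptor);
  wgpu::RenderPipelineDescriptor desc = descriptor;
  desc.layout = layout;
  return device.CreateRenderPipeline(&desc);
  // `layout` is released here automatically (previously an explicit wgpuPipelineLayoutRelease).
}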
@ -477,15 +475,15 @@ Range build_uniform(const ShaderInfo& info) noexcept {
return range; return range;
} }
static absl::flat_hash_map<u32, WGPUBindGroupLayout> sUniformBindGroupLayouts; static absl::flat_hash_map<u32, wgpu::BindGroupLayout> sUniformBindGroupLayouts;
static absl::flat_hash_map<u32, std::pair<WGPUBindGroupLayout, WGPUBindGroupLayout>> sTextureBindGroupLayouts; static absl::flat_hash_map<u32, std::pair<wgpu::BindGroupLayout, wgpu::BindGroupLayout>> sTextureBindGroupLayouts;
GXBindGroups build_bind_groups(const ShaderInfo& info, const ShaderConfig& config, GXBindGroups build_bind_groups(const ShaderInfo& info, const ShaderConfig& config,
const BindGroupRanges& ranges) noexcept { const BindGroupRanges& ranges) noexcept {
const auto layouts = build_bind_group_layouts(info, config); const auto layouts = build_bind_group_layouts(info, config);
std::array<WGPUBindGroupEntry, GX_VA_MAX_ATTR + 1> uniformEntries{ std::array<wgpu::BindGroupEntry, GX_VA_MAX_ATTR + 1> uniformEntries{
WGPUBindGroupEntry{ wgpu::BindGroupEntry{
.binding = 0, .binding = 0,
.buffer = g_uniformBuffer, .buffer = g_uniformBuffer,
.size = info.uniformSize, .size = info.uniformSize,
@ -497,7 +495,7 @@ GXBindGroups build_bind_groups(const ShaderInfo& info, const ShaderConfig& confi
if (range.size <= 0) { if (range.size <= 0) {
continue; continue;
} }
uniformEntries[uniformBindIdx] = WGPUBindGroupEntry{ uniformEntries[uniformBindIdx] = wgpu::BindGroupEntry{
.binding = uniformBindIdx, .binding = uniformBindIdx,
.buffer = g_storageBuffer, .buffer = g_storageBuffer,
.size = range.size, .size = range.size,
@ -505,8 +503,8 @@ GXBindGroups build_bind_groups(const ShaderInfo& info, const ShaderConfig& confi
++uniformBindIdx; ++uniformBindIdx;
} }
std::array<WGPUBindGroupEntry, MaxTextures> samplerEntries; std::array<wgpu::BindGroupEntry, MaxTextures> samplerEntries;
std::array<WGPUBindGroupEntry, MaxTextures * 2> textureEntries; std::array<wgpu::BindGroupEntry, MaxTextures * 2> textureEntries;
u32 samplerCount = 0; u32 samplerCount = 0;
u32 textureCount = 0; u32 textureCount = 0;
for (u32 i = 0; i < info.sampledTextures.size(); ++i) { for (u32 i = 0; i < info.sampledTextures.size(); ++i) {
@ -547,19 +545,19 @@ GXBindGroups build_bind_groups(const ShaderInfo& info, const ShaderConfig& confi
} }
} }
return { return {
.uniformBindGroup = bind_group_ref(WGPUBindGroupDescriptor{ .uniformBindGroup = bind_group_ref(wgpu::BindGroupDescriptor{
.label = "GX Uniform Bind Group", .label = "GX Uniform Bind Group",
.layout = layouts.uniformLayout, .layout = layouts.uniformLayout,
.entryCount = uniformBindIdx, .entryCount = uniformBindIdx,
.entries = uniformEntries.data(), .entries = uniformEntries.data(),
}), }),
.samplerBindGroup = bind_group_ref(WGPUBindGroupDescriptor{ .samplerBindGroup = bind_group_ref(wgpu::BindGroupDescriptor{
.label = "GX Sampler Bind Group", .label = "GX Sampler Bind Group",
.layout = layouts.samplerLayout, .layout = layouts.samplerLayout,
.entryCount = samplerCount, .entryCount = samplerCount,
.entries = samplerEntries.data(), .entries = samplerEntries.data(),
}), }),
.textureBindGroup = bind_group_ref(WGPUBindGroupDescriptor{ .textureBindGroup = bind_group_ref(wgpu::BindGroupDescriptor{
.label = "GX Texture Bind Group", .label = "GX Texture Bind Group",
.layout = layouts.textureLayout, .layout = layouts.textureLayout,
.entryCount = textureCount, .entryCount = textureCount,
@ -575,13 +573,13 @@ GXBindGroupLayouts build_bind_group_layouts(const ShaderInfo& info, const Shader
if (uniformIt != sUniformBindGroupLayouts.end()) { if (uniformIt != sUniformBindGroupLayouts.end()) {
out.uniformLayout = uniformIt->second; out.uniformLayout = uniformIt->second;
} else { } else {
std::array<WGPUBindGroupLayoutEntry, GX_VA_MAX_ATTR + 1> uniformLayoutEntries{ std::array<wgpu::BindGroupLayoutEntry, GX_VA_MAX_ATTR + 1> uniformLayoutEntries{
WGPUBindGroupLayoutEntry{ wgpu::BindGroupLayoutEntry{
.binding = 0, .binding = 0,
.visibility = WGPUShaderStage_Vertex | WGPUShaderStage_Fragment, .visibility = wgpu::ShaderStage::Vertex | wgpu::ShaderStage::Fragment,
.buffer = .buffer =
WGPUBufferBindingLayout{ wgpu::BufferBindingLayout{
.type = WGPUBufferBindingType_Uniform, .type = wgpu::BufferBindingType::Uniform,
.hasDynamicOffset = true, .hasDynamicOffset = true,
.minBindingSize = info.uniformSize, .minBindingSize = info.uniformSize,
}, },
@ -590,24 +588,24 @@ GXBindGroupLayouts build_bind_group_layouts(const ShaderInfo& info, const Shader
u32 bindIdx = 1; u32 bindIdx = 1;
for (int i = 0; i < GX_VA_MAX_ATTR; ++i) { for (int i = 0; i < GX_VA_MAX_ATTR; ++i) {
if (config.attrMapping[i] == static_cast<GXAttr>(i)) { if (config.attrMapping[i] == static_cast<GXAttr>(i)) {
uniformLayoutEntries[bindIdx] = WGPUBindGroupLayoutEntry{ uniformLayoutEntries[bindIdx] = wgpu::BindGroupLayoutEntry{
.binding = bindIdx, .binding = bindIdx,
.visibility = WGPUShaderStage_Vertex, .visibility = wgpu::ShaderStage::Vertex,
.buffer = .buffer =
WGPUBufferBindingLayout{ wgpu::BufferBindingLayout{
.type = WGPUBufferBindingType_ReadOnlyStorage, .type = wgpu::BufferBindingType::ReadOnlyStorage,
.hasDynamicOffset = true, .hasDynamicOffset = true,
}, },
}; };
++bindIdx; ++bindIdx;
} }
} }
const auto uniformLayoutDescriptor = WGPUBindGroupLayoutDescriptor{ const auto uniformLayoutDescriptor = wgpu::BindGroupLayoutDescriptor{
.label = "GX Uniform Bind Group Layout", .label = "GX Uniform Bind Group Layout",
.entryCount = bindIdx, .entryCount = bindIdx,
.entries = uniformLayoutEntries.data(), .entries = uniformLayoutEntries.data(),
}; };
out.uniformLayout = wgpuDeviceCreateBindGroupLayout(g_device, &uniformLayoutDescriptor); out.uniformLayout = g_device.CreateBindGroupLayout(&uniformLayoutDescriptor);
// sUniformBindGroupLayouts.try_emplace(uniformSizeKey, out.uniformLayout); // sUniformBindGroupLayouts.try_emplace(uniformSizeKey, out.uniformLayout);
} }
@ -620,8 +618,8 @@ GXBindGroupLayouts build_bind_group_layouts(const ShaderInfo& info, const Shader
// } else { // } else {
u32 numSamplers = 0; u32 numSamplers = 0;
u32 numTextures = 0; u32 numTextures = 0;
std::array<WGPUBindGroupLayoutEntry, MaxTextures> samplerEntries; std::array<wgpu::BindGroupLayoutEntry, MaxTextures> samplerEntries;
std::array<WGPUBindGroupLayoutEntry, MaxTextures * 2> textureEntries; std::array<wgpu::BindGroupLayoutEntry, MaxTextures * 2> textureEntries;
for (u32 i = 0; i < info.sampledTextures.size(); ++i) { for (u32 i = 0; i < info.sampledTextures.size(); ++i) {
if (!info.sampledTextures.test(i)) { if (!info.sampledTextures.test(i)) {
continue; continue;
@ -631,60 +629,60 @@ GXBindGroupLayouts build_bind_group_layouts(const ShaderInfo& info, const Shader
bool loadAsPalette = is_palette_format(texConfig.loadFmt); bool loadAsPalette = is_palette_format(texConfig.loadFmt);
samplerEntries[numSamplers] = { samplerEntries[numSamplers] = {
.binding = numSamplers, .binding = numSamplers,
.visibility = WGPUShaderStage_Fragment, .visibility = wgpu::ShaderStage::Fragment,
.sampler = {.type = copyAsPalette && loadAsPalette ? WGPUSamplerBindingType_NonFiltering .sampler = {.type = copyAsPalette && loadAsPalette ? wgpu::SamplerBindingType::NonFiltering
: WGPUSamplerBindingType_Filtering}, : wgpu::SamplerBindingType::Filtering},
}; };
++numSamplers; ++numSamplers;
if (loadAsPalette) { if (loadAsPalette) {
textureEntries[numTextures] = { textureEntries[numTextures] = {
.binding = numTextures, .binding = numTextures,
.visibility = WGPUShaderStage_Fragment, .visibility = wgpu::ShaderStage::Fragment,
.texture = .texture =
{ {
.sampleType = copyAsPalette ? WGPUTextureSampleType_Sint : WGPUTextureSampleType_Float, .sampleType = copyAsPalette ? wgpu::TextureSampleType::Sint : wgpu::TextureSampleType::Float,
.viewDimension = WGPUTextureViewDimension_2D, .viewDimension = wgpu::TextureViewDimension::e2D,
}, },
}; };
++numTextures; ++numTextures;
textureEntries[numTextures] = { textureEntries[numTextures] = {
.binding = numTextures, .binding = numTextures,
.visibility = WGPUShaderStage_Fragment, .visibility = wgpu::ShaderStage::Fragment,
.texture = .texture =
{ {
.sampleType = WGPUTextureSampleType_Float, .sampleType = wgpu::TextureSampleType::Float,
.viewDimension = WGPUTextureViewDimension_2D, .viewDimension = wgpu::TextureViewDimension::e2D,
}, },
}; };
++numTextures; ++numTextures;
} else { } else {
textureEntries[numTextures] = { textureEntries[numTextures] = {
.binding = numTextures, .binding = numTextures,
.visibility = WGPUShaderStage_Fragment, .visibility = wgpu::ShaderStage::Fragment,
.texture = .texture =
{ {
.sampleType = WGPUTextureSampleType_Float, .sampleType = wgpu::TextureSampleType::Float,
.viewDimension = WGPUTextureViewDimension_2D, .viewDimension = wgpu::TextureViewDimension::e2D,
}, },
}; };
++numTextures; ++numTextures;
} }
} }
{ {
const WGPUBindGroupLayoutDescriptor descriptor{ const wgpu::BindGroupLayoutDescriptor descriptor{
.label = "GX Sampler Bind Group Layout", .label = "GX Sampler Bind Group Layout",
.entryCount = numSamplers, .entryCount = numSamplers,
.entries = samplerEntries.data(), .entries = samplerEntries.data(),
}; };
out.samplerLayout = wgpuDeviceCreateBindGroupLayout(g_device, &descriptor); out.samplerLayout = g_device.CreateBindGroupLayout(&descriptor);
} }
{ {
const WGPUBindGroupLayoutDescriptor descriptor{ const wgpu::BindGroupLayoutDescriptor descriptor{
.label = "GX Texture Bind Group Layout", .label = "GX Texture Bind Group Layout",
.entryCount = numTextures, .entryCount = numTextures,
.entries = textureEntries.data(), .entries = textureEntries.data(),
}; };
out.textureLayout = wgpuDeviceCreateBindGroupLayout(g_device, &descriptor); out.textureLayout = g_device.CreateBindGroupLayout(&descriptor);
} }
// sTextureBindGroupLayouts.try_emplace(textureCount, out.samplerLayout, out.textureLayout); // sTextureBindGroupLayouts.try_emplace(textureCount, out.samplerLayout, out.textureLayout);
// } // }
@ -692,17 +690,10 @@ GXBindGroupLayouts build_bind_group_layouts(const ShaderInfo& info, const Shader
} }
// TODO this is awkward // TODO this is awkward
extern absl::flat_hash_map<ShaderRef, std::pair<WGPUShaderModule, gx::ShaderInfo>> g_gxCachedShaders; extern absl::flat_hash_map<ShaderRef, std::pair<wgpu::ShaderModule, gx::ShaderInfo>> g_gxCachedShaders;
void shutdown() noexcept { void shutdown() noexcept {
// TODO we should probably store this all in g_state.gx instead // TODO we should probably store this all in g_state.gx instead
for (const auto& item : sUniformBindGroupLayouts) {
wgpuBindGroupLayoutRelease(item.second);
}
sUniformBindGroupLayouts.clear(); sUniformBindGroupLayouts.clear();
for (const auto& item : sTextureBindGroupLayouts) {
wgpuBindGroupLayoutRelease(item.second.first);
wgpuBindGroupLayoutRelease(item.second.second);
}
sTextureBindGroupLayouts.clear(); sTextureBindGroupLayouts.clear();
for (auto& item : g_gxState.textures) { for (auto& item : g_gxState.textures) {
item.texObj.ref.reset(); item.texObj.ref.reset();
@ -710,40 +701,37 @@ void shutdown() noexcept {
for (auto& item : g_gxState.tluts) { for (auto& item : g_gxState.tluts) {
item.ref.reset(); item.ref.reset();
} }
for (const auto& item : g_gxCachedShaders) {
wgpuShaderModuleRelease(item.second.first);
}
g_gxCachedShaders.clear(); g_gxCachedShaders.clear();
} }
} // namespace gx } // namespace gx
static WGPUAddressMode wgpu_address_mode(GXTexWrapMode mode) { static wgpu::AddressMode wgpu_address_mode(GXTexWrapMode mode) {
switch (mode) { switch (mode) {
case GX_CLAMP: case GX_CLAMP:
return WGPUAddressMode_ClampToEdge; return wgpu::AddressMode::ClampToEdge;
case GX_REPEAT: case GX_REPEAT:
return WGPUAddressMode_Repeat; return wgpu::AddressMode::Repeat;
case GX_MIRROR: case GX_MIRROR:
return WGPUAddressMode_MirrorRepeat; return wgpu::AddressMode::MirrorRepeat;
default: default:
Log.report(LOG_FATAL, FMT_STRING("invalid wrap mode {}"), mode); Log.report(LOG_FATAL, FMT_STRING("invalid wrap mode {}"), mode);
unreachable(); unreachable();
} }
} }
static std::pair<WGPUFilterMode, WGPUFilterMode> wgpu_filter_mode(GXTexFilter filter) { static std::pair<wgpu::FilterMode, wgpu::FilterMode> wgpu_filter_mode(GXTexFilter filter) {
switch (filter) { switch (filter) {
case GX_NEAR: case GX_NEAR:
return {WGPUFilterMode_Nearest, WGPUFilterMode_Linear}; return {wgpu::FilterMode::Nearest, wgpu::FilterMode::Linear};
case GX_LINEAR: case GX_LINEAR:
return {WGPUFilterMode_Linear, WGPUFilterMode_Linear}; return {wgpu::FilterMode::Linear, wgpu::FilterMode::Linear};
case GX_NEAR_MIP_NEAR: case GX_NEAR_MIP_NEAR:
return {WGPUFilterMode_Nearest, WGPUFilterMode_Nearest}; return {wgpu::FilterMode::Nearest, wgpu::FilterMode::Nearest};
case GX_LIN_MIP_NEAR: case GX_LIN_MIP_NEAR:
return {WGPUFilterMode_Linear, WGPUFilterMode_Nearest}; return {wgpu::FilterMode::Linear, wgpu::FilterMode::Nearest};
case GX_NEAR_MIP_LIN: case GX_NEAR_MIP_LIN:
return {WGPUFilterMode_Nearest, WGPUFilterMode_Linear}; return {wgpu::FilterMode::Nearest, wgpu::FilterMode::Linear};
case GX_LIN_MIP_LIN: case GX_LIN_MIP_LIN:
return {WGPUFilterMode_Linear, WGPUFilterMode_Linear}; return {wgpu::FilterMode::Linear, wgpu::FilterMode::Linear};
default: default:
Log.report(LOG_FATAL, FMT_STRING("invalid filter mode {}"), filter); Log.report(LOG_FATAL, FMT_STRING("invalid filter mode {}"), filter);
unreachable(); unreachable();
@ -762,16 +750,16 @@ static u16 wgpu_aniso(GXAnisotropy aniso) {
unreachable(); unreachable();
} }
} }
WGPUSamplerDescriptor TextureBind::get_descriptor() const noexcept { wgpu::SamplerDescriptor TextureBind::get_descriptor() const noexcept {
if (gx::requires_copy_conversion(texObj) && gx::is_palette_format(texObj.ref->gxFormat)) { if (gx::requires_copy_conversion(texObj) && gx::is_palette_format(texObj.ref->gxFormat)) {
return { return {
.label = "Generated Non-Filtering Sampler", .label = "Generated Non-Filtering Sampler",
.addressModeU = wgpu_address_mode(texObj.wrapS), .addressModeU = wgpu_address_mode(texObj.wrapS),
.addressModeV = wgpu_address_mode(texObj.wrapT), .addressModeV = wgpu_address_mode(texObj.wrapT),
.addressModeW = WGPUAddressMode_Repeat, .addressModeW = wgpu::AddressMode::Repeat,
.magFilter = WGPUFilterMode_Nearest, .magFilter = wgpu::FilterMode::Nearest,
.minFilter = WGPUFilterMode_Nearest, .minFilter = wgpu::FilterMode::Nearest,
.mipmapFilter = WGPUFilterMode_Nearest, .mipmapFilter = wgpu::FilterMode::Nearest,
.lodMinClamp = 0.f, .lodMinClamp = 0.f,
.lodMaxClamp = 1000.f, .lodMaxClamp = 1000.f,
.maxAnisotropy = 1, .maxAnisotropy = 1,
@ -783,7 +771,7 @@ WGPUSamplerDescriptor TextureBind::get_descriptor() const noexcept {
.label = "Generated Filtering Sampler", .label = "Generated Filtering Sampler",
.addressModeU = wgpu_address_mode(texObj.wrapS), .addressModeU = wgpu_address_mode(texObj.wrapS),
.addressModeV = wgpu_address_mode(texObj.wrapT), .addressModeV = wgpu_address_mode(texObj.wrapT),
.addressModeW = WGPUAddressMode_Repeat, .addressModeW = wgpu::AddressMode::Repeat,
.magFilter = magFilter, .magFilter = magFilter,
.minFilter = minFilter, .minFilter = minFilter,
.mipmapFilter = mipFilter, .mipmapFilter = mipFilter,
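
Aside (illustrative sketch, not part of this diff): the explicit wgpuPipelineLayoutRelease / wgpuBindGroupLayoutRelease / wgpuShaderModuleRelease calls can simply be deleted above because the wgpu:: wrappers from webgpu_cpp.h are reference-counted RAII handles; copying one adds a reference and the destructor drops it. Roughly, assuming a device and descriptor like the ones used in this file:

// Lifetime model assumed by the new code (sketch only).
static void lifetime_sketch(const wgpu::Device& device, const wgpu::BindGroupLayoutDescriptor& desc) {
  wgpu::BindGroupLayout layout = device.CreateBindGroupLayout(&desc);
  wgpu::BindGroupLayout copy = layout;    // copy adds a reference to the same handle
  WGPUBindGroupLayout raw = layout.Get(); // Get() borrows the C handle without touching the refcount
  (void)raw;
}                                         // both wrappers release here; the last release destroys
                                          // the object, so no manual wgpu*Release is needed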


@ -363,9 +363,9 @@ struct PipelineConfig {
static_assert(std::has_unique_object_representations_v<PipelineConfig>); static_assert(std::has_unique_object_representations_v<PipelineConfig>);
struct GXBindGroupLayouts { struct GXBindGroupLayouts {
WGPUBindGroupLayout uniformLayout; wgpu::BindGroupLayout uniformLayout;
WGPUBindGroupLayout samplerLayout; wgpu::BindGroupLayout samplerLayout;
WGPUBindGroupLayout textureLayout; wgpu::BindGroupLayout textureLayout;
}; };
struct GXBindGroups { struct GXBindGroups {
BindGroupRef uniformBindGroup; BindGroupRef uniformBindGroup;
@ -390,11 +390,11 @@ struct BindGroupRanges {
std::array<Range, GX_VA_MAX_ATTR> vaRanges{}; std::array<Range, GX_VA_MAX_ATTR> vaRanges{};
}; };
void populate_pipeline_config(PipelineConfig& config, GXPrimitive primitive) noexcept; void populate_pipeline_config(PipelineConfig& config, GXPrimitive primitive) noexcept;
WGPURenderPipeline build_pipeline(const PipelineConfig& config, const ShaderInfo& info, wgpu::RenderPipeline build_pipeline(const PipelineConfig& config, const ShaderInfo& info,
ArrayRef<WGPUVertexBufferLayout> vtxBuffers, WGPUShaderModule shader, ArrayRef<wgpu::VertexBufferLayout> vtxBuffers, wgpu::ShaderModule shader,
const char* label) noexcept; const char* label) noexcept;
ShaderInfo build_shader_info(const ShaderConfig& config) noexcept; ShaderInfo build_shader_info(const ShaderConfig& config) noexcept;
WGPUShaderModule build_shader(const ShaderConfig& config, const ShaderInfo& info) noexcept; wgpu::ShaderModule build_shader(const ShaderConfig& config, const ShaderInfo& info) noexcept;
// Range build_vertex_buffer(const GXShaderInfo& info) noexcept; // Range build_vertex_buffer(const GXShaderInfo& info) noexcept;
Range build_uniform(const ShaderInfo& info) noexcept; Range build_uniform(const ShaderInfo& info) noexcept;
GXBindGroupLayouts build_bind_group_layouts(const ShaderInfo& info, const ShaderConfig& config) noexcept; GXBindGroupLayouts build_bind_group_layouts(const ShaderInfo& info, const ShaderConfig& config) noexcept;


@ -6,7 +6,7 @@
#include <absl/container/flat_hash_map.h> #include <absl/container/flat_hash_map.h>
constexpr bool EnableNormalVisualization = false; constexpr bool EnableNormalVisualization = false;
constexpr bool EnableDebugPrints = true; constexpr bool EnableDebugPrints = false;
constexpr bool UsePerPixelLighting = true; constexpr bool UsePerPixelLighting = true;
namespace aurora::gfx::gx { namespace aurora::gfx::gx {
@ -16,7 +16,7 @@ using namespace std::string_view_literals;
static Module Log("aurora::gfx::gx"); static Module Log("aurora::gfx::gx");
absl::flat_hash_map<ShaderRef, std::pair<WGPUShaderModule, gx::ShaderInfo>> g_gxCachedShaders; absl::flat_hash_map<ShaderRef, std::pair<wgpu::ShaderModule, gx::ShaderInfo>> g_gxCachedShaders;
#ifndef NDEBUG #ifndef NDEBUG
static absl::flat_hash_map<ShaderRef, gx::ShaderConfig> g_gxCachedShaderConfigs; static absl::flat_hash_map<ShaderRef, gx::ShaderConfig> g_gxCachedShaderConfigs;
#endif #endif
@ -701,7 +701,7 @@ ShaderInfo build_shader_info(const ShaderConfig& config) noexcept {
return info; return info;
} }
WGPUShaderModule build_shader(const ShaderConfig& config, const ShaderInfo& info) noexcept { wgpu::ShaderModule build_shader(const ShaderConfig& config, const ShaderInfo& info) noexcept {
const auto hash = xxh3_hash(config); const auto hash = xxh3_hash(config);
const auto it = g_gxCachedShaders.find(hash); const auto it = g_gxCachedShaders.find(hash);
if (it != g_gxCachedShaders.end()) { if (it != g_gxCachedShaders.end()) {
@ -1371,16 +1371,14 @@ fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {{{8}{7}
Log.report(LOG_INFO, FMT_STRING("Generated shader: {}"), shaderSource); Log.report(LOG_INFO, FMT_STRING("Generated shader: {}"), shaderSource);
} }
const WGPUShaderModuleWGSLDescriptor wgslDescriptor{ wgpu::ShaderModuleWGSLDescriptor wgslDescriptor{};
.chain = {.sType = WGPUSType_ShaderModuleWGSLDescriptor}, wgslDescriptor.source = shaderSource.c_str();
.source = shaderSource.c_str(),
};
const auto label = fmt::format(FMT_STRING("GX Shader {:x}"), hash); const auto label = fmt::format(FMT_STRING("GX Shader {:x}"), hash);
const auto shaderDescriptor = WGPUShaderModuleDescriptor{ const auto shaderDescriptor = wgpu::ShaderModuleDescriptor{
.nextInChain = &wgslDescriptor.chain, .nextInChain = &wgslDescriptor,
.label = label.c_str(), .label = label.c_str(),
}; };
auto shader = wgpuDeviceCreateShaderModule(webgpu::g_device, &shaderDescriptor); auto shader = webgpu::g_device.CreateShaderModule(&shaderDescriptor);
auto pair = std::make_pair(shader, info); auto pair = std::make_pair(shader, info);
g_gxCachedShaders.emplace(hash, pair); g_gxCachedShaders.emplace(hash, pair);
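
Aside (illustrative, assumes Dawn's webgpu_cpp.h): wgpu::ShaderModuleWGSLDescriptor derives from wgpu::ChainedStruct and its constructor fills in sType, which is why the explicit .chain = {.sType = ...} initializer disappears above; the extension struct is simply hooked onto nextInChain:

// Chained-descriptor pattern used by build_shader (sketch only).
static wgpu::ShaderModule make_wgsl_module(const wgpu::Device& device, const char* wgsl, const char* label) {
  wgpu::ShaderModuleWGSLDescriptor wgslDesc{}; // ChainedStruct; ctor sets sType
  wgslDesc.source = wgsl;
  const wgpu::ShaderModuleDescriptor desc{
      .nextInChain = &wgslDesc, // link the WGSL source into the descriptor chain
      .label = label,
  };
  return device.CreateShaderModule(&desc);
}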


@ -371,11 +371,11 @@ void queue_surface(const u8* dlStart, u32 dlSize) noexcept {
State construct_state() { return {}; } State construct_state() { return {}; }
WGPURenderPipeline create_pipeline(const State& state, [[maybe_unused]] const PipelineConfig& config) { wgpu::RenderPipeline create_pipeline(const State& state, [[maybe_unused]] const PipelineConfig& config) {
const auto info = build_shader_info(config.shaderConfig); // TODO remove const auto info = build_shader_info(config.shaderConfig); // TODO remove
const auto shader = build_shader(config.shaderConfig, info); const auto shader = build_shader(config.shaderConfig, info);
std::array<WGPUVertexAttribute, gx::MaxVtxAttr> vtxAttrs{}; std::array<wgpu::VertexAttribute, gx::MaxVtxAttr> vtxAttrs{};
auto [num4xAttr, rem] = std::div(config.shaderConfig.indexedAttributeCount, 4); auto [num4xAttr, rem] = std::div(config.shaderConfig.indexedAttributeCount, 4);
u32 num2xAttr = 0; u32 num2xAttr = 0;
if (rem > 2) { if (rem > 2) {
@ -390,7 +390,7 @@ WGPURenderPipeline create_pipeline(const State& state, [[maybe_unused]] const Pi
// Indexed attributes // Indexed attributes
for (u32 i = 0; i < num4xAttr; ++i) { for (u32 i = 0; i < num4xAttr; ++i) {
vtxAttrs[shaderLocation] = { vtxAttrs[shaderLocation] = {
.format = WGPUVertexFormat_Sint16x4, .format = wgpu::VertexFormat::Sint16x4,
.offset = offset, .offset = offset,
.shaderLocation = shaderLocation, .shaderLocation = shaderLocation,
}; };
@ -399,7 +399,7 @@ WGPURenderPipeline create_pipeline(const State& state, [[maybe_unused]] const Pi
} }
for (u32 i = 0; i < num2xAttr; ++i) { for (u32 i = 0; i < num2xAttr; ++i) {
vtxAttrs[shaderLocation] = { vtxAttrs[shaderLocation] = {
.format = WGPUVertexFormat_Sint16x2, .format = wgpu::VertexFormat::Sint16x2,
.offset = offset, .offset = offset,
.shaderLocation = shaderLocation, .shaderLocation = shaderLocation,
}; };
@ -417,8 +417,8 @@ WGPURenderPipeline create_pipeline(const State& state, [[maybe_unused]] const Pi
switch (attr) { switch (attr) {
case GX_VA_POS: case GX_VA_POS:
case GX_VA_NRM: case GX_VA_NRM:
vtxAttrs[shaderLocation] = WGPUVertexAttribute{ vtxAttrs[shaderLocation] = wgpu::VertexAttribute{
.format = WGPUVertexFormat_Float32x3, .format = wgpu::VertexFormat::Float32x3,
.offset = offset, .offset = offset,
.shaderLocation = shaderLocation, .shaderLocation = shaderLocation,
}; };
@ -426,8 +426,8 @@ WGPURenderPipeline create_pipeline(const State& state, [[maybe_unused]] const Pi
break; break;
case GX_VA_CLR0: case GX_VA_CLR0:
case GX_VA_CLR1: case GX_VA_CLR1:
vtxAttrs[shaderLocation] = WGPUVertexAttribute{ vtxAttrs[shaderLocation] = wgpu::VertexAttribute{
.format = WGPUVertexFormat_Float32x4, .format = wgpu::VertexFormat::Float32x4,
.offset = offset, .offset = offset,
.shaderLocation = shaderLocation, .shaderLocation = shaderLocation,
}; };
@ -441,8 +441,8 @@ WGPURenderPipeline create_pipeline(const State& state, [[maybe_unused]] const Pi
case GX_VA_TEX5: case GX_VA_TEX5:
case GX_VA_TEX6: case GX_VA_TEX6:
case GX_VA_TEX7: case GX_VA_TEX7:
vtxAttrs[shaderLocation] = WGPUVertexAttribute{ vtxAttrs[shaderLocation] = wgpu::VertexAttribute{
.format = WGPUVertexFormat_Float32x2, .format = wgpu::VertexFormat::Float32x2,
.offset = offset, .offset = offset,
.shaderLocation = shaderLocation, .shaderLocation = shaderLocation,
}; };
@ -454,9 +454,9 @@ WGPURenderPipeline create_pipeline(const State& state, [[maybe_unused]] const Pi
++shaderLocation; ++shaderLocation;
} }
const std::array vtxBuffers{WGPUVertexBufferLayout{ const std::array vtxBuffers{wgpu::VertexBufferLayout{
.arrayStride = offset, .arrayStride = offset,
.stepMode = WGPUVertexStepMode_Vertex, .stepMode = wgpu::VertexStepMode::Vertex,
.attributeCount = shaderLocation, .attributeCount = shaderLocation,
.attributes = vtxAttrs.data(), .attributes = vtxAttrs.data(),
}}; }};
@ -464,7 +464,7 @@ WGPURenderPipeline create_pipeline(const State& state, [[maybe_unused]] const Pi
return build_pipeline(config, info, vtxBuffers, shader, "GX Pipeline"); return build_pipeline(config, info, vtxBuffers, shader, "GX Pipeline");
} }
void render(const State& state, const DrawData& data, const WGPURenderPassEncoder& pass) { void render(const State& state, const DrawData& data, const wgpu::RenderPassEncoder& pass) {
if (!bind_pipeline(data.pipeline, pass)) { if (!bind_pipeline(data.pipeline, pass)) {
return; return;
} }
@ -479,20 +479,18 @@ void render(const State& state, const DrawData& data, const WGPURenderPassEncode
offsets[bindIdx] = range.offset; offsets[bindIdx] = range.offset;
++bindIdx; ++bindIdx;
} }
wgpuRenderPassEncoderSetBindGroup(pass, 0, find_bind_group(data.bindGroups.uniformBindGroup), bindIdx, pass.SetBindGroup(0, find_bind_group(data.bindGroups.uniformBindGroup), bindIdx, offsets.data());
offsets.data());
if (data.bindGroups.samplerBindGroup && data.bindGroups.textureBindGroup) { if (data.bindGroups.samplerBindGroup && data.bindGroups.textureBindGroup) {
wgpuRenderPassEncoderSetBindGroup(pass, 1, find_bind_group(data.bindGroups.samplerBindGroup), 0, nullptr); pass.SetBindGroup(1, find_bind_group(data.bindGroups.samplerBindGroup));
wgpuRenderPassEncoderSetBindGroup(pass, 2, find_bind_group(data.bindGroups.textureBindGroup), 0, nullptr); pass.SetBindGroup(2, find_bind_group(data.bindGroups.textureBindGroup));
} }
wgpuRenderPassEncoderSetVertexBuffer(pass, 0, g_vertexBuffer, data.vertRange.offset, data.vertRange.size); pass.SetVertexBuffer(0, g_vertexBuffer, data.vertRange.offset, data.vertRange.size);
wgpuRenderPassEncoderSetIndexBuffer(pass, g_indexBuffer, WGPUIndexFormat_Uint16, data.idxRange.offset, pass.SetIndexBuffer(g_indexBuffer, wgpu::IndexFormat::Uint16, data.idxRange.offset, data.idxRange.size);
data.idxRange.size);
if (data.dstAlpha != UINT32_MAX) { if (data.dstAlpha != UINT32_MAX) {
const WGPUColor color{0.f, 0.f, 0.f, data.dstAlpha / 255.f}; const wgpu::Color color{0.f, 0.f, 0.f, data.dstAlpha / 255.f};
wgpuRenderPassEncoderSetBlendConstant(pass, &color); pass.SetBlendConstant(&color);
} }
wgpuRenderPassEncoderDrawIndexed(pass, data.indexCount, 1, 0, 0, 0); pass.DrawIndexed(data.indexCount);
} }
} // namespace aurora::gfx::model } // namespace aurora::gfx::model
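
Aside (sketch, not part of the commit): the C++ RenderPassEncoder methods default the trailing dynamicOffsetCount / dynamicOffsets parameters, which is why the trailing 0, nullptr arguments of the C calls vanish above while group 0 still passes its dynamic uniform offsets explicitly:

// Bind-group call pattern used in render() above (function and parameter names are illustrative).
static void set_bind_groups(const wgpu::RenderPassEncoder& pass, const wgpu::BindGroup& uniforms,
                            const wgpu::BindGroup& samplers, const wgpu::BindGroup& textures,
                            uint32_t offsetCount, const uint32_t* offsets) {
  pass.SetBindGroup(0, uniforms, offsetCount, offsets); // dynamic offsets for uniform/storage ranges
  pass.SetBindGroup(1, samplers);                       // defaults: dynamicOffsetCount = 0, dynamicOffsets = nullptr
  pass.SetBindGroup(2, textures);
}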


@ -20,8 +20,8 @@ struct PipelineConfig : gx::PipelineConfig {};
struct State {}; struct State {};
State construct_state(); State construct_state();
WGPURenderPipeline create_pipeline(const State& state, [[maybe_unused]] const PipelineConfig& config); wgpu::RenderPipeline create_pipeline(const State& state, [[maybe_unused]] const PipelineConfig& config);
void render(const State& state, const DrawData& data, const WGPURenderPassEncoder& pass); void render(const State& state, const DrawData& data, const wgpu::RenderPassEncoder& pass);
void queue_surface(const u8* dlStart, u32 dlSize) noexcept; void queue_surface(const u8* dlStart, u32 dlSize) noexcept;
} // namespace aurora::gfx::model } // namespace aurora::gfx::model


@ -7,21 +7,21 @@ static Module Log("aurora::gfx::stream");
using webgpu::g_device; using webgpu::g_device;
WGPURenderPipeline create_pipeline(const State& state, [[maybe_unused]] const PipelineConfig& config) { wgpu::RenderPipeline create_pipeline(const State& state, [[maybe_unused]] const PipelineConfig& config) {
const auto info = build_shader_info(config.shaderConfig); // TODO remove const auto info = build_shader_info(config.shaderConfig); // TODO remove
const auto shader = build_shader(config.shaderConfig, info); const auto shader = build_shader(config.shaderConfig, info);
std::array<WGPUVertexAttribute, 4> attributes{}; std::array<wgpu::VertexAttribute, 4> attributes{};
attributes[0] = WGPUVertexAttribute{ attributes[0] = wgpu::VertexAttribute{
.format = WGPUVertexFormat_Float32x3, .format = wgpu::VertexFormat::Float32x3,
.offset = 0, .offset = 0,
.shaderLocation = 0, .shaderLocation = 0,
}; };
uint64_t offset = 12; uint64_t offset = 12;
uint32_t shaderLocation = 1; uint32_t shaderLocation = 1;
if (config.shaderConfig.vtxAttrs[GX_VA_NRM] == GX_DIRECT) { if (config.shaderConfig.vtxAttrs[GX_VA_NRM] == GX_DIRECT) {
attributes[shaderLocation] = WGPUVertexAttribute{ attributes[shaderLocation] = wgpu::VertexAttribute{
.format = WGPUVertexFormat_Float32x3, .format = wgpu::VertexFormat::Float32x3,
.offset = offset, .offset = offset,
.shaderLocation = shaderLocation, .shaderLocation = shaderLocation,
}; };
@ -29,8 +29,8 @@ WGPURenderPipeline create_pipeline(const State& state, [[maybe_unused]] const Pi
shaderLocation++; shaderLocation++;
} }
if (config.shaderConfig.vtxAttrs[GX_VA_CLR0] == GX_DIRECT) { if (config.shaderConfig.vtxAttrs[GX_VA_CLR0] == GX_DIRECT) {
attributes[shaderLocation] = WGPUVertexAttribute{ attributes[shaderLocation] = wgpu::VertexAttribute{
.format = WGPUVertexFormat_Float32x4, .format = wgpu::VertexFormat::Float32x4,
.offset = offset, .offset = offset,
.shaderLocation = shaderLocation, .shaderLocation = shaderLocation,
}; };
@ -41,15 +41,15 @@ WGPURenderPipeline create_pipeline(const State& state, [[maybe_unused]] const Pi
if (config.shaderConfig.vtxAttrs[i] != GX_DIRECT) { if (config.shaderConfig.vtxAttrs[i] != GX_DIRECT) {
continue; continue;
} }
attributes[shaderLocation] = WGPUVertexAttribute{ attributes[shaderLocation] = wgpu::VertexAttribute{
.format = WGPUVertexFormat_Float32x2, .format = wgpu::VertexFormat::Float32x2,
.offset = offset, .offset = offset,
.shaderLocation = shaderLocation, .shaderLocation = shaderLocation,
}; };
offset += 8; offset += 8;
shaderLocation++; shaderLocation++;
} }
const std::array vertexBuffers{WGPUVertexBufferLayout{ const std::array vertexBuffers{wgpu::VertexBufferLayout{
.arrayStride = offset, .arrayStride = offset,
.attributeCount = shaderLocation, .attributeCount = shaderLocation,
.attributes = attributes.data(), .attributes = attributes.data(),
@ -60,25 +60,23 @@ WGPURenderPipeline create_pipeline(const State& state, [[maybe_unused]] const Pi
State construct_state() { return {}; } State construct_state() { return {}; }
void render(const State& state, const DrawData& data, const WGPURenderPassEncoder& pass) { void render(const State& state, const DrawData& data, const wgpu::RenderPassEncoder& pass) {
if (!bind_pipeline(data.pipeline, pass)) { if (!bind_pipeline(data.pipeline, pass)) {
return; return;
} }
const std::array offsets{data.uniformRange.offset}; const std::array offsets{data.uniformRange.offset};
wgpuRenderPassEncoderSetBindGroup(pass, 0, find_bind_group(data.bindGroups.uniformBindGroup), offsets.size(), pass.SetBindGroup(0, find_bind_group(data.bindGroups.uniformBindGroup), offsets.size(), offsets.data());
offsets.data());
if (data.bindGroups.samplerBindGroup && data.bindGroups.textureBindGroup) { if (data.bindGroups.samplerBindGroup && data.bindGroups.textureBindGroup) {
wgpuRenderPassEncoderSetBindGroup(pass, 1, find_bind_group(data.bindGroups.samplerBindGroup), 0, nullptr); pass.SetBindGroup(1, find_bind_group(data.bindGroups.samplerBindGroup));
wgpuRenderPassEncoderSetBindGroup(pass, 2, find_bind_group(data.bindGroups.textureBindGroup), 0, nullptr); pass.SetBindGroup(2, find_bind_group(data.bindGroups.textureBindGroup));
} }
wgpuRenderPassEncoderSetVertexBuffer(pass, 0, g_vertexBuffer, data.vertRange.offset, data.vertRange.size); pass.SetVertexBuffer(0, g_vertexBuffer, data.vertRange.offset, data.vertRange.size);
wgpuRenderPassEncoderSetIndexBuffer(pass, g_indexBuffer, WGPUIndexFormat_Uint16, data.indexRange.offset, pass.SetIndexBuffer(g_indexBuffer, wgpu::IndexFormat::Uint16, data.indexRange.offset, data.indexRange.size);
data.indexRange.size);
if (data.dstAlpha != UINT32_MAX) { if (data.dstAlpha != UINT32_MAX) {
const WGPUColor color{0.f, 0.f, 0.f, data.dstAlpha / 255.f}; const wgpu::Color color{0.f, 0.f, 0.f, data.dstAlpha / 255.f};
wgpuRenderPassEncoderSetBlendConstant(pass, &color); pass.SetBlendConstant(&color);
} }
wgpuRenderPassEncoderDrawIndexed(pass, data.indexCount, 1, 0, 0, 0); pass.DrawIndexed(data.indexCount);
} }
} // namespace aurora::gfx::stream } // namespace aurora::gfx::stream


@ -19,6 +19,6 @@ struct PipelineConfig : public gx::PipelineConfig {};
struct State {}; struct State {};
State construct_state(); State construct_state();
WGPURenderPipeline create_pipeline(const State& state, [[maybe_unused]] const PipelineConfig& config); wgpu::RenderPipeline create_pipeline(const State& state, [[maybe_unused]] const PipelineConfig& config);
void render(const State& state, const DrawData& data, const WGPURenderPassEncoder& pass); void render(const State& state, const DrawData& data, const wgpu::RenderPassEncoder& pass);
} // namespace aurora::gfx::stream } // namespace aurora::gfx::stream


@ -19,30 +19,30 @@ struct TextureFormatInfo {
uint8_t blockSize; uint8_t blockSize;
bool compressed; bool compressed;
}; };
static TextureFormatInfo format_info(WGPUTextureFormat format) { static TextureFormatInfo format_info(wgpu::TextureFormat format) {
switch (format) { switch (format) {
case WGPUTextureFormat_R8Unorm: case wgpu::TextureFormat::R8Unorm:
return {1, 1, 1, false}; return {1, 1, 1, false};
case WGPUTextureFormat_R16Sint: case wgpu::TextureFormat::R16Sint:
return {1, 1, 2, false}; return {1, 1, 2, false};
case WGPUTextureFormat_RGBA8Unorm: case wgpu::TextureFormat::RGBA8Unorm:
case WGPUTextureFormat_R32Float: case wgpu::TextureFormat::R32Float:
return {1, 1, 4, false}; return {1, 1, 4, false};
case WGPUTextureFormat_BC1RGBAUnorm: case wgpu::TextureFormat::BC1RGBAUnorm:
return {4, 4, 8, true}; return {4, 4, 8, true};
default: default:
Log.report(LOG_FATAL, FMT_STRING("format_info: unimplemented format {}"), magic_enum::enum_name(format)); Log.report(LOG_FATAL, FMT_STRING("format_info: unimplemented format {}"), magic_enum::enum_name(format));
unreachable(); unreachable();
} }
} }
static WGPUExtent3D physical_size(WGPUExtent3D size, TextureFormatInfo info) { static wgpu::Extent3D physical_size(wgpu::Extent3D size, TextureFormatInfo info) {
const uint32_t width = ((size.width + info.blockWidth - 1) / info.blockWidth) * info.blockWidth; const uint32_t width = ((size.width + info.blockWidth - 1) / info.blockWidth) * info.blockWidth;
const uint32_t height = ((size.height + info.blockHeight - 1) / info.blockHeight) * info.blockHeight; const uint32_t height = ((size.height + info.blockHeight - 1) / info.blockHeight) * info.blockHeight;
return {width, height, size.depthOrArrayLayers}; return {width, height, size.depthOrArrayLayers};
} }
TextureHandle new_static_texture_2d(uint32_t width, uint32_t height, uint32_t mips, u32 format, TextureHandle new_static_texture_2d(uint32_t width, uint32_t height, uint32_t mips, u32 format, ArrayRef<uint8_t> data,
ArrayRef<uint8_t> data, const char* label) noexcept { const char* label) noexcept {
auto handle = new_dynamic_texture_2d(width, height, mips, format, label); auto handle = new_dynamic_texture_2d(width, height, mips, format, label);
const auto& ref = *handle; const auto& ref = *handle;
@ -56,7 +56,7 @@ TextureHandle new_static_texture_2d(uint32_t width, uint32_t height, uint32_t mi
uint32_t offset = 0; uint32_t offset = 0;
for (uint32_t mip = 0; mip < mips; ++mip) { for (uint32_t mip = 0; mip < mips; ++mip) {
const WGPUExtent3D mipSize{ const wgpu::Extent3D mipSize{
.width = std::max(ref.size.width >> mip, 1u), .width = std::max(ref.size.width >> mip, 1u),
.height = std::max(ref.size.height >> mip, 1u), .height = std::max(ref.size.height >> mip, 1u),
.depthOrArrayLayers = ref.size.depthOrArrayLayers, .depthOrArrayLayers = ref.size.depthOrArrayLayers,
@ -72,19 +72,19 @@ TextureHandle new_static_texture_2d(uint32_t width, uint32_t height, uint32_t mi
offset + dataSize, data.size()); offset + dataSize, data.size());
unreachable(); unreachable();
} }
const WGPUImageCopyTexture dstView{ const wgpu::ImageCopyTexture dstView{
.texture = ref.texture, .texture = ref.texture,
.mipLevel = mip, .mipLevel = mip,
}; };
// const auto range = push_texture_data(data.data() + offset, dataSize, bytesPerRow, heightBlocks); // const auto range = push_texture_data(data.data() + offset, dataSize, bytesPerRow, heightBlocks);
const WGPUTextureDataLayout dataLayout{ const wgpu::TextureDataLayout dataLayout{
// .offset = range.offset, // .offset = range.offset,
.bytesPerRow = bytesPerRow, .bytesPerRow = bytesPerRow,
.rowsPerImage = heightBlocks, .rowsPerImage = heightBlocks,
}; };
// TODO // TODO
// g_textureUploads.emplace_back(dataLayout, std::move(dstView), physicalSize); // g_textureUploads.emplace_back(dataLayout, std::move(dstView), physicalSize);
wgpuQueueWriteTexture(g_queue, &dstView, data.data() + offset, dataSize, &dataLayout, &physicalSize); g_queue.WriteTexture(&dstView, data.data() + offset, dataSize, &dataLayout, &physicalSize);
offset += dataSize; offset += dataSize;
} }
if (data.size() != UINT32_MAX && offset < data.size()) { if (data.size() != UINT32_MAX && offset < data.size()) {
@ -97,60 +97,61 @@ TextureHandle new_static_texture_2d(uint32_t width, uint32_t height, uint32_t mi
TextureHandle new_dynamic_texture_2d(uint32_t width, uint32_t height, uint32_t mips, u32 format, TextureHandle new_dynamic_texture_2d(uint32_t width, uint32_t height, uint32_t mips, u32 format,
const char* label) noexcept { const char* label) noexcept {
const auto wgpuFormat = to_wgpu(format); const auto wgpuFormat = to_wgpu(format);
const WGPUExtent3D size{ const wgpu::Extent3D size{
.width = width, .width = width,
.height = height, .height = height,
.depthOrArrayLayers = 1, .depthOrArrayLayers = 1,
}; };
const WGPUTextureDescriptor textureDescriptor{ const wgpu::TextureDescriptor textureDescriptor{
.label = label, .label = label,
.usage = WGPUTextureUsage_TextureBinding | WGPUTextureUsage_CopyDst, .usage = wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopyDst,
.dimension = WGPUTextureDimension_2D, .dimension = wgpu::TextureDimension::e2D,
.size = size, .size = size,
.format = wgpuFormat, .format = wgpuFormat,
.mipLevelCount = mips, .mipLevelCount = mips,
.sampleCount = 1, .sampleCount = 1,
}; };
const auto viewLabel = fmt::format(FMT_STRING("{} view"), label); const auto viewLabel = fmt::format(FMT_STRING("{} view"), label);
const WGPUTextureViewDescriptor textureViewDescriptor{ const wgpu::TextureViewDescriptor textureViewDescriptor{
.label = viewLabel.c_str(), .label = viewLabel.c_str(),
.format = wgpuFormat, .format = wgpuFormat,
.dimension = WGPUTextureViewDimension_2D, .dimension = wgpu::TextureViewDimension::e2D,
.mipLevelCount = mips, .mipLevelCount = mips,
.arrayLayerCount = WGPU_ARRAY_LAYER_COUNT_UNDEFINED, .arrayLayerCount = WGPU_ARRAY_LAYER_COUNT_UNDEFINED,
}; };
auto texture = wgpuDeviceCreateTexture(g_device, &textureDescriptor); auto texture = g_device.CreateTexture(&textureDescriptor);
auto textureView = wgpuTextureCreateView(texture, &textureViewDescriptor); auto textureView = texture.CreateView(&textureViewDescriptor);
return std::make_shared<TextureRef>(texture, textureView, size, wgpuFormat, mips, format, false); return std::make_shared<TextureRef>(std::move(texture), std::move(textureView), size, wgpuFormat, mips, format,
false);
} }
TextureHandle new_render_texture(uint32_t width, uint32_t height, u32 fmt, const char* label) noexcept { TextureHandle new_render_texture(uint32_t width, uint32_t height, u32 fmt, const char* label) noexcept {
const auto wgpuFormat = webgpu::g_graphicsConfig.colorFormat; const auto wgpuFormat = webgpu::g_graphicsConfig.swapChainDescriptor.format;
const WGPUExtent3D size{ const wgpu::Extent3D size{
.width = width, .width = width,
.height = height, .height = height,
.depthOrArrayLayers = 1, .depthOrArrayLayers = 1,
}; };
const WGPUTextureDescriptor textureDescriptor{ const wgpu::TextureDescriptor textureDescriptor{
.label = label, .label = label,
.usage = WGPUTextureUsage_TextureBinding | WGPUTextureUsage_CopyDst, .usage = wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopyDst,
.dimension = WGPUTextureDimension_2D, .dimension = wgpu::TextureDimension::e2D,
.size = size, .size = size,
.format = wgpuFormat, .format = wgpuFormat,
.mipLevelCount = 1, .mipLevelCount = 1,
.sampleCount = 1, .sampleCount = 1,
}; };
const auto viewLabel = fmt::format(FMT_STRING("{} view"), label); const auto viewLabel = fmt::format(FMT_STRING("{} view"), label);
const WGPUTextureViewDescriptor textureViewDescriptor{ const wgpu::TextureViewDescriptor textureViewDescriptor{
.label = viewLabel.c_str(), .label = viewLabel.c_str(),
.format = wgpuFormat, .format = wgpuFormat,
.dimension = WGPUTextureViewDimension_2D, .dimension = wgpu::TextureViewDimension::e2D,
.mipLevelCount = WGPU_MIP_LEVEL_COUNT_UNDEFINED, .mipLevelCount = WGPU_MIP_LEVEL_COUNT_UNDEFINED,
.arrayLayerCount = WGPU_ARRAY_LAYER_COUNT_UNDEFINED, .arrayLayerCount = WGPU_ARRAY_LAYER_COUNT_UNDEFINED,
}; };
auto texture = wgpuDeviceCreateTexture(g_device, &textureDescriptor); auto texture = g_device.CreateTexture(&textureDescriptor);
auto textureView = wgpuTextureCreateView(texture, &textureViewDescriptor); auto textureView = texture.CreateView(&textureViewDescriptor);
return std::make_shared<TextureRef>(texture, textureView, size, wgpuFormat, 1, fmt, true); return std::make_shared<TextureRef>(std::move(texture), std::move(textureView), size, wgpuFormat, 1, fmt, true);
} }
void write_texture(const TextureRef& ref, ArrayRef<uint8_t> data) noexcept { void write_texture(const TextureRef& ref, ArrayRef<uint8_t> data) noexcept {
@ -164,7 +165,7 @@ void write_texture(const TextureRef& ref, ArrayRef<uint8_t> data) noexcept {
uint32_t offset = 0; uint32_t offset = 0;
for (uint32_t mip = 0; mip < ref.mipCount; ++mip) { for (uint32_t mip = 0; mip < ref.mipCount; ++mip) {
const WGPUExtent3D mipSize{ const wgpu::Extent3D mipSize{
.width = std::max(ref.size.width >> mip, 1u), .width = std::max(ref.size.width >> mip, 1u),
.height = std::max(ref.size.height >> mip, 1u), .height = std::max(ref.size.height >> mip, 1u),
.depthOrArrayLayers = ref.size.depthOrArrayLayers, .depthOrArrayLayers = ref.size.depthOrArrayLayers,
@ -180,26 +181,26 @@ void write_texture(const TextureRef& ref, ArrayRef<uint8_t> data) noexcept {
data.size()); data.size());
unreachable(); unreachable();
} }
// auto dstView = WGPUImageCopyTexture{ // auto dstView = wgpu::ImageCopyTexture{
// .texture = ref.texture, // .texture = ref.texture,
// .mipLevel = mip, // .mipLevel = mip,
// }; // };
// const auto range = push_texture_data(data.data() + offset, dataSize, bytesPerRow, heightBlocks); // const auto range = push_texture_data(data.data() + offset, dataSize, bytesPerRow, heightBlocks);
// const auto dataLayout = WGPUTextureDataLayout{ // const auto dataLayout = wgpu::TextureDataLayout{
// .offset = range.offset, // .offset = range.offset,
// .bytesPerRow = bytesPerRow, // .bytesPerRow = bytesPerRow,
// .rowsPerImage = heightBlocks, // .rowsPerImage = heightBlocks,
// }; // };
// g_textureUploads.emplace_back(dataLayout, std::move(dstView), physicalSize); // g_textureUploads.emplace_back(dataLayout, std::move(dstView), physicalSize);
const WGPUImageCopyTexture dstView{ const wgpu::ImageCopyTexture dstView{
.texture = ref.texture, .texture = ref.texture,
.mipLevel = mip, .mipLevel = mip,
}; };
const WGPUTextureDataLayout dataLayout{ const wgpu::TextureDataLayout dataLayout{
.bytesPerRow = bytesPerRow, .bytesPerRow = bytesPerRow,
.rowsPerImage = heightBlocks, .rowsPerImage = heightBlocks,
}; };
wgpuQueueWriteTexture(g_queue, &dstView, data.data() + offset, dataSize, &dataLayout, &physicalSize); g_queue.WriteTexture(&dstView, data.data() + offset, dataSize, &dataLayout, &physicalSize);
offset += dataSize; offset += dataSize;
} }
if (data.size() != UINT32_MAX && offset < data.size()) { if (data.size() != UINT32_MAX && offset < data.size()) {
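
Aside (sketch only, reusing the format_info()/physical_size() helpers defined earlier in this file): the layout handed to Queue::WriteTexture above is derived from block dimensions, so bytesPerRow is the byte size of one row of blocks (e.g. 8 bytes per 4x4 block for BC1) and rowsPerImage counts block rows:

// Per-mip upload math behind the WriteTexture calls above (illustrative).
static void upload_mip(const wgpu::Queue& queue, const wgpu::Texture& texture, uint32_t mip,
                       const wgpu::Extent3D& mipSize, const TextureFormatInfo& info, const uint8_t* src) {
  const wgpu::Extent3D physical = physical_size(mipSize, info);   // round up to whole blocks
  const uint32_t widthBlocks = physical.width / info.blockWidth;
  const uint32_t heightBlocks = physical.height / info.blockHeight;
  const uint32_t bytesPerRow = widthBlocks * info.blockSize;      // bytes in one row of blocks
  const uint32_t dataSize = bytesPerRow * heightBlocks * physical.depthOrArrayLayers;
  const wgpu::ImageCopyTexture dst{.texture = texture, .mipLevel = mip};
  const wgpu::TextureDataLayout layout{.bytesPerRow = bytesPerRow, .rowsPerImage = heightBlocks};
  queue.WriteTexture(&dst, src, dataSize, &layout, &physical);
}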


@ -5,45 +5,40 @@
namespace aurora::gfx { namespace aurora::gfx {
struct TextureUpload { struct TextureUpload {
WGPUTextureDataLayout layout; wgpu::TextureDataLayout layout;
WGPUImageCopyTexture tex; wgpu::ImageCopyTexture tex;
WGPUExtent3D size; wgpu::Extent3D size;
TextureUpload(WGPUTextureDataLayout layout, WGPUImageCopyTexture tex, WGPUExtent3D size) noexcept TextureUpload(wgpu::TextureDataLayout layout, wgpu::ImageCopyTexture tex, wgpu::Extent3D size) noexcept
: layout(layout), tex(tex), size(size) {} : layout(layout), tex(tex), size(size) {}
}; };
extern std::vector<TextureUpload> g_textureUploads; extern std::vector<TextureUpload> g_textureUploads;
constexpr u32 InvalidTextureFormat = -1; constexpr u32 InvalidTextureFormat = -1;
struct TextureRef { struct TextureRef {
WGPUTexture texture; wgpu::Texture texture;
WGPUTextureView view; wgpu::TextureView view;
WGPUExtent3D size; wgpu::Extent3D size;
WGPUTextureFormat format; wgpu::TextureFormat format;
uint32_t mipCount; uint32_t mipCount;
u32 gxFormat; u32 gxFormat;
bool isRenderTexture; // :shrug: for now bool isRenderTexture; // :shrug: for now
TextureRef(WGPUTexture texture, WGPUTextureView view, WGPUExtent3D size, WGPUTextureFormat format, uint32_t mipCount, TextureRef(wgpu::Texture texture, wgpu::TextureView view, wgpu::Extent3D size, wgpu::TextureFormat format,
u32 gxFormat, bool isRenderTexture) uint32_t mipCount, u32 gxFormat, bool isRenderTexture)
: texture(texture) : texture(std::move(texture))
, view(view) , view(std::move(view))
, size(size) , size(size)
, format(format) , format(format)
, mipCount(mipCount) , mipCount(mipCount)
, gxFormat(gxFormat) , gxFormat(gxFormat)
, isRenderTexture(isRenderTexture) {} , isRenderTexture(isRenderTexture) {}
~TextureRef() {
wgpuTextureViewRelease(view);
wgpuTextureRelease(texture);
}
}; };
using TextureHandle = std::shared_ptr<TextureRef>; using TextureHandle = std::shared_ptr<TextureRef>;
TextureHandle new_static_texture_2d(uint32_t width, uint32_t height, uint32_t mips, u32 format, TextureHandle new_static_texture_2d(uint32_t width, uint32_t height, uint32_t mips, u32 format, ArrayRef<uint8_t> data,
ArrayRef<uint8_t> data, const char* label) noexcept; const char* label) noexcept;
TextureHandle new_dynamic_texture_2d(uint32_t width, uint32_t height, uint32_t mips, u32 format, TextureHandle new_dynamic_texture_2d(uint32_t width, uint32_t height, uint32_t mips, u32 format,
const char* label) noexcept; const char* label) noexcept;
TextureHandle new_render_texture(uint32_t width, uint32_t height, u32 fmt, const char* label) noexcept; TextureHandle new_render_texture(uint32_t width, uint32_t height, u32 fmt, const char* label) noexcept;
@ -84,7 +79,7 @@ struct TextureBind {
TextureBind() noexcept = default; TextureBind() noexcept = default;
TextureBind(GXTexObj_ obj) noexcept : texObj(std::move(obj)) {} TextureBind(GXTexObj_ obj) noexcept : texObj(std::move(obj)) {}
void reset() noexcept { texObj.ref.reset(); }; void reset() noexcept { texObj.ref.reset(); };
[[nodiscard]] WGPUSamplerDescriptor get_descriptor() const noexcept; [[nodiscard]] wgpu::SamplerDescriptor get_descriptor() const noexcept;
operator bool() const noexcept { return texObj.ref.operator bool(); } operator bool() const noexcept { return texObj.ref.operator bool(); }
}; };
} // namespace aurora::gfx } // namespace aurora::gfx


@ -597,7 +597,7 @@ ByteBuffer convert_texture(u32 format, uint32_t width, uint32_t height, uint32_t
case GX_TF_RGBA8: case GX_TF_RGBA8:
return BuildRGBA8FromGCN(width, height, mips, data); return BuildRGBA8FromGCN(width, height, mips, data);
case GX_TF_CMPR: case GX_TF_CMPR:
if (wgpuDeviceHasFeature(webgpu::g_device, WGPUFeatureName_TextureCompressionBC)) { if (webgpu::g_device.HasFeature(wgpu::FeatureName::TextureCompressionBC)) {
return BuildDXT1FromGCN(width, height, mips, data); return BuildDXT1FromGCN(width, height, mips, data);
} else { } else {
return BuildRGBA8FromCMPR(width, height, mips, data); return BuildRGBA8FromCMPR(width, height, mips, data);


@ -5,23 +5,23 @@
#include "../webgpu/gpu.hpp" #include "../webgpu/gpu.hpp"
namespace aurora::gfx { namespace aurora::gfx {
static WGPUTextureFormat to_wgpu(u32 format) { static wgpu::TextureFormat to_wgpu(u32 format) {
switch (format) { switch (format) {
case GX_TF_I4: case GX_TF_I4:
case GX_TF_I8: case GX_TF_I8:
case GX_TF_R8_PC: case GX_TF_R8_PC:
return WGPUTextureFormat_R8Unorm; return wgpu::TextureFormat::R8Unorm;
case GX_TF_C4: case GX_TF_C4:
case GX_TF_C8: case GX_TF_C8:
case GX_TF_C14X2: case GX_TF_C14X2:
return WGPUTextureFormat_R16Sint; return wgpu::TextureFormat::R16Sint;
case GX_TF_CMPR: case GX_TF_CMPR:
if (wgpuDeviceHasFeature(webgpu::g_device, WGPUFeatureName_TextureCompressionBC)) { if (webgpu::g_device.HasFeature(wgpu::FeatureName::TextureCompressionBC)) {
return WGPUTextureFormat_BC1RGBAUnorm; return wgpu::TextureFormat::BC1RGBAUnorm;
} }
[[fallthrough]]; [[fallthrough]];
default: default:
return WGPUTextureFormat_RGBA8Unorm; return wgpu::TextureFormat::RGBA8Unorm;
} }
} }


@ -18,7 +18,7 @@ static std::string g_imguiLog{};
static bool g_useSdlRenderer = false; static bool g_useSdlRenderer = false;
static std::vector<SDL_Texture*> g_sdlTextures; static std::vector<SDL_Texture*> g_sdlTextures;
static std::vector<WGPUTexture> g_wgpuTextures; static std::vector<wgpu::Texture> g_wgpuTextures;
void create_context() noexcept { void create_context() noexcept {
IMGUI_CHECKVERSION(); IMGUI_CHECKVERSION();
@ -41,7 +41,8 @@ void initialize() noexcept {
if (g_useSdlRenderer) { if (g_useSdlRenderer) {
ImGui_ImplSDLRenderer_Init(renderer); ImGui_ImplSDLRenderer_Init(renderer);
} else { } else {
ImGui_ImplWGPU_Init(webgpu::g_device, 1, webgpu::g_graphicsConfig.colorFormat); const auto format = webgpu::g_graphicsConfig.swapChainDescriptor.format;
ImGui_ImplWGPU_Init(webgpu::g_device.Get(), 1, static_cast<WGPUTextureFormat>(format));
} }
} }
@ -57,9 +58,6 @@ void shutdown() noexcept {
SDL_DestroyTexture(texture); SDL_DestroyTexture(texture);
} }
g_sdlTextures.clear(); g_sdlTextures.clear();
for (const auto& texture : g_wgpuTextures) {
wgpuTextureDestroy(texture);
}
g_wgpuTextures.clear(); g_wgpuTextures.clear();
} }
@ -99,7 +97,7 @@ void new_frame(const AuroraWindowSize& size) noexcept {
ImGui::NewFrame(); ImGui::NewFrame();
} }
void render(WGPURenderPassEncoder pass) noexcept { void render(const wgpu::RenderPassEncoder& pass) noexcept {
ImGui::Render(); ImGui::Render();
auto* data = ImGui::GetDrawData(); auto* data = ImGui::GetDrawData();
@ -111,7 +109,7 @@ void render(WGPURenderPassEncoder pass) noexcept {
ImGui_ImplSDLRenderer_RenderDrawData(data); ImGui_ImplSDLRenderer_RenderDrawData(data);
SDL_RenderPresent(renderer); SDL_RenderPresent(renderer);
} else { } else {
ImGui_ImplWGPU_RenderDrawData(data, pass); ImGui_ImplWGPU_RenderDrawData(data, pass.Get());
} }
} }
@ -124,41 +122,41 @@ ImTextureID add_texture(uint32_t width, uint32_t height, const uint8_t* data) no
g_sdlTextures.push_back(texture); g_sdlTextures.push_back(texture);
return texture; return texture;
} }
const auto size = WGPUExtent3D{ const wgpu::Extent3D size{
.width = width, .width = width,
.height = height, .height = height,
.depthOrArrayLayers = 1, .depthOrArrayLayers = 1,
}; };
const auto textureDescriptor = WGPUTextureDescriptor{ const wgpu::TextureDescriptor textureDescriptor{
.label = "imgui texture", .label = "imgui texture",
.usage = WGPUTextureUsage_TextureBinding | WGPUTextureUsage_CopyDst, .usage = wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopyDst,
.dimension = WGPUTextureDimension_2D, .dimension = wgpu::TextureDimension::e2D,
.size = size, .size = size,
.format = WGPUTextureFormat_RGBA8Unorm, .format = wgpu::TextureFormat::RGBA8Unorm,
.mipLevelCount = 1, .mipLevelCount = 1,
.sampleCount = 1, .sampleCount = 1,
}; };
const auto textureViewDescriptor = WGPUTextureViewDescriptor{ const wgpu::TextureViewDescriptor textureViewDescriptor{
.label = "imgui texture view", .label = "imgui texture view",
.format = WGPUTextureFormat_RGBA8Unorm, .format = wgpu::TextureFormat::RGBA8Unorm,
.dimension = WGPUTextureViewDimension_2D, .dimension = wgpu::TextureViewDimension::e2D,
.mipLevelCount = WGPU_MIP_LEVEL_COUNT_UNDEFINED, .mipLevelCount = WGPU_MIP_LEVEL_COUNT_UNDEFINED,
.arrayLayerCount = WGPU_ARRAY_LAYER_COUNT_UNDEFINED, .arrayLayerCount = WGPU_ARRAY_LAYER_COUNT_UNDEFINED,
}; };
auto texture = wgpuDeviceCreateTexture(webgpu::g_device, &textureDescriptor); auto texture = webgpu::g_device.CreateTexture(&textureDescriptor);
auto textureView = wgpuTextureCreateView(texture, &textureViewDescriptor); auto textureView = texture.CreateView(&textureViewDescriptor);
{ {
const auto dstView = WGPUImageCopyTexture{ const wgpu::ImageCopyTexture dstView{
.texture = texture, .texture = texture,
}; };
const auto dataLayout = WGPUTextureDataLayout{ const wgpu::TextureDataLayout dataLayout{
.bytesPerRow = 4 * width, .bytesPerRow = 4 * width,
.rowsPerImage = height, .rowsPerImage = height,
}; };
wgpuQueueWriteTexture(webgpu::g_queue, &dstView, data, width * height * 4, &dataLayout, &size); webgpu::g_queue.WriteTexture(&dstView, data, width * height * 4, &dataLayout, &size);
} }
g_wgpuTextures.push_back(texture); g_wgpuTextures.push_back(texture);
return textureView; return textureView.Release();
} }
} // namespace aurora::imgui } // namespace aurora::imgui
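
Aside (illustrative): the ImGui backend and ImTextureID still speak the C API, so the wrappers are bridged with Get() (borrow the raw handle; the wrapper keeps its reference) or Release() (detach the raw handle and give up ownership, as add_texture does for the returned view):

// Bridging wgpu:: wrappers back to raw C handles (sketch only).
static void bridge_sketch(const wgpu::RenderPassEncoder& pass, wgpu::TextureView view) {
  WGPURenderPassEncoder raw = pass.Get();    // borrowed; still owned and released by the wrapper
  WGPUTextureView detached = view.Release(); // detached; the caller is now responsible for its lifetime
  (void)raw;
  (void)detached;
}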


@ -5,7 +5,10 @@
#include <string_view> #include <string_view>
union SDL_Event; union SDL_Event;
typedef struct WGPURenderPassEncoderImpl* WGPURenderPassEncoder;
namespace wgpu {
class RenderPassEncoder;
} // namespace wgpu
namespace aurora::imgui { namespace aurora::imgui {
void create_context() noexcept; void create_context() noexcept;
@ -14,5 +17,5 @@ void shutdown() noexcept;
void process_event(const SDL_Event& event) noexcept; void process_event(const SDL_Event& event) noexcept;
void new_frame(const AuroraWindowSize& size) noexcept; void new_frame(const AuroraWindowSize& size) noexcept;
void render(WGPURenderPassEncoder pass) noexcept; void render(const wgpu::RenderPassEncoder& pass) noexcept;
} // namespace aurora::imgui } // namespace aurora::imgui


@ -6,140 +6,149 @@
#include "../internal.hpp" #include "../internal.hpp"
#include <SDL.h> #include <SDL.h>
#include <dawn/native/DawnNative.h>
#include <magic_enum.hpp> #include <magic_enum.hpp>
#include <memory> #include <memory>
#include <algorithm> #include <algorithm>
#ifdef WEBGPU_DAWN
#include <dawn/native/DawnNative.h>
#include "../dawn/BackendBinding.hpp" #include "../dawn/BackendBinding.hpp"
#endif
namespace aurora::webgpu { namespace aurora::webgpu {
static Module Log("aurora::gpu"); static Module Log("aurora::gpu");
WGPUDevice g_device; wgpu::Device g_device;
WGPUQueue g_queue; wgpu::Queue g_queue;
WGPUSwapChain g_swapChain; wgpu::SwapChain g_swapChain;
WGPUBackendType g_backendType; wgpu::BackendType g_backendType;
GraphicsConfig g_graphicsConfig; GraphicsConfig g_graphicsConfig;
TextureWithSampler g_frameBuffer; TextureWithSampler g_frameBuffer;
TextureWithSampler g_frameBufferResolved; TextureWithSampler g_frameBufferResolved;
TextureWithSampler g_depthBuffer; TextureWithSampler g_depthBuffer;
// EFB -> XFB copy pipeline // EFB -> XFB copy pipeline
static WGPUBindGroupLayout g_CopyBindGroupLayout; static wgpu::BindGroupLayout g_CopyBindGroupLayout;
WGPURenderPipeline g_CopyPipeline; wgpu::RenderPipeline g_CopyPipeline;
WGPUBindGroup g_CopyBindGroup; wgpu::BindGroup g_CopyBindGroup;
static std::unique_ptr<dawn::native::Instance> g_Instance; #ifdef WEBGPU_DAWN
static dawn::native::Adapter g_Adapter; static std::unique_ptr<dawn::native::Instance> g_dawnInstance;
static WGPUAdapterProperties g_AdapterProperties; static dawn::native::Adapter g_adapter;
static std::unique_ptr<utils::BackendBinding> g_BackendBinding; static std::unique_ptr<utils::BackendBinding> g_backendBinding;
#else
wgpu::Instance g_instance;
static wgpu::Adapter g_adapter;
#endif
static wgpu::Surface g_surface;
static wgpu::AdapterProperties g_adapterProperties;
TextureWithSampler create_render_texture(bool multisampled) { TextureWithSampler create_render_texture(bool multisampled) {
const WGPUExtent3D size{ const wgpu::Extent3D size{
.width = g_graphicsConfig.width, .width = g_graphicsConfig.swapChainDescriptor.width,
.height = g_graphicsConfig.height, .height = g_graphicsConfig.swapChainDescriptor.height,
.depthOrArrayLayers = 1, .depthOrArrayLayers = 1,
}; };
const auto format = g_graphicsConfig.colorFormat; const auto format = g_graphicsConfig.swapChainDescriptor.format;
uint32_t sampleCount = 1; uint32_t sampleCount = 1;
if (multisampled) { if (multisampled) {
sampleCount = g_graphicsConfig.msaaSamples; sampleCount = g_graphicsConfig.msaaSamples;
} }
const WGPUTextureDescriptor textureDescriptor{ const wgpu::TextureDescriptor textureDescriptor{
.label = "Render texture", .label = "Render texture",
.usage = WGPUTextureUsage_RenderAttachment | WGPUTextureUsage_TextureBinding | WGPUTextureUsage_CopySrc | .usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopySrc |
WGPUTextureUsage_CopyDst, wgpu::TextureUsage::CopyDst,
.dimension = WGPUTextureDimension_2D, .dimension = wgpu::TextureDimension::e2D,
.size = size, .size = size,
.format = format, .format = format,
.mipLevelCount = 1, .mipLevelCount = 1,
.sampleCount = sampleCount, .sampleCount = sampleCount,
}; };
auto texture = wgpuDeviceCreateTexture(g_device, &textureDescriptor); auto texture = g_device.CreateTexture(&textureDescriptor);
const WGPUTextureViewDescriptor viewDescriptor{ const wgpu::TextureViewDescriptor viewDescriptor{
.dimension = WGPUTextureViewDimension_2D, .label = "Render texture view",
.dimension = wgpu::TextureViewDimension::e2D,
.mipLevelCount = WGPU_MIP_LEVEL_COUNT_UNDEFINED, .mipLevelCount = WGPU_MIP_LEVEL_COUNT_UNDEFINED,
.arrayLayerCount = WGPU_ARRAY_LAYER_COUNT_UNDEFINED, .arrayLayerCount = WGPU_ARRAY_LAYER_COUNT_UNDEFINED,
}; };
auto view = wgpuTextureCreateView(texture, &viewDescriptor); auto view = texture.CreateView(&viewDescriptor);
const WGPUSamplerDescriptor samplerDescriptor{ const wgpu::SamplerDescriptor samplerDescriptor{
.label = "Render sampler", .label = "Render sampler",
.addressModeU = WGPUAddressMode_ClampToEdge, .addressModeU = wgpu::AddressMode::ClampToEdge,
.addressModeV = WGPUAddressMode_ClampToEdge, .addressModeV = wgpu::AddressMode::ClampToEdge,
.addressModeW = WGPUAddressMode_ClampToEdge, .addressModeW = wgpu::AddressMode::ClampToEdge,
.magFilter = WGPUFilterMode_Linear, .magFilter = wgpu::FilterMode::Linear,
.minFilter = WGPUFilterMode_Linear, .minFilter = wgpu::FilterMode::Linear,
.mipmapFilter = WGPUFilterMode_Linear, .mipmapFilter = wgpu::FilterMode::Linear,
.lodMinClamp = 0.f, .lodMinClamp = 0.f,
.lodMaxClamp = 1000.f, .lodMaxClamp = 1000.f,
.maxAnisotropy = 1, .maxAnisotropy = 1,
}; };
auto sampler = wgpuDeviceCreateSampler(g_device, &samplerDescriptor); auto sampler = g_device.CreateSampler(&samplerDescriptor);
return { return {
.texture{texture}, .texture = std::move(texture),
.view{view}, .view = std::move(view),
.size = size, .size = size,
.format = format, .format = format,
.sampler{sampler}, .sampler = std::move(sampler),
}; };
} }
static TextureWithSampler create_depth_texture() { static TextureWithSampler create_depth_texture() {
const WGPUExtent3D size{ const wgpu::Extent3D size{
.width = g_graphicsConfig.width, .width = g_graphicsConfig.swapChainDescriptor.width,
.height = g_graphicsConfig.height, .height = g_graphicsConfig.swapChainDescriptor.height,
.depthOrArrayLayers = 1, .depthOrArrayLayers = 1,
}; };
const auto format = g_graphicsConfig.depthFormat; const auto format = g_graphicsConfig.depthFormat;
const WGPUTextureDescriptor textureDescriptor{ const wgpu::TextureDescriptor textureDescriptor{
.label = "Depth texture", .label = "Depth texture",
.usage = WGPUTextureUsage_RenderAttachment | WGPUTextureUsage_TextureBinding, .usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::TextureBinding,
.dimension = WGPUTextureDimension_2D, .dimension = wgpu::TextureDimension::e2D,
.size = size, .size = size,
.format = format, .format = format,
.mipLevelCount = 1, .mipLevelCount = 1,
.sampleCount = g_graphicsConfig.msaaSamples, .sampleCount = g_graphicsConfig.msaaSamples,
}; };
auto texture = wgpuDeviceCreateTexture(g_device, &textureDescriptor); auto texture = g_device.CreateTexture(&textureDescriptor);
const WGPUTextureViewDescriptor viewDescriptor{ const wgpu::TextureViewDescriptor viewDescriptor{
.dimension = WGPUTextureViewDimension_2D, .label = "Depth texture view",
.dimension = wgpu::TextureViewDimension::e2D,
.mipLevelCount = WGPU_MIP_LEVEL_COUNT_UNDEFINED, .mipLevelCount = WGPU_MIP_LEVEL_COUNT_UNDEFINED,
.arrayLayerCount = WGPU_ARRAY_LAYER_COUNT_UNDEFINED, .arrayLayerCount = WGPU_ARRAY_LAYER_COUNT_UNDEFINED,
}; };
auto view = wgpuTextureCreateView(texture, &viewDescriptor); auto view = texture.CreateView(&viewDescriptor);
const WGPUSamplerDescriptor samplerDescriptor{ const wgpu::SamplerDescriptor samplerDescriptor{
.label = "Depth sampler", .label = "Depth sampler",
.addressModeU = WGPUAddressMode_ClampToEdge, .addressModeU = wgpu::AddressMode::ClampToEdge,
.addressModeV = WGPUAddressMode_ClampToEdge, .addressModeV = wgpu::AddressMode::ClampToEdge,
.addressModeW = WGPUAddressMode_ClampToEdge, .addressModeW = wgpu::AddressMode::ClampToEdge,
.magFilter = WGPUFilterMode_Linear, .magFilter = wgpu::FilterMode::Linear,
.minFilter = WGPUFilterMode_Linear, .minFilter = wgpu::FilterMode::Linear,
.mipmapFilter = WGPUFilterMode_Linear, .mipmapFilter = wgpu::FilterMode::Linear,
.lodMinClamp = 0.f, .lodMinClamp = 0.f,
.lodMaxClamp = 1000.f, .lodMaxClamp = 1000.f,
.maxAnisotropy = 1, .maxAnisotropy = 1,
}; };
auto sampler = wgpuDeviceCreateSampler(g_device, &samplerDescriptor); auto sampler = g_device.CreateSampler(&samplerDescriptor);
return { return {
.texture{texture}, .texture = std::move(texture),
.view{view}, .view = std::move(view),
.size = size, .size = size,
.format = format, .format = format,
.sampler{sampler}, .sampler = std::move(sampler),
}; };
} }
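Taken together, create_render_texture and create_depth_texture set up the MSAA color target, its single-sample resolve target, and the depth buffer backing the emulated EFB. As a rough sketch of how these would be attached when recording the main render pass (illustrative only; the real pass setup lives elsewhere in aurora, and exact field names depend on the webgpu_cpp revision in use):

wgpu::RenderPassColorAttachment colorAttachment{
    .view = g_frameBuffer.view,
    .resolveTarget = g_graphicsConfig.msaaSamples > 1 ? g_frameBufferResolved.view : wgpu::TextureView{},
    .loadOp = wgpu::LoadOp::Clear,
    .storeOp = wgpu::StoreOp::Store,
};
wgpu::RenderPassDepthStencilAttachment depthAttachment{
    .view = g_depthBuffer.view,
    .depthLoadOp = wgpu::LoadOp::Clear,
    .depthStoreOp = wgpu::StoreOp::Store,
    .depthClearValue = 1.f,
};
const wgpu::RenderPassDescriptor passDescriptor{
    .colorAttachmentCount = 1,
    .colorAttachments = &colorAttachment,
    .depthStencilAttachment = &depthAttachment,
};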
void create_copy_pipeline() { void create_copy_pipeline() {
WGPUShaderModuleWGSLDescriptor sourceDescriptor{ wgpu::ShaderModuleWGSLDescriptor sourceDescriptor{};
.chain = {.sType = WGPUSType_ShaderModuleWGSLDescriptor}, sourceDescriptor.source = R"""(
.source = R"""(
@group(0) @binding(0) @group(0) @binding(0)
var efb_sampler: sampler; var efb_sampler: sampler;
@group(0) @binding(1) @group(0) @binding(1)
@@ -173,179 +182,237 @@ fn vs_main(@builtin(vertex_index) vtxIdx: u32) -> VertexOutput {
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> { fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
return textureSample(efb_texture, efb_sampler, in.uv); return textureSample(efb_texture, efb_sampler, in.uv);
} }
)""", )""";
}; const wgpu::ShaderModuleDescriptor moduleDescriptor{
const WGPUShaderModuleDescriptor moduleDescriptor{ .nextInChain = &sourceDescriptor,
.nextInChain = &sourceDescriptor.chain,
.label = "XFB Copy Module", .label = "XFB Copy Module",
}; };
auto module = wgpuDeviceCreateShaderModule(g_device, &moduleDescriptor); auto module = g_device.CreateShaderModule(&moduleDescriptor);
const std::array colorTargets{WGPUColorTargetState{ const std::array colorTargets{wgpu::ColorTargetState{
.format = g_graphicsConfig.colorFormat, .format = g_graphicsConfig.swapChainDescriptor.format,
.writeMask = WGPUColorWriteMask_All, .writeMask = wgpu::ColorWriteMask::All,
}}; }};
const WGPUFragmentState fragmentState{ const wgpu::FragmentState fragmentState{
.module = module, .module = module,
.entryPoint = "fs_main", .entryPoint = "fs_main",
.targetCount = colorTargets.size(), .targetCount = colorTargets.size(),
.targets = colorTargets.data(), .targets = colorTargets.data(),
}; };
const std::array bindGroupLayoutEntries{ const std::array bindGroupLayoutEntries{
WGPUBindGroupLayoutEntry{ wgpu::BindGroupLayoutEntry{
.binding = 0, .binding = 0,
.visibility = WGPUShaderStage_Fragment, .visibility = wgpu::ShaderStage::Fragment,
.sampler = .sampler =
WGPUSamplerBindingLayout{ wgpu::SamplerBindingLayout{
.type = WGPUSamplerBindingType_Filtering, .type = wgpu::SamplerBindingType::Filtering,
}, },
}, },
WGPUBindGroupLayoutEntry{ wgpu::BindGroupLayoutEntry{
.binding = 1, .binding = 1,
.visibility = WGPUShaderStage_Fragment, .visibility = wgpu::ShaderStage::Fragment,
.texture = .texture =
WGPUTextureBindingLayout{ wgpu::TextureBindingLayout{
.sampleType = WGPUTextureSampleType_Float, .sampleType = wgpu::TextureSampleType::Float,
.viewDimension = WGPUTextureViewDimension_2D, .viewDimension = wgpu::TextureViewDimension::e2D,
}, },
}, },
}; };
const WGPUBindGroupLayoutDescriptor bindGroupLayoutDescriptor{ const wgpu::BindGroupLayoutDescriptor bindGroupLayoutDescriptor{
.entryCount = bindGroupLayoutEntries.size(), .entryCount = bindGroupLayoutEntries.size(),
.entries = bindGroupLayoutEntries.data(), .entries = bindGroupLayoutEntries.data(),
}; };
g_CopyBindGroupLayout = wgpuDeviceCreateBindGroupLayout(g_device, &bindGroupLayoutDescriptor); g_CopyBindGroupLayout = g_device.CreateBindGroupLayout(&bindGroupLayoutDescriptor);
const WGPUPipelineLayoutDescriptor layoutDescriptor{ const wgpu::PipelineLayoutDescriptor layoutDescriptor{
.bindGroupLayoutCount = 1, .bindGroupLayoutCount = 1,
.bindGroupLayouts = &g_CopyBindGroupLayout, .bindGroupLayouts = &g_CopyBindGroupLayout,
}; };
auto pipelineLayout = wgpuDeviceCreatePipelineLayout(g_device, &layoutDescriptor); auto pipelineLayout = g_device.CreatePipelineLayout(&layoutDescriptor);
const WGPURenderPipelineDescriptor pipelineDescriptor{ const wgpu::RenderPipelineDescriptor pipelineDescriptor{
.layout = pipelineLayout, .layout = pipelineLayout,
.vertex = .vertex =
WGPUVertexState{ wgpu::VertexState{
.module = module, .module = module,
.entryPoint = "vs_main", .entryPoint = "vs_main",
}, },
.primitive = .primitive =
WGPUPrimitiveState{ wgpu::PrimitiveState{
.topology = WGPUPrimitiveTopology_TriangleList, .topology = wgpu::PrimitiveTopology::TriangleList,
}, },
.multisample = .multisample =
WGPUMultisampleState{ wgpu::MultisampleState{
.count = 1, .count = 1,
.mask = UINT32_MAX, .mask = UINT32_MAX,
}, },
.fragment = &fragmentState, .fragment = &fragmentState,
}; };
g_CopyPipeline = wgpuDeviceCreateRenderPipeline(g_device, &pipelineDescriptor); g_CopyPipeline = g_device.CreateRenderPipeline(&pipelineDescriptor);
wgpuPipelineLayoutRelease(pipelineLayout);
} }
void create_copy_bind_group() { void create_copy_bind_group() {
const std::array bindGroupEntries{ const std::array bindGroupEntries{
WGPUBindGroupEntry{ wgpu::BindGroupEntry{
.binding = 0, .binding = 0,
.sampler = g_graphicsConfig.msaaSamples > 1 ? g_frameBufferResolved.sampler : g_frameBuffer.sampler, .sampler = g_graphicsConfig.msaaSamples > 1 ? g_frameBufferResolved.sampler : g_frameBuffer.sampler,
}, },
WGPUBindGroupEntry{ wgpu::BindGroupEntry{
.binding = 1, .binding = 1,
.textureView = g_graphicsConfig.msaaSamples > 1 ? g_frameBufferResolved.view : g_frameBuffer.view, .textureView = g_graphicsConfig.msaaSamples > 1 ? g_frameBufferResolved.view : g_frameBuffer.view,
}, },
}; };
const WGPUBindGroupDescriptor bindGroupDescriptor{ const wgpu::BindGroupDescriptor bindGroupDescriptor{
.layout = g_CopyBindGroupLayout, .layout = g_CopyBindGroupLayout,
.entryCount = bindGroupEntries.size(), .entryCount = bindGroupEntries.size(),
.entries = bindGroupEntries.data(), .entries = bindGroupEntries.data(),
}; };
g_CopyBindGroup = wgpuDeviceCreateBindGroup(g_device, &bindGroupDescriptor); g_CopyBindGroup = g_device.CreateBindGroup(&bindGroupDescriptor);
} }
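create_copy_pipeline and create_copy_bind_group implement the EFB-to-XFB blit: a fullscreen triangle that samples the (resolved) framebuffer into the swapchain image. A minimal sketch of how the pipeline and bind group would be used when presenting a frame (the actual present path lives in aurora's frame loop; method names follow the webgpu_cpp headers this commit targets):

wgpu::RenderPassColorAttachment attachment{
    .view = g_swapChain.GetCurrentTextureView(),
    .loadOp = wgpu::LoadOp::Clear,
    .storeOp = wgpu::StoreOp::Store,
};
const wgpu::RenderPassDescriptor passDescriptor{
    .colorAttachmentCount = 1,
    .colorAttachments = &attachment,
};
auto encoder = g_device.CreateCommandEncoder();
auto pass = encoder.BeginRenderPass(&passDescriptor);
pass.SetPipeline(g_CopyPipeline);
pass.SetBindGroup(0, g_CopyBindGroup);
pass.Draw(3); // vs_main expands vertex_index into a fullscreen triangle
pass.End();
const auto commands = encoder.Finish();
g_queue.Submit(1, &commands);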
static void error_callback(WGPUErrorType type, char const* message, void* userdata) { static void error_callback(WGPUErrorType type, char const* message, void* userdata) {
Log.report(LOG_FATAL, FMT_STRING("Dawn error {}: {}"), magic_enum::enum_name(static_cast<WGPUErrorType>(type)), Log.report(LOG_FATAL, FMT_STRING("WebGPU error {}: {}"), magic_enum::enum_name(static_cast<WGPUErrorType>(type)),
message); message);
} }
#ifndef WEBGPU_DAWN
static void adapter_callback(WGPURequestAdapterStatus status, WGPUAdapter adapter, char const* message,
void* userdata) {
if (status == WGPURequestAdapterStatus_Success) {
g_adapter = wgpu::Adapter::Acquire(adapter);
} else {
Log.report(LOG_WARNING, FMT_STRING("Adapter request failed with message: {}"), message);
}
*static_cast<bool*>(userdata) = true;
}
#endif
static void device_callback(WGPURequestDeviceStatus status, WGPUDevice device, char const* message, void* userdata) { static void device_callback(WGPURequestDeviceStatus status, WGPUDevice device, char const* message, void* userdata) {
if (status == WGPURequestDeviceStatus_Success) { if (status == WGPURequestDeviceStatus_Success) {
g_device = device; g_device = wgpu::Device::Acquire(device);
} else { } else {
Log.report(LOG_WARNING, FMT_STRING("Device request failed with message: {}"), message); Log.report(LOG_WARNING, FMT_STRING("Device request failed with message: {}"), message);
} }
*static_cast<bool*>(userdata) = true; *static_cast<bool*>(userdata) = true;
} }
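Both callbacks follow the same request-then-poll idiom: kick off the asynchronous request, let the callback flip a flag, and spin until it does. On the Emscripten path this only works because Asyncify is enabled, so emscripten_sleep yields to the browser event loop and the underlying promise can resolve. Condensed form of the pattern as used in initialize() below (options values are placeholders):

bool done = false;
const WGPURequestAdapterOptions options{.compatibleSurface = g_surface.Get()};
wgpuInstanceRequestAdapter(g_instance.Get(), &options, adapter_callback, &done);
while (!done) {
  emscripten_sleep(10); // yield so the adapter promise can resolve and fire adapter_callback
}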
static WGPUBackendType to_wgpu_backend(AuroraBackend backend) { static wgpu::BackendType to_wgpu_backend(AuroraBackend backend) {
switch (backend) { switch (backend) {
case BACKEND_WEBGPU: case BACKEND_WEBGPU:
return WGPUBackendType_WebGPU; return wgpu::BackendType::WebGPU;
case BACKEND_D3D12: case BACKEND_D3D12:
return WGPUBackendType_D3D12; return wgpu::BackendType::D3D12;
case BACKEND_METAL: case BACKEND_METAL:
return WGPUBackendType_Metal; return wgpu::BackendType::Metal;
case BACKEND_VULKAN: case BACKEND_VULKAN:
return WGPUBackendType_Vulkan; return wgpu::BackendType::Vulkan;
case BACKEND_OPENGL: case BACKEND_OPENGL:
return WGPUBackendType_OpenGL; return wgpu::BackendType::OpenGL;
case BACKEND_OPENGLES: case BACKEND_OPENGLES:
return WGPUBackendType_OpenGLES; return wgpu::BackendType::OpenGLES;
default: default:
return WGPUBackendType_Null; return wgpu::BackendType::Null;
} }
} }
bool initialize(AuroraBackend auroraBackend) { bool initialize(AuroraBackend auroraBackend) {
if (!g_Instance) { #ifdef WEBGPU_DAWN
if (!g_dawnInstance) {
Log.report(LOG_INFO, FMT_STRING("Creating Dawn instance")); Log.report(LOG_INFO, FMT_STRING("Creating Dawn instance"));
g_Instance = std::make_unique<dawn::native::Instance>(); g_dawnInstance = std::make_unique<dawn::native::Instance>();
} }
WGPUBackendType backend = to_wgpu_backend(auroraBackend); #else
if (!g_instance) {
const wgpu::InstanceDescriptor instanceDescriptor{};
g_instance = {}; // TODO use wgpuCreateInstance when supported
}
#endif
wgpu::BackendType backend = to_wgpu_backend(auroraBackend);
#ifdef EMSCRIPTEN
if (backend != wgpu::BackendType::WebGPU) {
Log.report(LOG_WARNING, FMT_STRING("Backend type {} unsupported"), magic_enum::enum_name(backend));
return false;
}
#endif
Log.report(LOG_INFO, FMT_STRING("Attempting to initialize {}"), magic_enum::enum_name(backend)); Log.report(LOG_INFO, FMT_STRING("Attempting to initialize {}"), magic_enum::enum_name(backend));
#if 0 #if 0
// D3D12's debug layer is very slow // D3D12's debug layer is very slow
g_Instance->EnableBackendValidation(backend != WGPUBackendType::D3D12); g_dawnInstance->EnableBackendValidation(backend != wgpu::BackendType::D3D12);
#endif #endif
#ifdef WEBGPU_DAWN
SDL_Window* window = window::get_sdl_window(); SDL_Window* window = window::get_sdl_window();
if (!utils::DiscoverAdapter(g_Instance.get(), window, backend)) { if (!utils::DiscoverAdapter(g_dawnInstance.get(), window, backend)) {
return false; return false;
} }
{ {
std::vector<dawn::native::Adapter> adapters = g_Instance->GetAdapters(); std::vector<dawn::native::Adapter> adapters = g_dawnInstance->GetAdapters();
std::sort(adapters.begin(), adapters.end(), [&](const auto& a, const auto& b) { std::sort(adapters.begin(), adapters.end(), [&](const auto& a, const auto& b) {
WGPUAdapterProperties propertiesA; wgpu::AdapterProperties propertiesA;
WGPUAdapterProperties propertiesB; wgpu::AdapterProperties propertiesB;
a.GetProperties(&propertiesA); a.GetProperties(&propertiesA);
b.GetProperties(&propertiesB); b.GetProperties(&propertiesB);
constexpr std::array PreferredTypeOrder{ constexpr std::array PreferredTypeOrder{
WGPUAdapterType_DiscreteGPU, wgpu::AdapterType::DiscreteGPU,
WGPUAdapterType_IntegratedGPU, wgpu::AdapterType::IntegratedGPU,
WGPUAdapterType_CPU, wgpu::AdapterType::CPU,
}; };
const auto typeItA = std::find(PreferredTypeOrder.begin(), PreferredTypeOrder.end(), propertiesA.adapterType); const auto typeItA = std::find(PreferredTypeOrder.begin(), PreferredTypeOrder.end(), propertiesA.adapterType);
const auto typeItB = std::find(PreferredTypeOrder.begin(), PreferredTypeOrder.end(), propertiesB.adapterType); const auto typeItB = std::find(PreferredTypeOrder.begin(), PreferredTypeOrder.end(), propertiesB.adapterType);
return typeItA < typeItB; return typeItA < typeItB;
}); });
const auto adapterIt = std::find_if(adapters.begin(), adapters.end(), [=](const auto& adapter) -> bool { const auto adapterIt = std::find_if(adapters.begin(), adapters.end(), [=](const auto& adapter) -> bool {
WGPUAdapterProperties properties; wgpu::AdapterProperties properties;
adapter.GetProperties(&properties); adapter.GetProperties(&properties);
return properties.backendType == backend; return properties.backendType == backend;
}); });
if (adapterIt == adapters.end()) { if (adapterIt == adapters.end()) {
return false; return false;
} }
g_Adapter = *adapterIt; g_adapter = *adapterIt;
} }
g_Adapter.GetProperties(&g_AdapterProperties); #else
g_backendType = g_AdapterProperties.backendType; const WGPUSurfaceDescriptorFromCanvasHTMLSelector canvasDescriptor{
.chain = {.sType = WGPUSType_SurfaceDescriptorFromCanvasHTMLSelector},
.selector = "#canvas",
};
const WGPUSurfaceDescriptor surfaceDescriptor{
.nextInChain = &canvasDescriptor.chain,
.label = "Surface",
};
g_surface = wgpu::Surface::Acquire(wgpuInstanceCreateSurface(g_instance.Get(), &surfaceDescriptor));
if (!g_surface) {
Log.report(LOG_FATAL, FMT_STRING("Failed to initialize surface"));
}
const WGPURequestAdapterOptions options{
.compatibleSurface = g_surface.Get(),
.powerPreference = WGPUPowerPreference_HighPerformance,
.forceFallbackAdapter = false,
};
bool adapterCallbackReceived = false;
wgpuInstanceRequestAdapter(g_instance.Get(), &options, adapter_callback, &adapterCallbackReceived);
while (!adapterCallbackReceived) {
emscripten_log(EM_LOG_CONSOLE, "Waiting for adapter...\n");
emscripten_sleep(100);
}
#endif
g_adapter.GetProperties(&g_adapterProperties);
g_backendType = g_adapterProperties.backendType;
const auto backendName = magic_enum::enum_name(g_backendType); const auto backendName = magic_enum::enum_name(g_backendType);
const char* adapterName = g_adapterProperties.name;
if (adapterName == nullptr) {
adapterName = "Unknown";
}
const char* driverDescription = g_adapterProperties.driverDescription;
if (driverDescription == nullptr) {
driverDescription = "Unknown";
}
Log.report(LOG_INFO, FMT_STRING("Graphics adapter information\n API: {}\n Device: {} ({})\n Driver: {}"), Log.report(LOG_INFO, FMT_STRING("Graphics adapter information\n API: {}\n Device: {} ({})\n Driver: {}"),
backendName, g_AdapterProperties.name, magic_enum::enum_name(g_AdapterProperties.adapterType), backendName, adapterName, magic_enum::enum_name(g_adapterProperties.adapterType), driverDescription);
g_AdapterProperties.driverDescription);
{ {
// TODO: emscripten doesn't implement wgpuAdapterGetLimits
#ifdef WEBGPU_DAWN
WGPUSupportedLimits supportedLimits{}; WGPUSupportedLimits supportedLimits{};
g_Adapter.GetLimits(&supportedLimits); g_adapter.GetLimits(&supportedLimits);
const WGPURequiredLimits requiredLimits{ const wgpu::RequiredLimits requiredLimits{
.limits = .limits =
{ {
// Use "best" supported alignments // Use "best" supported alignments
@@ -357,13 +424,27 @@ bool initialize(AuroraBackend auroraBackend) {
: supportedLimits.limits.minStorageBufferOffsetAlignment, : supportedLimits.limits.minStorageBufferOffsetAlignment,
}, },
}; };
std::vector<WGPUFeatureName> features; #endif
const auto supportedFeatures = g_Adapter.GetSupportedFeatures(); std::vector<wgpu::FeatureName> features;
#ifdef WEBGPU_DAWN
const auto supportedFeatures = g_adapter.GetSupportedFeatures();
for (const auto* const feature : supportedFeatures) { for (const auto* const feature : supportedFeatures) {
if (strcmp(feature, "texture-compression-bc") == 0) { if (strcmp(feature, "texture-compression-bc") == 0) {
features.push_back(WGPUFeatureName_TextureCompressionBC); features.push_back(wgpu::FeatureName::TextureCompressionBC);
} }
} }
#else
std::vector<wgpu::FeatureName> supportedFeatures;
size_t featureCount = g_adapter.EnumerateFeatures(nullptr);
supportedFeatures.resize(featureCount);
g_adapter.EnumerateFeatures(supportedFeatures.data());
for (const auto& feature : supportedFeatures) {
if (feature == wgpu::FeatureName::TextureCompressionBC) {
features.push_back(wgpu::FeatureName::TextureCompressionBC);
}
}
#endif
#ifdef WEBGPU_DAWN
const std::array enableToggles { const std::array enableToggles {
/* clang-format off */ /* clang-format off */
#if _WIN32 #if _WIN32
@@ -377,87 +458,110 @@ bool initialize(AuroraBackend auroraBackend) {
"disable_symbol_renaming", "disable_symbol_renaming",
/* clang-format on */ /* clang-format on */
}; };
const WGPUDawnTogglesDeviceDescriptor togglesDescriptor{ wgpu::DawnTogglesDeviceDescriptor togglesDescriptor{};
.chain = {.sType = WGPUSType_DawnTogglesDeviceDescriptor}, togglesDescriptor.forceEnabledTogglesCount = enableToggles.size();
.forceEnabledTogglesCount = enableToggles.size(), togglesDescriptor.forceEnabledToggles = enableToggles.data();
.forceEnabledToggles = enableToggles.data(), #endif
}; const wgpu::DeviceDescriptor deviceDescriptor{
const WGPUDeviceDescriptor deviceDescriptor{ #ifdef WEBGPU_DAWN
.nextInChain = &togglesDescriptor.chain, .nextInChain = &togglesDescriptor,
#endif
.requiredFeaturesCount = static_cast<uint32_t>(features.size()), .requiredFeaturesCount = static_cast<uint32_t>(features.size()),
.requiredFeatures = features.data(), .requiredFeatures = features.data(),
#ifdef WEBGPU_DAWN
.requiredLimits = &requiredLimits, .requiredLimits = &requiredLimits,
#endif
}; };
bool deviceCallbackReceived = false; bool deviceCallbackReceived = false;
g_Adapter.RequestDevice(&deviceDescriptor, &device_callback, &deviceCallbackReceived); g_adapter.RequestDevice(&deviceDescriptor, device_callback, &deviceCallbackReceived);
// while (!deviceCallbackReceived) { #ifdef EMSCRIPTEN
// TODO wgpuInstanceProcessEvents while (!deviceCallbackReceived) {
// } emscripten_log(EM_LOG_CONSOLE, "Waiting for device...\n");
emscripten_sleep(100);
}
#endif
if (!g_device) { if (!g_device) {
return false; return false;
} }
wgpuDeviceSetUncapturedErrorCallback(g_device, &error_callback, nullptr); g_device.SetUncapturedErrorCallback(&error_callback, nullptr);
} }
wgpuDeviceSetDeviceLostCallback(g_device, nullptr, nullptr); g_device.SetDeviceLostCallback(nullptr, nullptr);
g_queue = wgpuDeviceGetQueue(g_device); g_queue = g_device.GetQueue();
g_BackendBinding = std::unique_ptr<utils::BackendBinding>(utils::CreateBinding(g_backendType, window, g_device)); #ifdef WEBGPU_DAWN
if (!g_BackendBinding) { g_backendBinding =
std::unique_ptr<utils::BackendBinding>(utils::CreateBinding(g_backendType, window, g_device.Get()));
if (!g_backendBinding) {
return false; return false;
} }
auto swapChainFormat = static_cast<wgpu::TextureFormat>(g_backendBinding->GetPreferredSwapChainTextureFormat());
auto swapChainFormat = static_cast<WGPUTextureFormat>(g_BackendBinding->GetPreferredSwapChainTextureFormat()); #else
if (swapChainFormat == WGPUTextureFormat_RGBA8UnormSrgb) { auto swapChainFormat = g_surface.GetPreferredFormat(g_adapter);
swapChainFormat = WGPUTextureFormat_RGBA8Unorm; #endif
} else if (swapChainFormat == WGPUTextureFormat_BGRA8UnormSrgb) { if (swapChainFormat == wgpu::TextureFormat::RGBA8UnormSrgb) {
swapChainFormat = WGPUTextureFormat_BGRA8Unorm; swapChainFormat = wgpu::TextureFormat::RGBA8Unorm;
} else if (swapChainFormat == wgpu::TextureFormat::BGRA8UnormSrgb) {
swapChainFormat = wgpu::TextureFormat::BGRA8Unorm;
} }
Log.report(LOG_INFO, FMT_STRING("Using swapchain format {}"), magic_enum::enum_name(swapChainFormat)); Log.report(LOG_INFO, FMT_STRING("Using swapchain format {}"), magic_enum::enum_name(swapChainFormat));
{ const auto size = window::get_window_size();
const WGPUSwapChainDescriptor descriptor{ g_graphicsConfig = GraphicsConfig{
.format = swapChainFormat, .swapChainDescriptor =
.implementation = g_BackendBinding->GetSwapChainImplementation(), wgpu::SwapChainDescriptor{
}; .usage = wgpu::TextureUsage::RenderAttachment,
g_swapChain = wgpuDeviceCreateSwapChain(g_device, nullptr, &descriptor); .format = swapChainFormat,
} .width = size.fb_width,
{ .height = size.fb_height,
const auto size = window::get_window_size(); .presentMode = wgpu::PresentMode::Fifo,
g_graphicsConfig = GraphicsConfig{ #ifdef WEBGPU_DAWN
.width = size.fb_width, .implementation = g_backendBinding->GetSwapChainImplementation(),
.height = size.fb_height, #endif
.colorFormat = swapChainFormat, },
.depthFormat = WGPUTextureFormat_Depth32Float, .depthFormat = wgpu::TextureFormat::Depth32Float,
.msaaSamples = g_config.msaa, .msaaSamples = g_config.msaa,
.textureAnisotropy = g_config.maxTextureAnisotropy, .textureAnisotropy = g_config.maxTextureAnisotropy,
}; };
create_copy_pipeline(); create_copy_pipeline();
resize_swapchain(size.fb_width, size.fb_height, true); resize_swapchain(size.fb_width, size.fb_height, true);
// g_windowSize = size;
}
return true; return true;
} }
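initialize() returns false when a backend cannot be brought up, which lets the caller walk a list of candidates until one succeeds. A hypothetical caller-side loop (the ordering and surrounding API are illustrative, not aurora's actual selection logic):

for (const AuroraBackend backend : {BACKEND_WEBGPU, BACKEND_VULKAN, BACKEND_METAL,
                                    BACKEND_D3D12, BACKEND_OPENGL, BACKEND_OPENGLES}) {
  if (webgpu::initialize(backend)) {
    break; // first backend that initializes wins
  }
}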
void shutdown() { void shutdown() {
wgpuBindGroupLayoutRelease(g_CopyBindGroupLayout); g_CopyBindGroupLayout = {};
wgpuRenderPipelineRelease(g_CopyPipeline); g_CopyPipeline = {};
wgpuBindGroupRelease(g_CopyBindGroup); g_CopyBindGroup = {};
g_frameBuffer = {}; g_frameBuffer = {};
g_frameBufferResolved = {}; g_frameBufferResolved = {};
g_depthBuffer = {}; g_depthBuffer = {};
wgpuSwapChainRelease(g_swapChain); wgpuSwapChainRelease(g_swapChain.Release());
wgpuQueueRelease(g_queue); wgpuQueueRelease(g_queue.Release());
g_BackendBinding.reset(); wgpuDeviceDestroy(g_device.Release());
wgpuDeviceDestroy(g_device); g_adapter = {};
g_Instance.reset(); #ifdef WEBGPU_DAWN
g_backendBinding.reset();
g_dawnInstance.reset();
#else
g_surface = {};
g_instance = {};
#endif
} }
void resize_swapchain(uint32_t width, uint32_t height, bool force) { void resize_swapchain(uint32_t width, uint32_t height, bool force) {
if (!force && g_graphicsConfig.width == width && g_graphicsConfig.height == height) { if (!force && g_graphicsConfig.swapChainDescriptor.width == width &&
g_graphicsConfig.swapChainDescriptor.height == height) {
return; return;
} }
g_graphicsConfig.width = width; g_graphicsConfig.swapChainDescriptor.width = width;
g_graphicsConfig.height = height; g_graphicsConfig.swapChainDescriptor.height = height;
wgpuSwapChainConfigure(g_swapChain, g_graphicsConfig.colorFormat, WGPUTextureUsage_RenderAttachment, width, height); #ifdef WEBGPU_DAWN
if (!g_swapChain) {
g_swapChain = g_device.CreateSwapChain(g_surface, &g_graphicsConfig.swapChainDescriptor);
}
g_swapChain.Configure(g_graphicsConfig.swapChainDescriptor.format, g_graphicsConfig.swapChainDescriptor.usage, width,
height);
#else
g_swapChain = g_device.CreateSwapChain(g_surface, &g_graphicsConfig.swapChainDescriptor);
#endif
g_frameBuffer = create_render_texture(true); g_frameBuffer = create_render_texture(true);
g_frameBufferResolved = create_render_texture(false); g_frameBufferResolved = create_render_texture(false);
g_depthBuffer = create_depth_texture(); g_depthBuffer = create_depth_texture();
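Since the swapchain is (re)created here with the dimensions stored in g_graphicsConfig.swapChainDescriptor, this function has to run whenever the window or canvas size changes. A rough sketch of wiring it to SDL's resize event (illustrative; aurora's real event handling presumably lives in lib/window.cpp):

void handle_resize_event(const SDL_Event& event) {
  if (event.type == SDL_WINDOWEVENT && event.window.event == SDL_WINDOWEVENT_SIZE_CHANGED) {
    // Real code should use the drawable/framebuffer size (e.g. window::get_window_size())
    // rather than window coordinates, so high-DPI displays are handled correctly.
    webgpu::resize_swapchain(static_cast<uint32_t>(event.window.data1),
                             static_cast<uint32_t>(event.window.data2), false);
  }
}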
@@ -11,75 +11,30 @@ struct SDL_Window; struct SDL_Window;
namespace aurora::webgpu { namespace aurora::webgpu {
struct GraphicsConfig { struct GraphicsConfig {
uint32_t width; wgpu::SwapChainDescriptor swapChainDescriptor;
uint32_t height; wgpu::TextureFormat depthFormat;
WGPUTextureFormat colorFormat;
WGPUTextureFormat depthFormat;
uint32_t msaaSamples; uint32_t msaaSamples;
uint16_t textureAnisotropy; uint16_t textureAnisotropy;
}; };
struct TextureWithSampler { struct TextureWithSampler {
wgpu::Texture texture; wgpu::Texture texture;
wgpu::TextureView view; wgpu::TextureView view;
WGPUExtent3D size; wgpu::Extent3D size;
WGPUTextureFormat format; wgpu::TextureFormat format;
wgpu::Sampler sampler; wgpu::Sampler sampler;
// TextureWithSampler() = default;
// TextureWithSampler(WGPUTexture texture, WGPUTextureView view, WGPUExtent3D size, WGPUTextureFormat format,
// WGPUSampler sampler) noexcept
// : texture(texture), view(view), size(size), format(format), sampler(sampler) {}
// TextureWithSampler(const TextureWithSampler& rhs) noexcept
// : texture(rhs.texture), view(rhs.view), size(rhs.size), format(rhs.format), sampler(rhs.sampler) {
// wgpuTextureReference(texture);
// wgpuTextureViewReference(view);
// wgpuSamplerReference(sampler);
// }
// TextureWithSampler(TextureWithSampler&& rhs) noexcept
// : texture(rhs.texture), view(rhs.view), size(rhs.size), format(rhs.format), sampler(rhs.sampler) {
// rhs.texture = nullptr;
// rhs.view = nullptr;
// rhs.sampler = nullptr;
// }
// ~TextureWithSampler() { reset(); }
// TextureWithSampler& operator=(const TextureWithSampler& rhs) noexcept {
// reset();
// texture = rhs.texture;
// view = rhs.view;
// size = rhs.size;
// format = rhs.format;
// sampler = rhs.sampler;
// wgpuTextureReference(texture);
// wgpuTextureViewReference(view);
// wgpuSamplerReference(sampler);
// return *this;
// }
// void reset() {
// if (texture != nullptr) {
// wgpuTextureRelease(texture);
// texture = nullptr;
// }
// if (view != nullptr) {
// wgpuTextureViewRelease(view);
// view = nullptr;
// }
// if (sampler != nullptr) {
// wgpuSamplerRelease(sampler);
// sampler = nullptr;
// }
// }
}; };
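With the webgpu_cpp handle types, TextureWithSampler no longer needs the hand-written reference counting that was commented out on the left: copying a wgpu:: handle adds a reference and destroying it releases one. For example (illustrative):

TextureWithSampler copy = g_frameBuffer; // each wgpu:: member picks up an extra reference
copy = {};                               // ...and releases it again when overwritten or destroyed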
extern WGPUDevice g_device; extern wgpu::Device g_device;
extern WGPUQueue g_queue; extern wgpu::Queue g_queue;
extern WGPUSwapChain g_swapChain; extern wgpu::SwapChain g_swapChain;
extern WGPUBackendType g_backendType; extern wgpu::BackendType g_backendType;
extern GraphicsConfig g_graphicsConfig; extern GraphicsConfig g_graphicsConfig;
extern TextureWithSampler g_frameBuffer; extern TextureWithSampler g_frameBuffer;
extern TextureWithSampler g_frameBufferResolved; extern TextureWithSampler g_frameBufferResolved;
extern TextureWithSampler g_depthBuffer; extern TextureWithSampler g_depthBuffer;
extern WGPURenderPipeline g_CopyPipeline; extern wgpu::RenderPipeline g_CopyPipeline;
extern WGPUBindGroup g_CopyBindGroup; extern wgpu::BindGroup g_CopyBindGroup;
extern wgpu::Instance g_instance;
bool initialize(AuroraBackend backend); bool initialize(AuroraBackend backend);
void shutdown(); void shutdown();
@@ -1,102 +1,4 @@
#include <webgpu/webgpu.h> #include <webgpu/webgpu_cpp.h>
#ifdef EMSCRIPTEN
namespace wgpu { #include <emscripten.h>
template <typename Derived, typename CType> #endif
struct ObjectBase {
ObjectBase() = default;
ObjectBase(CType handle) : mHandle(handle) {}
~ObjectBase() { Reset(); }
ObjectBase(ObjectBase const& other) : ObjectBase(other.Get()) {}
Derived& operator=(ObjectBase const& other) {
if (&other != this) {
if (mHandle) {
Derived::WGPURelease(mHandle);
}
mHandle = other.mHandle;
if (mHandle) {
Derived::WGPUReference(mHandle);
}
}
return static_cast<Derived&>(*this);
}
ObjectBase(ObjectBase&& other) noexcept {
mHandle = other.mHandle;
other.mHandle = 0;
}
Derived& operator=(ObjectBase&& other) noexcept {
if (&other != this) {
if (mHandle) {
Derived::WGPURelease(mHandle);
}
mHandle = other.mHandle;
other.mHandle = nullptr;
}
return static_cast<Derived&>(*this);
}
ObjectBase(std::nullptr_t) {}
Derived& operator=(std::nullptr_t) {
if (mHandle != nullptr) {
Derived::WGPURelease(mHandle);
mHandle = nullptr;
}
return static_cast<Derived&>(*this);
}
bool operator==(std::nullptr_t) const { return mHandle == nullptr; }
bool operator!=(std::nullptr_t) const { return mHandle != nullptr; }
explicit operator bool() const { return mHandle != nullptr; }
operator CType() { return mHandle; }
[[nodiscard]] CType Get() const { return mHandle; }
CType Release() {
CType result = mHandle;
mHandle = 0;
return result;
}
void Reset() {
if (mHandle) {
Derived::WGPURelease(mHandle);
mHandle = nullptr;
}
}
protected:
CType mHandle = nullptr;
};
class Texture : public ObjectBase<Texture, WGPUTexture> {
public:
using ObjectBase::ObjectBase;
using ObjectBase::operator=;
private:
friend ObjectBase<Texture, WGPUTexture>;
static void WGPUReference(WGPUTexture handle) { wgpuTextureReference(handle); }
static void WGPURelease(WGPUTexture handle) { wgpuTextureRelease(handle); }
};
class TextureView : public ObjectBase<TextureView, WGPUTextureView> {
public:
using ObjectBase::ObjectBase;
using ObjectBase::operator=;
private:
friend ObjectBase<TextureView, WGPUTextureView>;
static void WGPUReference(WGPUTextureView handle) { wgpuTextureViewReference(handle); }
static void WGPURelease(WGPUTextureView handle) { wgpuTextureViewRelease(handle); }
};
class Sampler : public ObjectBase<Sampler, WGPUSampler> {
public:
using ObjectBase::ObjectBase;
using ObjectBase::operator=;
private:
friend ObjectBase<Sampler, WGPUSampler>;
static void WGPUReference(WGPUSampler handle) { wgpuSamplerReference(handle); }
static void WGPURelease(WGPUSampler handle) { wgpuSamplerRelease(handle); }
};
} // namespace wgpu
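The hand-rolled wrappers deleted here duplicated what webgpu_cpp.h already provides: RAII types such as wgpu::Texture, wgpu::TextureView, and wgpu::Sampler built on its own ObjectBase, with Get() to borrow the C handle and Release() to hand ownership back to the caller. A minimal usage sketch (the texture descriptor is assumed to be one like those shown earlier):

wgpu::Texture texture = g_device.CreateTexture(&textureDescriptor); // released automatically
WGPUTexture borrowed = texture.Get();   // borrow the C handle; refcount unchanged
WGPUTexture owned = texture.Release();  // take ownership; caller must wgpuTextureRelease(owned)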
@@ -197,7 +197,7 @@ void show_window() { } }
} }
bool initialize() { bool initialize() {
if (SDL_Init(SDL_INIT_EVERYTHING) < 0) { if (SDL_Init(SDL_INIT_EVERYTHING & ~SDL_INIT_HAPTIC) < 0) {
Log.report(LOG_FATAL, FMT_STRING("Error initializing SDL: {}"), SDL_GetError()); Log.report(LOG_FATAL, FMT_STRING("Error initializing SDL: {}"), SDL_GetError());
unreachable(); unreachable();
} }
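Masking out SDL_INIT_HAPTIC skips force-feedback initialization up front, presumably because it is unavailable or slow to probe on some targets (notably the browser build). If rumble support is wanted later, the subsystem can still be started on demand; a small sketch:

if (SDL_InitSubSystem(SDL_INIT_HAPTIC) != 0) {
  Log.report(LOG_WARNING, FMT_STRING("SDL haptic unavailable: {}"), SDL_GetError());
}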