Update to latest upstream dawn

Luke Street 2025-04-03 00:12:22 -06:00
parent 1b088e79e8
commit ca3e4f5273
17 changed files with 383 additions and 1090 deletions

View File

@ -6,6 +6,7 @@ if (NOT EMSCRIPTEN)
if (CMAKE_SYSTEM_NAME STREQUAL Linux)
set(DAWN_ENABLE_OPENGLES ON CACHE BOOL "Enable compilation of the OpenGL ES backend" FORCE)
endif ()
set(DAWN_FETCH_DEPENDENCIES ON CACHE BOOL "Use fetch_dawn_dependencies.py as an alternative to using depot_tools" FORCE)
add_subdirectory(dawn EXCLUDE_FROM_ALL)
if (DAWN_ENABLE_VULKAN)
target_compile_definitions(dawn_native PRIVATE

extern/dawn (vendored): 2 lines changed

@ -1 +1 @@
Subproject commit f10e70a26db00bb89f88be4204cf49ffc869e194
Subproject commit 65ce958ed0019a860238703efa0087660c4247d4

View File

@ -24,6 +24,7 @@ typedef enum {
AURORA_CONTROLLER_REMOVED,
AURORA_PAUSED,
AURORA_UNPAUSED,
AURORA_DISPLAY_SCALE_CHANGED,
} AuroraEventType;
struct AuroraEvent {

View File

@ -7,9 +7,10 @@
#include "webgpu/gpu.hpp"
#include "window.hpp"
#include <SDL3/SDL.h>
#include <SDL3/SDL_filesystem.h>
#include <imgui.h>
#include <magic_enum.hpp>
#include <webgpu/webgpu_cpp.h>
namespace aurora {
static Module Log("aurora");
@ -19,7 +20,7 @@ AuroraConfig g_config;
// GPU
using webgpu::g_device;
using webgpu::g_queue;
using webgpu::g_swapChain;
using webgpu::g_surface;
constexpr std::array PreferredBackendOrder{
#ifdef ENABLE_BACKEND_WEBGPU
@ -37,12 +38,12 @@ constexpr std::array PreferredBackendOrder{
#ifdef DAWN_ENABLE_BACKEND_D3D11
BACKEND_D3D11,
#endif
//#ifdef DAWN_ENABLE_BACKEND_DESKTOP_GL
// BACKEND_OPENGL,
//#endif
//#ifdef DAWN_ENABLE_BACKEND_OPENGLES
// BACKEND_OPENGLES,
//#endif
// #ifdef DAWN_ENABLE_BACKEND_DESKTOP_GL
// BACKEND_OPENGL,
// #endif
// #ifdef DAWN_ENABLE_BACKEND_OPENGLES
// BACKEND_OPENGLES,
// #endif
#ifdef DAWN_ENABLE_BACKEND_NULL
BACKEND_NULL,
#endif
@ -111,9 +112,7 @@ static AuroraInfo initialize(int argc, char* argv[], const AuroraConfig& config)
}
imgui::initialize();
if (aurora_begin_frame()) {
g_initialFrame = true;
}
g_initialFrame = true;
g_config.desiredBackend = selectedBackend;
return {
.backend = selectedBackend,
@ -123,14 +122,10 @@ static AuroraInfo initialize(int argc, char* argv[], const AuroraConfig& config)
};
}
#ifndef EMSCRIPTEN
static wgpu::TextureView g_currentView;
#endif
static void shutdown() noexcept {
#ifndef EMSCRIPTEN
g_currentView = {};
#endif
imgui::shutdown();
gfx::shutdown();
webgpu::shutdown();
@ -139,26 +134,32 @@ static void shutdown() noexcept {
static const AuroraEvent* update() noexcept {
if (g_initialFrame) {
aurora_end_frame();
g_initialFrame = false;
input::initialize();
}
const auto* events = window::poll_events();
imgui::new_frame(window::get_window_size());
return events;
return window::poll_events();
}
static bool begin_frame() noexcept {
#ifndef EMSCRIPTEN
g_currentView = g_swapChain.GetCurrentTextureView();
if (!g_currentView) {
ImGui::EndFrame();
wgpu::SurfaceTexture surfaceTexture;
g_surface.GetCurrentTexture(&surfaceTexture);
switch (surfaceTexture.status) {
case wgpu::SurfaceGetCurrentTextureStatus::SuccessOptimal:
g_currentView = surfaceTexture.texture.CreateView();
break;
case wgpu::SurfaceGetCurrentTextureStatus::SuccessSuboptimal: {
Log.report(LOG_WARNING, FMT_STRING("Surface texture is suboptimal"));
// Force swapchain recreation
const auto size = window::get_window_size();
webgpu::resize_swapchain(size.fb_width, size.fb_height, true);
return false;
}
#endif
default:
Log.report(LOG_ERROR, FMT_STRING("Failed to get surface texture: {}"),
magic_enum::enum_name(surfaceTexture.status));
return false;
}
imgui::new_frame(window::get_window_size());
gfx::begin_frame();
return true;
}
@ -173,11 +174,7 @@ static void end_frame() noexcept {
{
const std::array attachments{
wgpu::RenderPassColorAttachment{
#ifdef EMSCRIPTEN
.view = g_swapChain.GetCurrentTextureView(),
#else
.view = g_currentView,
#endif
.loadOp = wgpu::LoadOp::Clear,
.storeOp = wgpu::StoreOp::Store,
},
@ -192,24 +189,14 @@ static void end_frame() noexcept {
pass.SetPipeline(webgpu::g_CopyPipeline);
pass.SetBindGroup(0, webgpu::g_CopyBindGroup, 0, nullptr);
pass.Draw(3);
if (!g_initialFrame) {
// Render ImGui
imgui::render(pass);
}
imgui::render(pass);
pass.End();
}
const wgpu::CommandBufferDescriptor cmdBufDescriptor{.label = "Redraw command buffer"};
const auto buffer = encoder.Finish(&cmdBufDescriptor);
g_queue.Submit(1, &buffer);
#ifdef WEBGPU_DAWN
g_swapChain.Present();
g_surface.Present();
g_currentView = {};
#else
emscripten_sleep(0);
#endif
if (!g_initialFrame) {
ImGui::EndFrame();
}
}
} // namespace aurora
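
For reference, a minimal sketch of the surface-based frame flow this file now follows, assuming only the names used above (g_surface, g_queue) and Dawn's current webgpu_cpp.h; the hypothetical example_frame() below is not part of the source:

// Sketch: acquire the surface texture, render into a view of it, then present.
bool example_frame() {
  wgpu::SurfaceTexture surfaceTexture;
  g_surface.GetCurrentTexture(&surfaceTexture);
  switch (surfaceTexture.status) {
  case wgpu::SurfaceGetCurrentTextureStatus::SuccessOptimal:
  case wgpu::SurfaceGetCurrentTextureStatus::SuccessSuboptimal:
    break; // usable; a suboptimal texture may warrant reconfiguring the surface, as begin_frame does above
  default:
    return false; // e.g. an outdated or lost surface: skip this frame
  }
  const wgpu::TextureView view = surfaceTexture.texture.CreateView();
  // ... encode render passes targeting `view` and submit them on g_queue ...
  g_surface.Present(); // native (Dawn) path; Emscripten presents implicitly
  return true;
}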

View File

@ -24,93 +24,93 @@
namespace aurora::webgpu::utils {
#if defined(DAWN_ENABLE_BACKEND_OPENGL)
struct GLUserData {
SDL_Window* window;
SDL_GLContext context;
};
void GLMakeCurrent(void* userData) {
auto* data = static_cast<GLUserData*>(userData);
SDL_GL_MakeCurrent(data->window, data->context);
}
void GLDestroy(void* userData) {
auto* data = static_cast<GLUserData*>(userData);
SDL_GL_DestroyContext(data->context);
delete data;
}
#endif
bool DiscoverAdapter(dawn::native::Instance* instance, [[maybe_unused]] SDL_Window* window, wgpu::BackendType type) {
switch (type) {
#if defined(DAWN_ENABLE_BACKEND_D3D11)
case wgpu::BackendType::D3D11: {
dawn::native::d3d11::PhysicalDeviceDiscoveryOptions options;
return instance->DiscoverPhysicalDevices(&options);
}
#endif
#if defined(DAWN_ENABLE_BACKEND_D3D12)
case wgpu::BackendType::D3D12: {
dawn::native::d3d12::PhysicalDeviceDiscoveryOptions options;
return instance->DiscoverPhysicalDevices(&options);
}
#endif
#if defined(DAWN_ENABLE_BACKEND_METAL)
case wgpu::BackendType::Metal: {
dawn::native::metal::PhysicalDeviceDiscoveryOptions options;
return instance->DiscoverPhysicalDevices(&options);
}
#endif
#if defined(DAWN_ENABLE_BACKEND_VULKAN)
case wgpu::BackendType::Vulkan: {
dawn::native::vulkan::PhysicalDeviceDiscoveryOptions options;
return instance->DiscoverPhysicalDevices(&options);
}
#endif
#if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
case wgpu::BackendType::OpenGL: {
SDL_GL_ResetAttributes();
SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 4);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 4);
SDL_GLContext context = SDL_GL_CreateContext(window);
dawn::native::opengl::PhysicalDeviceDiscoveryOptions options{WGPUBackendType_OpenGL};
options.getProc = reinterpret_cast<void* (*)(const char*)>(SDL_GL_GetProcAddress);
options.makeCurrent = GLMakeCurrent;
options.destroy = GLDestroy;
options.userData = new GLUserData{
.window = window,
.context = context,
};
return instance->DiscoverPhysicalDevices(&options);
}
#endif
#if defined(DAWN_ENABLE_BACKEND_OPENGLES)
case wgpu::BackendType::OpenGLES: {
SDL_GL_ResetAttributes();
SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_ES);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 0);
SDL_GLContext context = SDL_GL_CreateContext(window);
dawn::native::opengl::PhysicalDeviceDiscoveryOptions options{WGPUBackendType_OpenGLES};
options.getProc = reinterpret_cast<void* (*)(const char*)>(SDL_GL_GetProcAddress);
options.makeCurrent = GLMakeCurrent;
options.destroy = GLDestroy;
options.userData = new GLUserData{
.window = window,
.context = context,
};
return instance->DiscoverPhysicalDevices(&options);
}
#endif
#if defined(DAWN_ENABLE_BACKEND_NULL)
case wgpu::BackendType::Null:
instance->DiscoverDefaultPhysicalDevices();
return true;
#endif
default:
return false;
}
}
//#if defined(DAWN_ENABLE_BACKEND_OPENGL)
//struct GLUserData {
// SDL_Window* window;
// SDL_GLContext context;
//};
//void GLMakeCurrent(void* userData) {
// auto* data = static_cast<GLUserData*>(userData);
// SDL_GL_MakeCurrent(data->window, data->context);
//}
//void GLDestroy(void* userData) {
// auto* data = static_cast<GLUserData*>(userData);
// SDL_GL_DestroyContext(data->context);
// delete data;
//}
//#endif
//
//bool DiscoverAdapter(dawn::native::Instance* instance, [[maybe_unused]] SDL_Window* window, wgpu::BackendType type) {
// switch (type) {
//#if defined(DAWN_ENABLE_BACKEND_D3D11)
// case wgpu::BackendType::D3D11: {
// dawn::native::d3d11::PhysicalDeviceDiscoveryOptions options;
// return instance->DiscoverPhysicalDevices(&options);
// }
//#endif
//#if defined(DAWN_ENABLE_BACKEND_D3D12)
// case wgpu::BackendType::D3D12: {
// dawn::native::d3d12::PhysicalDeviceDiscoveryOptions options;
// return instance->DiscoverPhysicalDevices(&options);
// }
//#endif
//#if defined(DAWN_ENABLE_BACKEND_METAL)
// case wgpu::BackendType::Metal: {
// dawn::native::metal::PhysicalDeviceDiscoveryOptions options;
// return instance->DiscoverPhysicalDevices(&options);
// }
//#endif
//#if defined(DAWN_ENABLE_BACKEND_VULKAN)
// case wgpu::BackendType::Vulkan: {
// dawn::native::vulkan::PhysicalDeviceDiscoveryOptions options;
// return instance->DiscoverPhysicalDevices(&options);
// }
//#endif
//#if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
// case wgpu::BackendType::OpenGL: {
// SDL_GL_ResetAttributes();
// SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
// SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 4);
// SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 4);
// SDL_GLContext context = SDL_GL_CreateContext(window);
// dawn::native::opengl::PhysicalDeviceDiscoveryOptions options{WGPUBackendType_OpenGL};
// options.getProc = reinterpret_cast<void* (*)(const char*)>(SDL_GL_GetProcAddress);
// options.makeCurrent = GLMakeCurrent;
// options.destroy = GLDestroy;
// options.userData = new GLUserData{
// .window = window,
// .context = context,
// };
// return instance->DiscoverPhysicalDevices(&options);
// }
//#endif
//#if defined(DAWN_ENABLE_BACKEND_OPENGLES)
// case wgpu::BackendType::OpenGLES: {
// SDL_GL_ResetAttributes();
// SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_ES);
// SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3);
// SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 0);
// SDL_GLContext context = SDL_GL_CreateContext(window);
// dawn::native::opengl::PhysicalDeviceDiscoveryOptions options{WGPUBackendType_OpenGLES};
// options.getProc = reinterpret_cast<void* (*)(const char*)>(SDL_GL_GetProcAddress);
// options.makeCurrent = GLMakeCurrent;
// options.destroy = GLDestroy;
// options.userData = new GLUserData{
// .window = window,
// .context = context,
// };
// return instance->DiscoverPhysicalDevices(&options);
// }
//#endif
//#if defined(DAWN_ENABLE_BACKEND_NULL)
// case wgpu::BackendType::Null:
// instance->DiscoverDefaultPhysicalDevices();
// return true;
//#endif
// default:
// return false;
// }
//}
std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptorCocoa(SDL_Window* window);
@ -120,27 +120,24 @@ std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptor(SDL_Wind
#else
const auto props = SDL_GetWindowProperties(window);
#if defined(SDL_PLATFORM_WIN32)
std::unique_ptr<wgpu::SurfaceDescriptorFromWindowsHWND> desc =
std::make_unique<wgpu::SurfaceDescriptorFromWindowsHWND>();
std::unique_ptr<wgpu::SurfaceSourceWindowsHWND> desc =
std::make_unique<wgpu::SurfaceSourceWindowsHWND>();
desc->hwnd = wmInfo.info.win.window;
desc->hinstance = wmInfo.info.win.hinstance;
return std::move(desc);
#elif defined(SDL_PLATFORM_LINUX)
if (SDL_strcmp(SDL_GetCurrentVideoDriver(), "wayland") == 0) {
std::unique_ptr<wgpu::SurfaceDescriptorFromWaylandSurface> desc =
std::make_unique<wgpu::SurfaceDescriptorFromWaylandSurface>();
desc->display =
SDL_GetPointerProperty(SDL_GetWindowProperties(window), SDL_PROP_WINDOW_WAYLAND_DISPLAY_POINTER, nullptr);
desc->surface =
SDL_GetPointerProperty(SDL_GetWindowProperties(window), SDL_PROP_WINDOW_WAYLAND_SURFACE_POINTER, nullptr);
std::unique_ptr<wgpu::SurfaceSourceWaylandSurface> desc =
std::make_unique<wgpu::SurfaceSourceWaylandSurface>();
desc->display = SDL_GetPointerProperty(props, SDL_PROP_WINDOW_WAYLAND_DISPLAY_POINTER, nullptr);
desc->surface = SDL_GetPointerProperty(props, SDL_PROP_WINDOW_WAYLAND_SURFACE_POINTER, nullptr);
return std::move(desc);
}
if (SDL_strcmp(SDL_GetCurrentVideoDriver(), "x11") == 0) {
std::unique_ptr<wgpu::SurfaceDescriptorFromXlibWindow> desc =
std::make_unique<wgpu::SurfaceDescriptorFromXlibWindow>();
desc->display =
SDL_GetPointerProperty(SDL_GetWindowProperties(window), SDL_PROP_WINDOW_X11_DISPLAY_POINTER, nullptr);
desc->window = SDL_GetNumberProperty(SDL_GetWindowProperties(window), SDL_PROP_WINDOW_X11_WINDOW_NUMBER, 0);
std::unique_ptr<wgpu::SurfaceSourceXlibWindow> desc =
std::make_unique<wgpu::SurfaceSourceXlibWindow>();
desc->display = SDL_GetPointerProperty(props, SDL_PROP_WINDOW_X11_DISPLAY_POINTER, nullptr);
desc->window = SDL_GetNumberProperty(props, SDL_PROP_WINDOW_X11_WINDOW_NUMBER, 0);
return std::move(desc);
}
#endif
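
The renamed wgpu::SurfaceSource* structs are consumed the same way the old SurfaceDescriptorFrom* structs were: chained into a wgpu::SurfaceDescriptor and passed to CreateSurface. A minimal sketch for the X11 case follows; instance, x11Display and x11Window are placeholders for the real wgpu::Instance and the SDL property lookups shown above:

// Sketch only: chain an X11 surface source into a surface descriptor.
wgpu::SurfaceSourceXlibWindow source;
source.display = x11Display; // from SDL_PROP_WINDOW_X11_DISPLAY_POINTER
source.window = x11Window;   // from SDL_PROP_WINDOW_X11_WINDOW_NUMBER
const wgpu::SurfaceDescriptor surfaceDescriptor{
    .nextInChain = &source,
    .label = "Surface",
};
wgpu::Surface surface = instance.CreateSurface(&surfaceDescriptor);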

View File

@ -92,14 +92,14 @@ namespace aurora {
// the structure definition, which could easily change with Dawn updates.
template <>
inline HashType xxh3_hash(const wgpu::BindGroupDescriptor& input, HashType seed) {
constexpr auto offset = sizeof(void*) * 2; // skip nextInChain, label
constexpr auto offset = sizeof(void*) * 3; // skip nextInChain, label
const auto hash = xxh3_hash_s(reinterpret_cast<const u8*>(&input) + offset,
sizeof(wgpu::BindGroupDescriptor) - offset - sizeof(void*) /* skip entries */, seed);
return xxh3_hash_s(input.entries, sizeof(wgpu::BindGroupEntry) * input.entryCount, hash);
}
template <>
inline HashType xxh3_hash(const wgpu::SamplerDescriptor& input, HashType seed) {
constexpr auto offset = sizeof(void*) * 2; // skip nextInChain, label
constexpr auto offset = sizeof(void*) * 3; // skip nextInChain, label
return xxh3_hash_s(reinterpret_cast<const u8*>(&input) + offset,
sizeof(wgpu::SamplerDescriptor) - offset - 2 /* skip padding */, seed);
}
@ -135,7 +135,7 @@ wgpu::Buffer g_uniformBuffer;
wgpu::Buffer g_indexBuffer;
wgpu::Buffer g_storageBuffer;
static std::array<wgpu::Buffer, 3> g_stagingBuffers;
static wgpu::SupportedLimits g_cachedLimits;
static wgpu::Limits g_cachedLimits;
static ShaderState g_state;
static PipelineRef g_currentPipeline;
@ -378,8 +378,7 @@ void load_pipeline_cache() {
if (config.version != gx::GXPipelineConfigVersion) {
break;
}
find_pipeline(
type, config, [=]() { return stream::create_pipeline(g_state.stream, config); }, true);
find_pipeline(type, config, [=]() { return stream::create_pipeline(g_state.stream, config); }, true);
} break;
case ShaderType::Model: {
if (size != sizeof(model::PipelineConfig)) {
@ -389,8 +388,7 @@ void load_pipeline_cache() {
if (config.version != gx::GXPipelineConfigVersion) {
break;
}
find_pipeline(
type, config, [=]() { return model::create_pipeline(g_state.model, config); }, true);
find_pipeline(type, config, [=]() { return model::create_pipeline(g_state.model, config); }, true);
} break;
default:
Log.report(LOG_WARNING, FMT_STRING("Unknown pipeline type {}"), static_cast<int>(type));
@ -494,24 +492,20 @@ static bool bufferMapped = false;
void map_staging_buffer() {
bufferMapped = false;
g_stagingBuffers[currentStagingBuffer].MapAsync(
wgpu::MapMode::Write, 0, StagingBufferSize,
[](WGPUBufferMapAsyncStatus status, void* userdata) {
if (status == WGPUBufferMapAsyncStatus_DestroyedBeforeCallback) {
wgpu::MapMode::Write, 0, StagingBufferSize, wgpu::CallbackMode::AllowSpontaneous,
[](wgpu::MapAsyncStatus status, wgpu::StringView message) {
if (status == wgpu::MapAsyncStatus::CallbackCancelled) {
return;
}
ASSERT(status == WGPUBufferMapAsyncStatus_Success, "Buffer mapping failed: {}", static_cast<int>(status));
*static_cast<bool*>(userdata) = true;
},
&bufferMapped);
ASSERT(status == wgpu::MapAsyncStatus::Success, "Buffer mapping failed: {} {}", magic_enum::enum_name(status),
message);
bufferMapped = true;
});
}
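// For reference, a hedged sketch of the same mapping done synchronously with the
// futures API this update adopts elsewhere (RequestAdapter in gpu.cpp uses
// CallbackMode::WaitAnyOnly plus Instance::WaitAny). example_map_blocking() is a
// hypothetical helper, not part of this file; the code above instead polls
// g_instance.ProcessEvents() from begin_frame().
static bool example_map_blocking(wgpu::Buffer& buffer) {
  bool ok = false;
  const wgpu::Future future =
      buffer.MapAsync(wgpu::MapMode::Write, 0, StagingBufferSize, wgpu::CallbackMode::WaitAnyOnly,
                      [&](wgpu::MapAsyncStatus status, wgpu::StringView) {
                        ok = status == wgpu::MapAsyncStatus::Success;
                      });
  return g_instance.WaitAny(future, 5'000'000'000) == wgpu::WaitStatus::Success && ok;
}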
void begin_frame() {
while (!bufferMapped) {
#ifdef EMSCRIPTEN
emscripten_sleep(0);
#else
g_device.Tick();
#endif
g_instance.ProcessEvents();
}
size_t bufferOffset = 0;
auto& stagingBuf = g_stagingBuffers[currentStagingBuffer];
@ -561,9 +555,9 @@ void end_frame(const wgpu::CommandEncoder& cmd) {
{
// Perform texture copies
for (const auto& item : g_textureUploads) {
const wgpu::ImageCopyBuffer buf{
const wgpu::TexelCopyBufferInfo buf{
.layout =
wgpu::TextureDataLayout{
wgpu::TexelCopyBufferLayout{
.offset = item.layout.offset + bufferOffset,
.bytesPerRow = ALIGN(item.layout.bytesPerRow, 256),
.rowsPerImage = item.layout.rowsPerImage,
@ -623,7 +617,7 @@ void render(wgpu::CommandEncoder& cmd) {
pass.End();
if (passInfo.resolveTarget) {
wgpu::ImageCopyTexture src{
wgpu::TexelCopyTextureInfo src{
.origin =
wgpu::Origin3D{
.x = static_cast<uint32_t>(passInfo.resolveRect.x),
@ -635,7 +629,7 @@ void render(wgpu::CommandEncoder& cmd) {
} else {
src.texture = webgpu::g_frameBuffer.texture;
}
const wgpu::ImageCopyTexture dst{
const wgpu::TexelCopyTextureInfo dst{
.texture = passInfo.resolveTarget->texture,
};
const wgpu::Extent3D size{
@ -750,10 +744,10 @@ static inline Range map(ByteBuffer& target, size_t length, size_t alignment) {
Range push_verts(const uint8_t* data, size_t length) { return push(g_verts, data, length, 0); }
Range push_indices(const uint8_t* data, size_t length) { return push(g_indices, data, length, 0); }
Range push_uniform(const uint8_t* data, size_t length) {
return push(g_uniforms, data, length, g_cachedLimits.limits.minUniformBufferOffsetAlignment);
return push(g_uniforms, data, length, g_cachedLimits.minUniformBufferOffsetAlignment);
}
Range push_storage(const uint8_t* data, size_t length) {
return push(g_storage, data, length, g_cachedLimits.limits.minStorageBufferOffsetAlignment);
return push(g_storage, data, length, g_cachedLimits.minStorageBufferOffsetAlignment);
}
Range push_texture_data(const uint8_t* data, size_t length, u32 bytesPerRow, u32 rowsPerImage) {
// For CopyBufferToTexture, we need an alignment of 256 per row (see Dawn kTextureBytesPerRowAlignment)
@ -776,11 +770,11 @@ std::pair<ByteBuffer, Range> map_indices(size_t length) {
return {ByteBuffer{g_indices.data() + range.offset, range.size}, range};
}
std::pair<ByteBuffer, Range> map_uniform(size_t length) {
const auto range = map(g_uniforms, length, g_cachedLimits.limits.minUniformBufferOffsetAlignment);
const auto range = map(g_uniforms, length, g_cachedLimits.minUniformBufferOffsetAlignment);
return {ByteBuffer{g_uniforms.data() + range.offset, range.size}, range};
}
std::pair<ByteBuffer, Range> map_storage(size_t length) {
const auto range = map(g_storage, length, g_cachedLimits.limits.minStorageBufferOffsetAlignment);
const auto range = map(g_storage, length, g_cachedLimits.minStorageBufferOffsetAlignment);
return {ByteBuffer{g_storage.data() + range.offset, range.size}, range};
}
@ -817,7 +811,7 @@ const wgpu::Sampler& sampler_ref(const wgpu::SamplerDescriptor& descriptor) {
return it->second;
}
uint32_t align_uniform(uint32_t value) { return ALIGN(value, g_cachedLimits.limits.minUniformBufferOffsetAlignment); }
uint32_t align_uniform(uint32_t value) { return ALIGN(value, g_cachedLimits.minUniformBufferOffsetAlignment); }
} // namespace aurora::gfx
void push_debug_group(const char* label) {

View File

@ -205,7 +205,7 @@ wgpu::RenderPipeline build_pipeline(const PipelineConfig& config, const ShaderIn
const auto blendState =
to_blend_state(config.blendMode, config.blendFacSrc, config.blendFacDst, config.blendOp, config.dstAlpha);
const std::array colorTargets{wgpu::ColorTargetState{
.format = g_graphicsConfig.swapChainDescriptor.format,
.format = g_graphicsConfig.surfaceConfiguration.format,
.blend = &blendState,
.writeMask = to_write_mask(config.colorUpdate, config.alphaUpdate),
}};

View File

@ -449,8 +449,8 @@ wgpu::RenderPipeline create_pipeline(const State& state, [[maybe_unused]] const
}
const std::array vtxBuffers{wgpu::VertexBufferLayout{
.arrayStride = offset,
.stepMode = wgpu::VertexStepMode::Vertex,
.arrayStride = offset,
.attributeCount = shaderLocation,
.attributes = vtxAttrs.data(),
}};

View File

@ -67,12 +67,12 @@ TextureHandle new_static_texture_2d(uint32_t width, uint32_t height, uint32_t mi
const uint32_t dataSize = bytesPerRow * heightBlocks * mipSize.depthOrArrayLayers;
CHECK(offset + dataSize <= data.size(), "new_static_texture_2d[{}]: expected at least {} bytes, got {}", label,
offset + dataSize, data.size());
const wgpu::ImageCopyTexture dstView{
const wgpu::TexelCopyTextureInfo dstView{
.texture = ref.texture,
.mipLevel = mip,
};
// const auto range = push_texture_data(data.data() + offset, dataSize, bytesPerRow, heightBlocks);
const wgpu::TextureDataLayout dataLayout{
const wgpu::TexelCopyBufferLayout dataLayout{
// .offset = range.offset,
.bytesPerRow = bytesPerRow,
.rowsPerImage = heightBlocks,
@ -121,7 +121,7 @@ TextureHandle new_dynamic_texture_2d(uint32_t width, uint32_t height, uint32_t m
}
TextureHandle new_render_texture(uint32_t width, uint32_t height, u32 fmt, const char* label) noexcept {
const auto wgpuFormat = webgpu::g_graphicsConfig.swapChainDescriptor.format;
const auto wgpuFormat = webgpu::g_graphicsConfig.surfaceConfiguration.format;
const wgpu::Extent3D size{
.width = width,
.height = height,
@ -184,11 +184,11 @@ void write_texture(const TextureRef& ref, ArrayRef<uint8_t> data) noexcept {
// .rowsPerImage = heightBlocks,
// };
// g_textureUploads.emplace_back(dataLayout, std::move(dstView), physicalSize);
const wgpu::ImageCopyTexture dstView{
const wgpu::TexelCopyTextureInfo dstView{
.texture = ref.texture,
.mipLevel = mip,
};
const wgpu::TextureDataLayout dataLayout{
const wgpu::TexelCopyBufferLayout dataLayout{
.bytesPerRow = bytesPerRow,
.rowsPerImage = heightBlocks,
};

View File

@ -5,11 +5,11 @@
namespace aurora::gfx {
struct TextureUpload {
wgpu::TextureDataLayout layout;
wgpu::ImageCopyTexture tex;
wgpu::TexelCopyBufferLayout layout;
wgpu::TexelCopyTextureInfo tex;
wgpu::Extent3D size;
TextureUpload(wgpu::TextureDataLayout layout, wgpu::ImageCopyTexture tex, wgpu::Extent3D size) noexcept
TextureUpload(wgpu::TexelCopyBufferLayout layout, wgpu::TexelCopyTextureInfo tex, wgpu::Extent3D size) noexcept
: layout(layout), tex(tex), size(size) {}
};
extern std::vector<TextureUpload> g_textureUploads;

View File

@ -1,17 +1,19 @@
#include "imgui.hpp"
#include "webgpu/gpu.hpp"
#include <cstddef>
#include <string>
#include <vector>
#include <webgpu/webgpu_cpp.h>
#include "internal.hpp"
#include "webgpu/gpu.hpp"
#include "window.hpp"
#include <SDL3/SDL.h>
#include <webgpu/webgpu.h>
#define IMGUI_IMPL_WEBGPU_BACKEND_DAWN
#include "../imgui/backends/imgui_impl_sdl3.cpp" // NOLINT(bugprone-suspicious-include)
#include "../imgui/backends/imgui_impl_sdlrenderer3.cpp" // NOLINT(bugprone-suspicious-include)
// #include "../imgui/backends/imgui_impl_wgpu.cpp" // NOLINT(bugprone-suspicious-include)
// TODO: Transition back to imgui-provided backend when it uses WGSL
#include "imgui_impl_wgpu.cpp" // NOLINT(bugprone-suspicious-include)
#include "../imgui/backends/imgui_impl_wgpu.cpp" // NOLINT(bugprone-suspicious-include)
namespace aurora::imgui {
static float g_scale;
@ -37,14 +39,16 @@ void initialize() noexcept {
ImGui_ImplSDL3_Init(window::get_sdl_window(), renderer, NULL);
#ifdef __APPLE__
// Disable MouseCanUseGlobalState for scaling purposes
ImGui_ImplSDL2_GetBackendData()->MouseCanUseGlobalState = false;
ImGui_ImplSDL3_GetBackendData()->MouseCanUseGlobalState = false;
#endif
g_useSdlRenderer = renderer != nullptr;
if (g_useSdlRenderer) {
ImGui_ImplSDLRenderer3_Init(renderer);
} else {
const auto format = webgpu::g_graphicsConfig.swapChainDescriptor.format;
ImGui_ImplWGPU_Init(webgpu::g_device.Get(), 1, static_cast<WGPUTextureFormat>(format));
ImGui_ImplWGPU_InitInfo info;
info.Device = webgpu::g_device.Get();
info.RenderTargetFormat = static_cast<WGPUTextureFormat>(webgpu::g_graphicsConfig.surfaceConfiguration.format);
ImGui_ImplWGPU_Init(&info);
}
}
@ -64,14 +68,15 @@ void shutdown() noexcept {
}
void process_event(const SDL_Event& event) noexcept {
#ifdef __APPLE__
if (event.type == SDL_MOUSEMOTION) {
auto& io = ImGui::GetIO();
// Scale up mouse coordinates
io.AddMousePosEvent(static_cast<float>(event.motion.x) * g_scale, static_cast<float>(event.motion.y) * g_scale);
if (event.type == SDL_EVENT_MOUSE_MOTION) {
SDL_Event scaledEvent = event;
scaledEvent.motion.x *= g_scale;
scaledEvent.motion.y *= g_scale;
scaledEvent.motion.xrel *= g_scale;
scaledEvent.motion.yrel *= g_scale;
ImGui_ImplSDL3_ProcessEvent(&scaledEvent);
return;
}
#endif
ImGui_ImplSDL3_ProcessEvent(&event);
}
@ -82,8 +87,6 @@ void new_frame(const AuroraWindowSize& size) noexcept {
} else {
if (g_scale != size.scale) {
if (g_scale > 0.f) {
// TODO wgpu backend bug: doesn't clear bind groups on invalidate
g_resources.ImageBindGroups.Clear();
ImGui_ImplWGPU_CreateDeviceObjects();
}
g_scale = size.scale;
@ -149,17 +152,17 @@ ImTextureID add_texture(uint32_t width, uint32_t height, const uint8_t* data) no
auto texture = webgpu::g_device.CreateTexture(&textureDescriptor);
auto textureView = texture.CreateView(&textureViewDescriptor);
{
const wgpu::ImageCopyTexture dstView{
const wgpu::TexelCopyTextureInfo dstView{
.texture = texture,
};
const wgpu::TextureDataLayout dataLayout{
const wgpu::TexelCopyBufferLayout dataLayout{
.bytesPerRow = 4 * width,
.rowsPerImage = height,
};
webgpu::g_queue.WriteTexture(&dstView, data, width * height * 4, &dataLayout, &size);
}
g_wgpuTextures.push_back(texture);
return reinterpret_cast<ImTextureID>(textureView.Release());
return reinterpret_cast<ImTextureID>(textureView.MoveToCHandle());
}
} // namespace aurora::imgui
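
For reference, a minimal sketch of the InitInfo-based backend setup used above; NumFramesInFlight and DepthStencilFormat are additional optional fields of ImGui_ImplWGPU_InitInfo, and the values shown for them are assumptions rather than anything this file sets:

ImGui_ImplWGPU_InitInfo info;
info.Device = webgpu::g_device.Get();
info.NumFramesInFlight = 3; // assumed default
info.RenderTargetFormat = static_cast<WGPUTextureFormat>(webgpu::g_graphicsConfig.surfaceConfiguration.format);
info.DepthStencilFormat = WGPUTextureFormat_Undefined; // the ImGui pass renders without depth
ImGui_ImplWGPU_Init(&info);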

View File

@ -1,642 +0,0 @@
// dear imgui: Renderer for WebGPU
// This needs to be used along with a Platform Binding (e.g. GLFW)
// (Please note that WebGPU is currently experimental, will not run on non-beta browsers, and may break.)
// Implemented features:
// [X] Renderer: User texture binding. Use 'WGPUTextureView' as ImTextureID. Read the FAQ about ImTextureID!
// [X] Renderer: Support for large meshes (64k+ vertices) with 16-bit indices.
// You can use unmodified imgui_impl_* files in your project. See examples/ folder for examples of using this.
// Prefer including the entire imgui/ repository into your project (either as a copy or as a submodule), and only build the backends you need.
// If you are new to Dear ImGui, read documentation from the docs/ folder + read the top of imgui.cpp.
// Read online: https://github.com/ocornut/imgui/tree/master/docs
// CHANGELOG
// (minor and older changes stripped away, please see git history for details)
// 2022-11-24: Fixed validation error with default depth buffer settings.
// 2022-11-10: Fixed rendering when a depth buffer is enabled. Added 'WGPUTextureFormat depth_format' parameter to ImGui_ImplWGPU_Init().
// 2022-10-11: Using 'nullptr' instead of 'NULL' as per our switch to C++11.
// 2021-11-29: Passing explicit buffer sizes to wgpuRenderPassEncoderSetVertexBuffer()/wgpuRenderPassEncoderSetIndexBuffer().
// 2021-08-24: Fixed for latest specs.
// 2021-05-24: Add support for draw_data->FramebufferScale.
// 2021-05-19: Replaced direct access to ImDrawCmd::TextureId with a call to ImDrawCmd::GetTexID(). (will become a requirement)
// 2021-05-16: Update to latest WebGPU specs (compatible with Emscripten 2.0.20 and Chrome Canary 92).
// 2021-02-18: Change blending equation to preserve alpha in output buffer.
// 2021-01-28: Initial version.
#include "imgui.h"
#include "imgui_impl_wgpu.h"
#include <limits.h>
#include <webgpu/webgpu.h>
// Dear ImGui prototypes from imgui_internal.h
extern ImGuiID ImHashData(const void* data_p, size_t data_size, ImU32 seed = 0);
// WebGPU data
static WGPUDevice g_wgpuDevice = nullptr;
static WGPUQueue g_defaultQueue = nullptr;
static WGPUTextureFormat g_renderTargetFormat = WGPUTextureFormat_Undefined;
static WGPUTextureFormat g_depthStencilFormat = WGPUTextureFormat_Undefined;
static WGPURenderPipeline g_pipelineState = nullptr;
struct RenderResources
{
WGPUTexture FontTexture; // Font texture
WGPUTextureView FontTextureView; // Texture view for font texture
WGPUSampler Sampler; // Sampler for the font texture
WGPUBuffer Uniforms; // Shader uniforms
WGPUBindGroup CommonBindGroup; // Resources bind-group to bind the common resources to pipeline
ImGuiStorage ImageBindGroups; // Resources bind-group to bind the font/image resources to pipeline (this is a key->value map)
WGPUBindGroup ImageBindGroup; // Default font-resource of Dear ImGui
WGPUBindGroupLayout ImageBindGroupLayout; // Cache layout used for the image bind group. Avoids allocating unnecessary JS objects when working with WebASM
};
static RenderResources g_resources;
struct FrameResources
{
WGPUBuffer IndexBuffer;
WGPUBuffer VertexBuffer;
ImDrawIdx* IndexBufferHost;
ImDrawVert* VertexBufferHost;
int IndexBufferSize;
int VertexBufferSize;
};
static FrameResources* g_pFrameResources = nullptr;
static unsigned int g_numFramesInFlight = 0;
static unsigned int g_frameIndex = UINT_MAX;
struct Uniforms
{
float MVP[4][4];
};
//-----------------------------------------------------------------------------
// SHADERS
//-----------------------------------------------------------------------------
static const char* __wgsl_shader = R"(
struct Uniforms
{
mvp: mat4x4<f32>,
}
@group(0) @binding(0) var<uniform> uniforms: Uniforms;
@group(0) @binding(1) var samp: sampler;
@group(1) @binding(0) var tex: texture_2d<f32>;
struct VertexOutput
{
@location(0) color: vec4<f32>,
@location(1) uv: vec2<f32>,
@builtin(position) pos: vec4<f32>,
}
@vertex
fn vs_main(
@location(0) pos: vec2<f32>,
@location(1) uv: vec2<f32>,
@location(2) color: vec4<f32>,
) -> VertexOutput
{
var out: VertexOutput;
out.color = color;
out.uv = uv;
out.pos = uniforms.mvp * vec4<f32>(pos.x, pos.y, 0.0, 1.0);
return out;
}
@fragment
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32>
{
return in.color * textureSample(tex, samp, in.uv.xy);
}
)";
static void SafeRelease(ImDrawIdx*& res)
{
if (res)
delete[] res;
res = nullptr;
}
static void SafeRelease(ImDrawVert*& res)
{
if (res)
delete[] res;
res = nullptr;
}
static void SafeRelease(WGPUBindGroupLayout& res)
{
if (res)
wgpuBindGroupLayoutRelease(res);
res = nullptr;
}
static void SafeRelease(WGPUBindGroup& res)
{
if (res)
wgpuBindGroupRelease(res);
res = nullptr;
}
static void SafeRelease(WGPUBuffer& res)
{
if (res)
wgpuBufferRelease(res);
res = nullptr;
}
static void SafeRelease(WGPURenderPipeline& res)
{
if (res)
wgpuRenderPipelineRelease(res);
res = nullptr;
}
static void SafeRelease(WGPUSampler& res)
{
if (res)
wgpuSamplerRelease(res);
res = nullptr;
}
static void SafeRelease(WGPUShaderModule& res)
{
if (res)
wgpuShaderModuleRelease(res);
res = nullptr;
}
static void SafeRelease(WGPUTextureView& res)
{
if (res)
wgpuTextureViewRelease(res);
res = nullptr;
}
static void SafeRelease(WGPUTexture& res)
{
if (res)
wgpuTextureRelease(res);
res = nullptr;
}
static void SafeRelease(RenderResources& res)
{
SafeRelease(res.FontTexture);
SafeRelease(res.FontTextureView);
SafeRelease(res.Sampler);
SafeRelease(res.Uniforms);
SafeRelease(res.CommonBindGroup);
SafeRelease(res.ImageBindGroup);
SafeRelease(res.ImageBindGroupLayout);
};
static void SafeRelease(FrameResources& res)
{
SafeRelease(res.IndexBuffer);
SafeRelease(res.VertexBuffer);
SafeRelease(res.IndexBufferHost);
SafeRelease(res.VertexBufferHost);
}
static WGPUShaderModule ImGui_ImplWGPU_CreateShaderModule(const char* source)
{
WGPUShaderModuleWGSLDescriptor wgsl_desc = {};
wgsl_desc.chain.sType = WGPUSType_ShaderModuleWGSLDescriptor;
wgsl_desc.code = source;
WGPUShaderModuleDescriptor desc = {};
desc.nextInChain = reinterpret_cast<WGPUChainedStruct*>(&wgsl_desc);
return wgpuDeviceCreateShaderModule(g_wgpuDevice, &desc);
}
static WGPUBindGroup ImGui_ImplWGPU_CreateImageBindGroup(WGPUBindGroupLayout layout, WGPUTextureView texture)
{
WGPUBindGroupEntry image_bg_entries[] = { { nullptr, 0, 0, 0, 0, 0, texture } };
WGPUBindGroupDescriptor image_bg_descriptor = {};
image_bg_descriptor.layout = layout;
image_bg_descriptor.entryCount = sizeof(image_bg_entries) / sizeof(WGPUBindGroupEntry);
image_bg_descriptor.entries = image_bg_entries;
return wgpuDeviceCreateBindGroup(g_wgpuDevice, &image_bg_descriptor);
}
static void ImGui_ImplWGPU_SetupRenderState(ImDrawData* draw_data, WGPURenderPassEncoder ctx, FrameResources* fr)
{
// Setup orthographic projection matrix into our constant buffer
// Our visible imgui space lies from draw_data->DisplayPos (top left) to draw_data->DisplayPos+data_data->DisplaySize (bottom right).
{
float L = draw_data->DisplayPos.x;
float R = draw_data->DisplayPos.x + draw_data->DisplaySize.x;
float T = draw_data->DisplayPos.y;
float B = draw_data->DisplayPos.y + draw_data->DisplaySize.y;
float mvp[4][4] =
{
{ 2.0f/(R-L), 0.0f, 0.0f, 0.0f },
{ 0.0f, 2.0f/(T-B), 0.0f, 0.0f },
{ 0.0f, 0.0f, 0.5f, 0.0f },
{ (R+L)/(L-R), (T+B)/(B-T), 0.5f, 1.0f },
};
wgpuQueueWriteBuffer(g_defaultQueue, g_resources.Uniforms, 0, mvp, sizeof(mvp));
}
// Setup viewport
wgpuRenderPassEncoderSetViewport(ctx, 0, 0, draw_data->FramebufferScale.x * draw_data->DisplaySize.x, draw_data->FramebufferScale.y * draw_data->DisplaySize.y, 0, 1);
// Bind shader and vertex buffers
wgpuRenderPassEncoderSetVertexBuffer(ctx, 0, fr->VertexBuffer, 0, fr->VertexBufferSize * sizeof(ImDrawVert));
wgpuRenderPassEncoderSetIndexBuffer(ctx, fr->IndexBuffer, sizeof(ImDrawIdx) == 2 ? WGPUIndexFormat_Uint16 : WGPUIndexFormat_Uint32, 0, fr->IndexBufferSize * sizeof(ImDrawIdx));
wgpuRenderPassEncoderSetPipeline(ctx, g_pipelineState);
wgpuRenderPassEncoderSetBindGroup(ctx, 0, g_resources.CommonBindGroup, 0, nullptr);
// Setup blend factor
WGPUColor blend_color = { 0.f, 0.f, 0.f, 0.f };
wgpuRenderPassEncoderSetBlendConstant(ctx, &blend_color);
}
// Render function
// (this used to be set in io.RenderDrawListsFn and called by ImGui::Render(), but you can now call this directly from your main loop)
void ImGui_ImplWGPU_RenderDrawData(ImDrawData* draw_data, WGPURenderPassEncoder pass_encoder)
{
// Avoid rendering when minimized
if (draw_data->DisplaySize.x <= 0.0f || draw_data->DisplaySize.y <= 0.0f)
return;
// FIXME: Assuming that this only gets called once per frame!
// If not, we can't just re-allocate the IB or VB, we'll have to do a proper allocator.
g_frameIndex = g_frameIndex + 1;
FrameResources* fr = &g_pFrameResources[g_frameIndex % g_numFramesInFlight];
// Create and grow vertex/index buffers if needed
if (fr->VertexBuffer == nullptr || fr->VertexBufferSize < draw_data->TotalVtxCount)
{
if (fr->VertexBuffer)
{
wgpuBufferDestroy(fr->VertexBuffer);
wgpuBufferRelease(fr->VertexBuffer);
}
SafeRelease(fr->VertexBufferHost);
fr->VertexBufferSize = draw_data->TotalVtxCount + 5000;
WGPUBufferDescriptor vb_desc =
{
nullptr,
"Dear ImGui Vertex buffer",
WGPUBufferUsage_CopyDst | WGPUBufferUsage_Vertex,
fr->VertexBufferSize * sizeof(ImDrawVert),
false
};
fr->VertexBuffer = wgpuDeviceCreateBuffer(g_wgpuDevice, &vb_desc);
if (!fr->VertexBuffer)
return;
fr->VertexBufferHost = new ImDrawVert[fr->VertexBufferSize];
}
if (fr->IndexBuffer == nullptr || fr->IndexBufferSize < draw_data->TotalIdxCount)
{
if (fr->IndexBuffer)
{
wgpuBufferDestroy(fr->IndexBuffer);
wgpuBufferRelease(fr->IndexBuffer);
}
SafeRelease(fr->IndexBufferHost);
fr->IndexBufferSize = draw_data->TotalIdxCount + 10000;
WGPUBufferDescriptor ib_desc =
{
nullptr,
"Dear ImGui Index buffer",
WGPUBufferUsage_CopyDst | WGPUBufferUsage_Index,
fr->IndexBufferSize * sizeof(ImDrawIdx),
false
};
fr->IndexBuffer = wgpuDeviceCreateBuffer(g_wgpuDevice, &ib_desc);
if (!fr->IndexBuffer)
return;
fr->IndexBufferHost = new ImDrawIdx[fr->IndexBufferSize];
}
// Upload vertex/index data into a single contiguous GPU buffer
ImDrawVert* vtx_dst = (ImDrawVert*)fr->VertexBufferHost;
ImDrawIdx* idx_dst = (ImDrawIdx*)fr->IndexBufferHost;
for (int n = 0; n < draw_data->CmdListsCount; n++)
{
const ImDrawList* cmd_list = draw_data->CmdLists[n];
memcpy(vtx_dst, cmd_list->VtxBuffer.Data, cmd_list->VtxBuffer.Size * sizeof(ImDrawVert));
memcpy(idx_dst, cmd_list->IdxBuffer.Data, cmd_list->IdxBuffer.Size * sizeof(ImDrawIdx));
vtx_dst += cmd_list->VtxBuffer.Size;
idx_dst += cmd_list->IdxBuffer.Size;
}
int64_t vb_write_size = ((char*)vtx_dst - (char*)fr->VertexBufferHost + 3) & ~3;
int64_t ib_write_size = ((char*)idx_dst - (char*)fr->IndexBufferHost + 3) & ~3;
wgpuQueueWriteBuffer(g_defaultQueue, fr->VertexBuffer, 0, fr->VertexBufferHost, vb_write_size);
wgpuQueueWriteBuffer(g_defaultQueue, fr->IndexBuffer, 0, fr->IndexBufferHost, ib_write_size);
// Setup desired render state
ImGui_ImplWGPU_SetupRenderState(draw_data, pass_encoder, fr);
// Render command lists
// (Because we merged all buffers into a single one, we maintain our own offset into them)
int global_vtx_offset = 0;
int global_idx_offset = 0;
ImVec2 clip_scale = draw_data->FramebufferScale;
ImVec2 clip_off = draw_data->DisplayPos;
for (int n = 0; n < draw_data->CmdListsCount; n++)
{
const ImDrawList* cmd_list = draw_data->CmdLists[n];
for (int cmd_i = 0; cmd_i < cmd_list->CmdBuffer.Size; cmd_i++)
{
const ImDrawCmd* pcmd = &cmd_list->CmdBuffer[cmd_i];
if (pcmd->UserCallback != nullptr)
{
// User callback, registered via ImDrawList::AddCallback()
// (ImDrawCallback_ResetRenderState is a special callback value used by the user to request the renderer to reset render state.)
if (pcmd->UserCallback == ImDrawCallback_ResetRenderState)
ImGui_ImplWGPU_SetupRenderState(draw_data, pass_encoder, fr);
else
pcmd->UserCallback(cmd_list, pcmd);
}
else
{
// Bind custom texture
ImTextureID tex_id = pcmd->GetTexID();
ImGuiID tex_id_hash = ImHashData(&tex_id, sizeof(tex_id));
auto bind_group = g_resources.ImageBindGroups.GetVoidPtr(tex_id_hash);
if (bind_group)
{
wgpuRenderPassEncoderSetBindGroup(pass_encoder, 1, (WGPUBindGroup)bind_group, 0, nullptr);
}
else
{
WGPUBindGroup image_bind_group = ImGui_ImplWGPU_CreateImageBindGroup(g_resources.ImageBindGroupLayout, (WGPUTextureView)tex_id);
g_resources.ImageBindGroups.SetVoidPtr(tex_id_hash, image_bind_group);
wgpuRenderPassEncoderSetBindGroup(pass_encoder, 1, image_bind_group, 0, nullptr);
}
// Project scissor/clipping rectangles into framebuffer space
ImVec2 clip_min((pcmd->ClipRect.x - clip_off.x) * clip_scale.x, (pcmd->ClipRect.y - clip_off.y) * clip_scale.y);
ImVec2 clip_max((pcmd->ClipRect.z - clip_off.x) * clip_scale.x, (pcmd->ClipRect.w - clip_off.y) * clip_scale.y);
if (clip_max.x <= clip_min.x || clip_max.y <= clip_min.y)
continue;
// Apply scissor/clipping rectangle, Draw
wgpuRenderPassEncoderSetScissorRect(pass_encoder, (uint32_t)clip_min.x, (uint32_t)clip_min.y, (uint32_t)(clip_max.x - clip_min.x), (uint32_t)(clip_max.y - clip_min.y));
wgpuRenderPassEncoderDrawIndexed(pass_encoder, pcmd->ElemCount, 1, pcmd->IdxOffset + global_idx_offset, pcmd->VtxOffset + global_vtx_offset, 0);
}
}
global_idx_offset += cmd_list->IdxBuffer.Size;
global_vtx_offset += cmd_list->VtxBuffer.Size;
}
}
static void ImGui_ImplWGPU_CreateFontsTexture()
{
// Build texture atlas
ImGuiIO& io = ImGui::GetIO();
unsigned char* pixels;
int width, height, size_pp;
io.Fonts->GetTexDataAsRGBA32(&pixels, &width, &height, &size_pp);
// Upload texture to graphics system
{
WGPUTextureDescriptor tex_desc = {};
tex_desc.label = "Dear ImGui Font Texture";
tex_desc.dimension = WGPUTextureDimension_2D;
tex_desc.size.width = width;
tex_desc.size.height = height;
tex_desc.size.depthOrArrayLayers = 1;
tex_desc.sampleCount = 1;
tex_desc.format = WGPUTextureFormat_RGBA8Unorm;
tex_desc.mipLevelCount = 1;
tex_desc.usage = WGPUTextureUsage_CopyDst | WGPUTextureUsage_TextureBinding;
g_resources.FontTexture = wgpuDeviceCreateTexture(g_wgpuDevice, &tex_desc);
WGPUTextureViewDescriptor tex_view_desc = {};
tex_view_desc.format = WGPUTextureFormat_RGBA8Unorm;
tex_view_desc.dimension = WGPUTextureViewDimension_2D;
tex_view_desc.baseMipLevel = 0;
tex_view_desc.mipLevelCount = 1;
tex_view_desc.baseArrayLayer = 0;
tex_view_desc.arrayLayerCount = 1;
tex_view_desc.aspect = WGPUTextureAspect_All;
g_resources.FontTextureView = wgpuTextureCreateView(g_resources.FontTexture, &tex_view_desc);
}
// Upload texture data
{
WGPUImageCopyTexture dst_view = {};
dst_view.texture = g_resources.FontTexture;
dst_view.mipLevel = 0;
dst_view.origin = { 0, 0, 0 };
dst_view.aspect = WGPUTextureAspect_All;
WGPUTextureDataLayout layout = {};
layout.offset = 0;
layout.bytesPerRow = width * size_pp;
layout.rowsPerImage = height;
WGPUExtent3D size = { (uint32_t)width, (uint32_t)height, 1 };
wgpuQueueWriteTexture(g_defaultQueue, &dst_view, pixels, (uint32_t)(width * size_pp * height), &layout, &size);
}
// Create the associated sampler
// (Bilinear sampling is required by default. Set 'io.Fonts->Flags |= ImFontAtlasFlags_NoBakedLines' or 'style.AntiAliasedLinesUseTex = false' to allow point/nearest sampling)
{
WGPUSamplerDescriptor sampler_desc = {};
sampler_desc.minFilter = WGPUFilterMode_Linear;
sampler_desc.magFilter = WGPUFilterMode_Linear;
sampler_desc.mipmapFilter = WGPUMipmapFilterMode_Linear;
sampler_desc.addressModeU = WGPUAddressMode_Repeat;
sampler_desc.addressModeV = WGPUAddressMode_Repeat;
sampler_desc.addressModeW = WGPUAddressMode_Repeat;
sampler_desc.maxAnisotropy = 1;
g_resources.Sampler = wgpuDeviceCreateSampler(g_wgpuDevice, &sampler_desc);
}
// Store our identifier
static_assert(sizeof(ImTextureID) >= sizeof(g_resources.FontTexture), "Can't pack descriptor handle into TexID, 32-bit not supported yet.");
io.Fonts->SetTexID((ImTextureID)g_resources.FontTextureView);
}
static void ImGui_ImplWGPU_CreateUniformBuffer()
{
WGPUBufferDescriptor ub_desc =
{
nullptr,
"Dear ImGui Uniform buffer",
WGPUBufferUsage_CopyDst | WGPUBufferUsage_Uniform,
sizeof(Uniforms),
false
};
g_resources.Uniforms = wgpuDeviceCreateBuffer(g_wgpuDevice, &ub_desc);
}
bool ImGui_ImplWGPU_CreateDeviceObjects()
{
if (!g_wgpuDevice)
return false;
if (g_pipelineState)
ImGui_ImplWGPU_InvalidateDeviceObjects();
// Create render pipeline
WGPURenderPipelineDescriptor graphics_pipeline_desc = {};
graphics_pipeline_desc.primitive.topology = WGPUPrimitiveTopology_TriangleList;
graphics_pipeline_desc.primitive.stripIndexFormat = WGPUIndexFormat_Undefined;
graphics_pipeline_desc.primitive.frontFace = WGPUFrontFace_CW;
graphics_pipeline_desc.primitive.cullMode = WGPUCullMode_None;
graphics_pipeline_desc.multisample.count = 1;
graphics_pipeline_desc.multisample.mask = UINT_MAX;
graphics_pipeline_desc.multisample.alphaToCoverageEnabled = false;
graphics_pipeline_desc.layout = nullptr; // Use automatic layout generation
// Create the shader module
WGPUShaderModule shader_module = ImGui_ImplWGPU_CreateShaderModule(__wgsl_shader);
graphics_pipeline_desc.vertex.module = shader_module;
graphics_pipeline_desc.vertex.entryPoint = "vs_main";
// Vertex input configuration
WGPUVertexAttribute attribute_desc[] =
{
{ WGPUVertexFormat_Float32x2, (uint64_t)IM_OFFSETOF(ImDrawVert, pos), 0 },
{ WGPUVertexFormat_Float32x2, (uint64_t)IM_OFFSETOF(ImDrawVert, uv), 1 },
{ WGPUVertexFormat_Unorm8x4, (uint64_t)IM_OFFSETOF(ImDrawVert, col), 2 },
};
WGPUVertexBufferLayout buffer_layouts[1];
buffer_layouts[0].arrayStride = sizeof(ImDrawVert);
buffer_layouts[0].stepMode = WGPUVertexStepMode_Vertex;
buffer_layouts[0].attributeCount = 3;
buffer_layouts[0].attributes = attribute_desc;
graphics_pipeline_desc.vertex.bufferCount = 1;
graphics_pipeline_desc.vertex.buffers = buffer_layouts;
// Create the blending setup
WGPUBlendState blend_state = {};
blend_state.alpha.operation = WGPUBlendOperation_Add;
blend_state.alpha.srcFactor = WGPUBlendFactor_One;
blend_state.alpha.dstFactor = WGPUBlendFactor_OneMinusSrcAlpha;
blend_state.color.operation = WGPUBlendOperation_Add;
blend_state.color.srcFactor = WGPUBlendFactor_SrcAlpha;
blend_state.color.dstFactor = WGPUBlendFactor_OneMinusSrcAlpha;
WGPUColorTargetState color_state = {};
color_state.format = g_renderTargetFormat;
color_state.blend = &blend_state;
color_state.writeMask = WGPUColorWriteMask_All;
WGPUFragmentState fragment_state = {};
fragment_state.module = shader_module;
fragment_state.entryPoint = "fs_main";
fragment_state.targetCount = 1;
fragment_state.targets = &color_state;
graphics_pipeline_desc.fragment = &fragment_state;
// Create depth-stencil State
WGPUDepthStencilState depth_stencil_state = {};
depth_stencil_state.format = g_depthStencilFormat;
depth_stencil_state.depthWriteEnabled = false;
depth_stencil_state.depthCompare = WGPUCompareFunction_Always;
depth_stencil_state.stencilFront.compare = WGPUCompareFunction_Always;
depth_stencil_state.stencilBack.compare = WGPUCompareFunction_Always;
// Configure disabled depth-stencil state
graphics_pipeline_desc.depthStencil = g_depthStencilFormat == WGPUTextureFormat_Undefined ? nullptr : &depth_stencil_state;
g_pipelineState = wgpuDeviceCreateRenderPipeline(g_wgpuDevice, &graphics_pipeline_desc);
ImGui_ImplWGPU_CreateFontsTexture();
ImGui_ImplWGPU_CreateUniformBuffer();
// Create resource bind group
WGPUBindGroupLayout bg_layouts[2];
bg_layouts[0] = wgpuRenderPipelineGetBindGroupLayout(g_pipelineState, 0);
bg_layouts[1] = wgpuRenderPipelineGetBindGroupLayout(g_pipelineState, 1);
WGPUBindGroupEntry common_bg_entries[] =
{
{ nullptr, 0, g_resources.Uniforms, 0, sizeof(Uniforms), 0, 0 },
{ nullptr, 1, 0, 0, 0, g_resources.Sampler, 0 },
};
WGPUBindGroupDescriptor common_bg_descriptor = {};
common_bg_descriptor.layout = bg_layouts[0];
common_bg_descriptor.entryCount = sizeof(common_bg_entries) / sizeof(WGPUBindGroupEntry);
common_bg_descriptor.entries = common_bg_entries;
g_resources.CommonBindGroup = wgpuDeviceCreateBindGroup(g_wgpuDevice, &common_bg_descriptor);
WGPUBindGroup image_bind_group = ImGui_ImplWGPU_CreateImageBindGroup(bg_layouts[1], g_resources.FontTextureView);
g_resources.ImageBindGroup = image_bind_group;
g_resources.ImageBindGroupLayout = bg_layouts[1];
g_resources.ImageBindGroups.SetVoidPtr(ImHashData(&g_resources.FontTextureView, sizeof(ImTextureID)), image_bind_group);
SafeRelease(shader_module);
SafeRelease(bg_layouts[0]);
return true;
}
void ImGui_ImplWGPU_InvalidateDeviceObjects()
{
if (!g_wgpuDevice)
return;
SafeRelease(g_pipelineState);
SafeRelease(g_resources);
ImGuiIO& io = ImGui::GetIO();
io.Fonts->SetTexID(0); // We copied g_pFontTextureView to io.Fonts->TexID so let's clear that as well.
for (unsigned int i = 0; i < g_numFramesInFlight; i++)
SafeRelease(g_pFrameResources[i]);
}
bool ImGui_ImplWGPU_Init(WGPUDevice device, int num_frames_in_flight, WGPUTextureFormat rt_format, WGPUTextureFormat depth_format)
{
// Setup backend capabilities flags
ImGuiIO& io = ImGui::GetIO();
io.BackendRendererName = "imgui_impl_webgpu";
io.BackendFlags |= ImGuiBackendFlags_RendererHasVtxOffset; // We can honor the ImDrawCmd::VtxOffset field, allowing for large meshes.
g_wgpuDevice = device;
g_defaultQueue = wgpuDeviceGetQueue(g_wgpuDevice);
g_renderTargetFormat = rt_format;
g_depthStencilFormat = depth_format;
g_pFrameResources = new FrameResources[num_frames_in_flight];
g_numFramesInFlight = num_frames_in_flight;
g_frameIndex = UINT_MAX;
g_resources.FontTexture = nullptr;
g_resources.FontTextureView = nullptr;
g_resources.Sampler = nullptr;
g_resources.Uniforms = nullptr;
g_resources.CommonBindGroup = nullptr;
g_resources.ImageBindGroups.Data.reserve(100);
g_resources.ImageBindGroup = nullptr;
g_resources.ImageBindGroupLayout = nullptr;
// Create buffers with a default size (they will later be grown as needed)
for (int i = 0; i < num_frames_in_flight; i++)
{
FrameResources* fr = &g_pFrameResources[i];
fr->IndexBuffer = nullptr;
fr->VertexBuffer = nullptr;
fr->IndexBufferHost = nullptr;
fr->VertexBufferHost = nullptr;
fr->IndexBufferSize = 10000;
fr->VertexBufferSize = 5000;
}
return true;
}
void ImGui_ImplWGPU_Shutdown()
{
ImGui_ImplWGPU_InvalidateDeviceObjects();
delete[] g_pFrameResources;
g_pFrameResources = nullptr;
wgpuQueueRelease(g_defaultQueue);
g_wgpuDevice = nullptr;
g_numFramesInFlight = 0;
g_frameIndex = UINT_MAX;
}
void ImGui_ImplWGPU_NewFrame()
{
if (!g_pipelineState)
ImGui_ImplWGPU_CreateDeviceObjects();
}

View File

@ -1,25 +0,0 @@
// dear imgui: Renderer for WebGPU
// This needs to be used along with a Platform Binding (e.g. GLFW)
// (Please note that WebGPU is currently experimental, will not run on non-beta browsers, and may break.)
// Implemented features:
// [X] Renderer: User texture binding. Use 'WGPUTextureView' as ImTextureID. Read the FAQ about ImTextureID!
// [X] Renderer: Support for large meshes (64k+ vertices) with 16-bit indices.
// You can use unmodified imgui_impl_* files in your project. See examples/ folder for examples of using this.
// Prefer including the entire imgui/ repository into your project (either as a copy or as a submodule), and only build the backends you need.
// If you are new to Dear ImGui, read documentation from the docs/ folder + read the top of imgui.cpp.
// Read online: https://github.com/ocornut/imgui/tree/master/docs
#pragma once
#include "imgui.h" // IMGUI_IMPL_API
#include <webgpu/webgpu.h>
IMGUI_IMPL_API bool ImGui_ImplWGPU_Init(WGPUDevice device, int num_frames_in_flight, WGPUTextureFormat rt_format, WGPUTextureFormat depth_format = WGPUTextureFormat_Undefined);
IMGUI_IMPL_API void ImGui_ImplWGPU_Shutdown();
IMGUI_IMPL_API void ImGui_ImplWGPU_NewFrame();
IMGUI_IMPL_API void ImGui_ImplWGPU_RenderDrawData(ImDrawData* draw_data, WGPURenderPassEncoder pass_encoder);
// Use if you want to reset your rendering device without losing Dear ImGui state.
IMGUI_IMPL_API void ImGui_ImplWGPU_InvalidateDeviceObjects();
IMGUI_IMPL_API bool ImGui_ImplWGPU_CreateDeviceObjects();

View File

@ -90,8 +90,7 @@ static std::optional<std::string> remap_controller_layout(std::string mapping) {
if (idx > 0) {
newMapping.push_back(',');
}
auto str = value.operator std::string();
newMapping.append(str);
newMapping.append(value);
} else {
const auto split = absl::StrSplit(value, absl::MaxSplits(':', 2));
auto iter = split.begin();
@ -123,9 +122,9 @@ static std::optional<std::string> remap_controller_layout(std::string mapping) {
}
for (auto [k, v] : entries) {
newMapping.push_back(',');
newMapping.append(k.operator std::string());
newMapping.append(k);
newMapping.push_back(':');
newMapping.append(v.operator std::string());
newMapping.append(v);
}
return newMapping;
}

View File

@ -1,18 +1,22 @@
#include "gpu.hpp"
#include <array>
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>
#include <aurora/aurora.h>
#include "../window.hpp"
#include "../internal.hpp"
#include <SDL3/SDL.h>
#include <magic_enum.hpp>
#include <memory>
#include <algorithm>
#include <webgpu/webgpu.h>
#include <webgpu/webgpu_cpp.h>
#include "../internal.hpp"
#include "../window.hpp"
#ifdef WEBGPU_DAWN
#include <dawn/native/DawnNative.h>
#include "../dawn/BackendBinding.hpp"
#include <dawn/native/DawnNative.h>
#endif
namespace aurora::webgpu {
@ -20,7 +24,7 @@ static Module Log("aurora::gpu");
wgpu::Device g_device;
wgpu::Queue g_queue;
wgpu::SwapChain g_swapChain;
wgpu::Surface g_surface;
wgpu::BackendType g_backendType;
GraphicsConfig g_graphicsConfig;
TextureWithSampler g_frameBuffer;
@ -32,23 +36,17 @@ static wgpu::BindGroupLayout g_CopyBindGroupLayout;
wgpu::RenderPipeline g_CopyPipeline;
wgpu::BindGroup g_CopyBindGroup;
#ifdef WEBGPU_DAWN
static std::unique_ptr<dawn::native::Instance> g_dawnInstance;
static dawn::native::Adapter g_adapter;
#else
static wgpu::Adapter g_adapter;
#endif
wgpu::Instance g_instance;
static wgpu::Surface g_surface;
static wgpu::AdapterProperties g_adapterProperties;
static wgpu::AdapterInfo g_adapterInfo;
TextureWithSampler create_render_texture(bool multisampled) {
const wgpu::Extent3D size{
.width = g_graphicsConfig.swapChainDescriptor.width,
.height = g_graphicsConfig.swapChainDescriptor.height,
.width = g_graphicsConfig.surfaceConfiguration.width,
.height = g_graphicsConfig.surfaceConfiguration.height,
.depthOrArrayLayers = 1,
};
const auto format = g_graphicsConfig.swapChainDescriptor.format;
const auto format = g_graphicsConfig.surfaceConfiguration.format;
uint32_t sampleCount = 1;
if (multisampled) {
sampleCount = g_graphicsConfig.msaaSamples;
@ -98,8 +96,8 @@ TextureWithSampler create_render_texture(bool multisampled) {
static TextureWithSampler create_depth_texture() {
const wgpu::Extent3D size{
.width = g_graphicsConfig.swapChainDescriptor.width,
.height = g_graphicsConfig.swapChainDescriptor.height,
.width = g_graphicsConfig.surfaceConfiguration.width,
.height = g_graphicsConfig.surfaceConfiguration.height,
.depthOrArrayLayers = 1,
};
const auto format = g_graphicsConfig.depthFormat;
@ -188,7 +186,7 @@ fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
};
auto module = g_device.CreateShaderModule(&moduleDescriptor);
const std::array colorTargets{wgpu::ColorTargetState{
.format = g_graphicsConfig.swapChainDescriptor.format,
.format = g_graphicsConfig.surfaceConfiguration.format,
.writeMask = wgpu::ColorWriteMask::All,
}};
const wgpu::FragmentState fragmentState{
@ -266,52 +264,6 @@ void create_copy_bind_group() {
g_CopyBindGroup = g_device.CreateBindGroup(&bindGroupDescriptor);
}
static void log_callback(WGPULoggingType type, char const * message, void * userdata) {
AuroraLogLevel level = LOG_FATAL;
switch (type) {
case WGPULoggingType_Verbose:
level = LOG_DEBUG;
break;
case WGPULoggingType_Info:
level = LOG_INFO;
break;
case WGPULoggingType_Warning:
level = LOG_WARNING;
break;
case WGPULoggingType_Error:
level = LOG_ERROR;
break;
default:
break;
}
Log.report(level, FMT_STRING("WebGPU message: {}"), message);
}
static void error_callback(WGPUErrorType type, char const* message, void* userdata) {
FATAL("WebGPU error {}: {}", static_cast<int>(type), message);
}
#ifndef WEBGPU_DAWN
static void adapter_callback(WGPURequestAdapterStatus status, WGPUAdapter adapter, char const* message,
void* userdata) {
if (status == WGPURequestAdapterStatus_Success) {
g_adapter = wgpu::Adapter::Acquire(adapter);
} else {
Log.report(LOG_WARNING, FMT_STRING("Adapter request failed with message: {}"), message);
}
*static_cast<bool*>(userdata) = true;
}
#endif
static void device_callback(WGPURequestDeviceStatus status, WGPUDevice device, char const* message, void* userdata) {
if (status == WGPURequestDeviceStatus_Success) {
g_device = wgpu::Device::Acquire(device);
} else {
Log.report(LOG_WARNING, FMT_STRING("Device request failed with message: {}"), message);
}
*static_cast<bool*>(userdata) = true;
}
static wgpu::BackendType to_wgpu_backend(AuroraBackend backend) {
switch (backend) {
case BACKEND_WEBGPU:
@ -334,17 +286,25 @@ static wgpu::BackendType to_wgpu_backend(AuroraBackend backend) {
}
bool initialize(AuroraBackend auroraBackend) {
#ifdef WEBGPU_DAWN
if (!g_dawnInstance) {
Log.report(LOG_INFO, FMT_STRING("Creating Dawn instance"));
g_dawnInstance = std::make_unique<dawn::native::Instance>();
}
#else
if (!g_instance) {
const wgpu::InstanceDescriptor instanceDescriptor{};
g_instance = {}; // TODO use wgpuCreateInstance when supported
}
Log.report(LOG_INFO, FMT_STRING("Creating WGPU instance"));
wgpu::InstanceDescriptor instanceDescriptor{
.capabilities =
{
.timedWaitAnyEnable = true,
},
};
#ifdef WEBGPU_DAWN
dawn::native::DawnInstanceDescriptor dawnInstanceDescriptor;
dawnInstanceDescriptor.backendValidationLevel = dawn::native::BackendValidationLevel::Disabled;
instanceDescriptor.nextInChain = &dawnInstanceDescriptor;
#endif
g_instance = wgpu::CreateInstance(&instanceDescriptor);
if (!g_instance) {
Log.report(LOG_ERROR, FMT_STRING("Failed to create WGPU instance"));
return false;
}
}
const wgpu::BackendType backend = to_wgpu_backend(auroraBackend);
#ifdef EMSCRIPTEN
if (backend != wgpu::BackendType::WebGPU) {
@ -358,120 +318,87 @@ bool initialize(AuroraBackend auroraBackend) {
g_dawnInstance->EnableBackendValidation(backend != WGPUBackendType::D3D12);
#endif
#ifdef WEBGPU_DAWN
SDL_Window* window = window::get_sdl_window();
if (!utils::DiscoverAdapter(g_dawnInstance.get(), window, backend)) {
return false;
}
{
std::vector<dawn::native::Adapter> adapters = g_dawnInstance->GetAdapters();
std::sort(adapters.begin(), adapters.end(), [&](const auto& a, const auto& b) {
wgpu::AdapterProperties propertiesA;
wgpu::AdapterProperties propertiesB;
a.GetProperties(&propertiesA);
b.GetProperties(&propertiesB);
constexpr std::array PreferredTypeOrder{
wgpu::AdapterType::DiscreteGPU,
wgpu::AdapterType::IntegratedGPU,
wgpu::AdapterType::CPU,
};
const auto typeItA = std::find(PreferredTypeOrder.begin(), PreferredTypeOrder.end(), propertiesA.adapterType);
const auto typeItB = std::find(PreferredTypeOrder.begin(), PreferredTypeOrder.end(), propertiesB.adapterType);
return typeItA < typeItB;
});
const auto adapterIt = std::find_if(adapters.begin(), adapters.end(), [=](const auto& adapter) -> bool {
wgpu::AdapterProperties properties;
adapter.GetProperties(&properties);
return properties.backendType == backend;
});
if (adapterIt == adapters.end()) {
return false;
}
g_adapter = *adapterIt;
}
const auto chainedDescriptor = utils::SetupWindowAndGetSurfaceDescriptor(window);
wgpu::SurfaceDescriptor surfaceDescriptor;
surfaceDescriptor.nextInChain = chainedDescriptor.get();
g_surface = g_instance.CreateSurface(&surfaceDescriptor);
ASSERT(g_surface, "Failed to initialize surface");
#else
#ifdef EMSCRIPTEN
const WGPUSurfaceDescriptorFromCanvasHTMLSelector canvasDescriptor{
.chain = {.sType = WGPUSType_SurfaceDescriptorFromCanvasHTMLSelector},
.selector = "#canvas",
};
const WGPUSurfaceDescriptor surfaceDescriptor{
.nextInChain = &canvasDescriptor.chain,
#else
SDL_Window* window = window::get_sdl_window();
const auto chainedDescriptor = utils::SetupWindowAndGetSurfaceDescriptor(window);
#endif
const wgpu::SurfaceDescriptor surfaceDescriptor{
.nextInChain = chainedDescriptor.get(),
.label = "Surface",
};
g_surface = wgpu::Surface::Acquire(wgpuInstanceCreateSurface(g_instance.Get(), &surfaceDescriptor));
ASSERT(g_surface, "Failed to initialize surface");
const WGPURequestAdapterOptions options{
.compatibleSurface = g_surface.Get(),
.powerPreference = WGPUPowerPreference_HighPerformance,
.forceFallbackAdapter = false,
};
bool adapterCallbackRecieved = false;
wgpuInstanceRequestAdapter(g_instance.Get(), &options, adapter_callback, &adapterCallbackRecieved);
while (!adapterCallbackRecieved) {
emscripten_log(EM_LOG_CONSOLE, "Waiting for adapter...\n");
emscripten_sleep(100);
g_surface = g_instance.CreateSurface(&surfaceDescriptor);
if (!g_surface) {
Log.report(LOG_ERROR, FMT_STRING("Failed to create surface"));
return false;
}
#endif
g_adapter.GetProperties(&g_adapterProperties);
g_backendType = g_adapterProperties.backendType;
{
const wgpu::RequestAdapterOptions options{
.powerPreference = wgpu::PowerPreference::HighPerformance,
.backendType = backend,
.compatibleSurface = g_surface,
};
const auto future = g_instance.RequestAdapter(
&options, wgpu::CallbackMode::WaitAnyOnly,
[](wgpu::RequestAdapterStatus status, wgpu::Adapter adapter, wgpu::StringView message) {
if (status == wgpu::RequestAdapterStatus::Success) {
g_adapter = std::move(adapter);
} else {
Log.report(LOG_WARNING, FMT_STRING("Adapter request failed: {}"), message);
}
});
const auto status = g_instance.WaitAny(future, 5000000000);
if (status != wgpu::WaitStatus::Success) {
Log.report(LOG_ERROR, FMT_STRING("Failed to create adapter: {}"), magic_enum::enum_name(status));
return false;
}
if (!g_adapter) {
Log.report(LOG_ERROR, FMT_STRING("Failed to create adapter"));
return false;
}
}
g_adapter.GetInfo(&g_adapterInfo);
g_backendType = g_adapterInfo.backendType;
const auto backendName = magic_enum::enum_name(g_backendType);
const char* adapterName = g_adapterProperties.name;
if (adapterName == nullptr) {
adapterName = "Unknown";
auto adapterName = g_adapterInfo.device;
if (adapterName.IsUndefined()) {
adapterName = wgpu::StringView("Unknown");
}
const char* driverDescription = g_adapterProperties.driverDescription;
if (driverDescription == nullptr) {
driverDescription = "Unknown";
auto description = g_adapterInfo.description;
if (description.IsUndefined()) {
description = wgpu::StringView("Unknown");
}
Log.report(LOG_INFO, FMT_STRING("Graphics adapter information\n API: {}\n Device: {} ({})\n Driver: {}"),
backendName, adapterName, magic_enum::enum_name(g_adapterProperties.adapterType), driverDescription);
backendName, adapterName, magic_enum::enum_name(g_adapterInfo.adapterType), description);
{
// TODO: emscripten doesn't implement wgpuAdapterGetLimits
#ifdef WEBGPU_DAWN
WGPUSupportedLimits supportedLimits{};
wgpu::Limits supportedLimits{};
g_adapter.GetLimits(&supportedLimits);
const wgpu::RequiredLimits requiredLimits{
.limits =
{
// Use "best" supported alignments
.minUniformBufferOffsetAlignment = supportedLimits.limits.minUniformBufferOffsetAlignment == 0
? static_cast<uint32_t>(WGPU_LIMIT_U32_UNDEFINED)
: supportedLimits.limits.minUniformBufferOffsetAlignment,
.minStorageBufferOffsetAlignment = supportedLimits.limits.minStorageBufferOffsetAlignment == 0
? static_cast<uint32_t>(WGPU_LIMIT_U32_UNDEFINED)
: supportedLimits.limits.minStorageBufferOffsetAlignment,
},
const wgpu::Limits requiredLimits{
// Use "best" supported alignments
.minUniformBufferOffsetAlignment = supportedLimits.minUniformBufferOffsetAlignment == 0
? WGPU_LIMIT_U32_UNDEFINED
: supportedLimits.minUniformBufferOffsetAlignment,
.minStorageBufferOffsetAlignment = supportedLimits.minStorageBufferOffsetAlignment == 0
? WGPU_LIMIT_U32_UNDEFINED
: supportedLimits.minStorageBufferOffsetAlignment,
};
#endif
std::vector<wgpu::FeatureName> features;
#ifdef WEBGPU_DAWN
const auto supportedFeatures = g_adapter.GetSupportedFeatures();
for (const auto* const feature : supportedFeatures) {
if (strcmp(feature, "texture-compression-bc") == 0) {
features.push_back(wgpu::FeatureName::TextureCompressionBC);
}
}
#else
std::vector<wgpu::FeatureName> supportedFeatures;
size_t featureCount = g_adapter.EnumerateFeatures(nullptr);
supportedFeatures.resize(featureCount);
g_adapter.EnumerateFeatures(supportedFeatures.data());
for (const auto& feature : supportedFeatures) {
std::vector<wgpu::FeatureName> requiredFeatures;
wgpu::SupportedFeatures supportedFeatures;
g_adapter.GetFeatures(&supportedFeatures);
for (size_t i = 0; i < supportedFeatures.featureCount; ++i) {
const auto feature = supportedFeatures.features[i];
if (feature == wgpu::FeatureName::TextureCompressionBC) {
features.push_back(wgpu::FeatureName::TextureCompressionBC);
requiredFeatures.push_back(feature);
}
}
#endif
#ifdef WEBGPU_DAWN
const std::array enableToggles {
/* clang-format off */
const std::array enableToggles{
/* clang-format off */
#if _WIN32
"use_dxc",
#endif
@ -481,59 +408,105 @@ bool initialize(AuroraBackend auroraBackend) {
#endif
"use_user_defined_labels_in_backend",
"disable_symbol_renaming",
/* clang-format on */
"enable_immediate_error_handling",
/* clang-format on */
};
wgpu::DawnTogglesDescriptor togglesDescriptor{};
togglesDescriptor.enabledTogglesCount = enableToggles.size();
togglesDescriptor.enabledToggles = enableToggles.data();
const wgpu::DawnTogglesDescriptor togglesDescriptor({
.enabledToggleCount = enableToggles.size(),
.enabledToggles = enableToggles.data(),
});
#endif
const wgpu::DeviceDescriptor deviceDescriptor{
wgpu::DeviceDescriptor deviceDescriptor({
#ifdef WEBGPU_DAWN
.nextInChain = &togglesDescriptor,
#endif
.requiredFeaturesCount = static_cast<uint32_t>(features.size()),
.requiredFeatures = features.data(),
.requiredFeatureCount = requiredFeatures.size(),
.requiredFeatures = requiredFeatures.data(),
#ifdef WEBGPU_DAWN
.requiredLimits = &requiredLimits,
#endif
};
bool deviceCallbackReceived = false;
g_adapter.RequestDevice(&deviceDescriptor, device_callback, &deviceCallbackReceived);
#ifdef EMSCRIPTEN
while (!deviceCallbackReceived) {
emscripten_log(EM_LOG_CONSOLE, "Waiting for device...\n");
emscripten_sleep(100);
});
deviceDescriptor.SetUncapturedErrorCallback(
[](const wgpu::Device& device, wgpu::ErrorType type, wgpu::StringView message) {
FATAL("WebGPU error {}: {}", static_cast<int>(type), message);
});
deviceDescriptor.SetDeviceLostCallback(
wgpu::CallbackMode::AllowSpontaneous,
[](const wgpu::Device& device, wgpu::DeviceLostReason reason, wgpu::StringView message) {
Log.report(LOG_WARNING, FMT_STRING("Device lost: {}"), message);
});
const auto future =
g_adapter.RequestDevice(&deviceDescriptor, wgpu::CallbackMode::WaitAnyOnly,
[](wgpu::RequestDeviceStatus status, wgpu::Device device, wgpu::StringView message) {
if (status == wgpu::RequestDeviceStatus::Success) {
g_device = std::move(device);
} else {
Log.report(LOG_WARNING, FMT_STRING("Device request failed: {}"), message);
}
});
const auto status = g_instance.WaitAny(future, 5000000000);
if (status != wgpu::WaitStatus::Success) {
Log.report(LOG_ERROR, FMT_STRING("Failed to create device: {}"), magic_enum::enum_name(status));
return false;
}
#endif
if (!g_device) {
return false;
}
g_device.SetLoggingCallback(&log_callback, nullptr);
g_device.SetUncapturedErrorCallback(&error_callback, nullptr);
g_device.SetLoggingCallback([](wgpu::LoggingType type, wgpu::StringView message) {
AuroraLogLevel level = LOG_FATAL;
switch (type) {
case wgpu::LoggingType::Verbose:
level = LOG_DEBUG;
break;
case wgpu::LoggingType::Info:
level = LOG_INFO;
break;
case wgpu::LoggingType::Warning:
level = LOG_WARNING;
break;
case wgpu::LoggingType::Error:
level = LOG_ERROR;
break;
default:
break;
}
Log.report(level, FMT_STRING("WebGPU message: {}"), message);
});
}
g_device.SetDeviceLostCallback(nullptr, nullptr);
g_queue = g_device.GetQueue();
#if WEBGPU_DAWN
auto swapChainFormat = wgpu::TextureFormat::BGRA8UnormSrgb; // TODO
#else
auto swapChainFormat = g_surface.GetPreferredFormat(g_adapter);
#endif
if (swapChainFormat == wgpu::TextureFormat::RGBA8UnormSrgb) {
swapChainFormat = wgpu::TextureFormat::RGBA8Unorm;
} else if (swapChainFormat == wgpu::TextureFormat::BGRA8UnormSrgb) {
swapChainFormat = wgpu::TextureFormat::BGRA8Unorm;
wgpu::SurfaceCapabilities surfaceCapabilities;
const wgpu::Status status = g_surface.GetCapabilities(g_adapter, &surfaceCapabilities);
if (status != wgpu::Status::Success) {
Log.report(LOG_ERROR, FMT_STRING("Failed to get surface capabilities: {}"), magic_enum::enum_name(status));
return false;
}
Log.report(LOG_INFO, FMT_STRING("Using swapchain format {}"), magic_enum::enum_name(swapChainFormat));
if (surfaceCapabilities.formatCount == 0) {
Log.report(LOG_ERROR, FMT_STRING("Surface has no formats"));
return false;
}
if (surfaceCapabilities.presentModeCount == 0) {
Log.report(LOG_ERROR, FMT_STRING("Surface has no present modes"));
return false;
}
auto surfaceFormat = surfaceCapabilities.formats[0];
auto presentMode = surfaceCapabilities.presentModes[0];
if (surfaceFormat == wgpu::TextureFormat::RGBA8UnormSrgb) {
surfaceFormat = wgpu::TextureFormat::RGBA8Unorm;
} else if (surfaceFormat == wgpu::TextureFormat::BGRA8UnormSrgb) {
surfaceFormat = wgpu::TextureFormat::BGRA8Unorm;
}
Log.report(LOG_INFO, FMT_STRING("Using surface format {}, present mode {}"), magic_enum::enum_name(surfaceFormat),
magic_enum::enum_name(presentMode));
const auto size = window::get_window_size();
g_graphicsConfig = GraphicsConfig{
.swapChainDescriptor =
wgpu::SwapChainDescriptor{
.surfaceConfiguration =
wgpu::SurfaceConfiguration{
.format = surfaceFormat,
.usage = wgpu::TextureUsage::RenderAttachment,
.format = swapChainFormat,
.width = size.fb_width,
.height = size.fb_height,
.presentMode = wgpu::PresentMode::Fifo,
.presentMode = presentMode,
},
.depthFormat = wgpu::TextureFormat::Depth32Float,
.msaaSamples = g_config.msaa,
@ -551,26 +524,23 @@ void shutdown() {
g_frameBuffer = {};
g_frameBufferResolved = {};
g_depthBuffer = {};
wgpuSwapChainRelease(g_swapChain.Release());
wgpuQueueRelease(g_queue.Release());
wgpuDeviceDestroy(g_device.Release());
g_adapter = {};
g_queue = {};
g_surface = {};
#ifdef WEBGPU_DAWN
g_dawnInstance.reset();
#else
g_device = {};
g_adapter = {};
g_instance = {};
#endif
}
void resize_swapchain(uint32_t width, uint32_t height, bool force) {
if (!force && g_graphicsConfig.swapChainDescriptor.width == width &&
g_graphicsConfig.swapChainDescriptor.height == height) {
if (!force && g_graphicsConfig.surfaceConfiguration.width == width &&
g_graphicsConfig.surfaceConfiguration.height == height) {
return;
}
g_graphicsConfig.swapChainDescriptor.width = width;
g_graphicsConfig.swapChainDescriptor.height = height;
g_swapChain = g_device.CreateSwapChain(g_surface, &g_graphicsConfig.swapChainDescriptor);
g_graphicsConfig.surfaceConfiguration.width = width;
g_graphicsConfig.surfaceConfiguration.height = height;
auto surfaceConfiguration = g_graphicsConfig.surfaceConfiguration;
surfaceConfiguration.device = g_device;
g_surface.Configure(&surfaceConfiguration);
g_frameBuffer = create_render_texture(true);
g_frameBufferResolved = create_render_texture(false);
g_depthBuffer = create_depth_texture();

View File

@ -11,7 +11,7 @@ struct SDL_Window;
namespace aurora::webgpu {
struct GraphicsConfig {
wgpu::SwapChainDescriptor swapChainDescriptor;
wgpu::SurfaceConfiguration surfaceConfiguration;
wgpu::TextureFormat depthFormat;
uint32_t msaaSamples;
uint16_t textureAnisotropy;
@ -26,7 +26,7 @@ struct TextureWithSampler {
extern wgpu::Device g_device;
extern wgpu::Queue g_queue;
extern wgpu::SwapChain g_swapChain;
extern wgpu::Surface g_surface;
extern wgpu::BackendType g_backendType;
extern GraphicsConfig g_graphicsConfig;
extern TextureWithSampler g_frameBuffer;

View File

@ -64,6 +64,14 @@ const AuroraEvent* poll_events() {
});
break;
}
case SDL_EVENT_WINDOW_DISPLAY_SCALE_CHANGED: {
resize_swapchain(false);
g_events.push_back(AuroraEvent{
.type = AURORA_DISPLAY_SCALE_CHANGED,
.windowSize = get_window_size(),
});
break;
}
case SDL_EVENT_WINDOW_PIXEL_SIZE_CHANGED: {
resize_swapchain(false);
g_events.push_back(AuroraEvent{