#pragma once

#include "../internal.hpp" // expected to provide Vec4 and ArrayRef

#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <memory>
#include <type_traits>
#include <utility>
#include <webgpu/webgpu_cpp.h>
#include <xxhash.h>

namespace aurora {
#if INTPTR_MAX == INT32_MAX
using HashType = XXH32_hash_t;
#else
using HashType = XXH64_hash_t;
#endif

static inline HashType xxh3_hash_s(const void* input, size_t len, HashType seed = 0) {
  return static_cast<HashType>(XXH3_64bits_withSeed(input, len, seed));
}

template <typename T>
static inline HashType xxh3_hash(const T& input, HashType seed = 0) {
  // Validate that the type has no padding bytes, which can easily cause
  // hash mismatches. This also disallows floats, but that's okay for us.
  static_assert(std::has_unique_object_representations_v<T>);
  return xxh3_hash_s(&input, sizeof(T), seed);
}

// Growable byte buffer; either owns its allocation or wraps externally mapped
// memory (non-owning), in which case it must never grow past its capacity.
class ByteBuffer {
public:
  ByteBuffer() noexcept = default;
  explicit ByteBuffer(size_t size) noexcept
  : m_data(static_cast<uint8_t*>(calloc(1, size))), m_length(size), m_capacity(size) {}
  explicit ByteBuffer(uint8_t* data, size_t size) noexcept
  : m_data(data), m_length(0), m_capacity(size), m_owned(false) {}
  ~ByteBuffer() noexcept {
    if (m_data != nullptr && m_owned) {
      free(m_data);
    }
  }
  ByteBuffer(ByteBuffer&& rhs) noexcept
  : m_data(rhs.m_data), m_length(rhs.m_length), m_capacity(rhs.m_capacity), m_owned(rhs.m_owned) {
    rhs.m_data = nullptr;
    rhs.m_length = 0;
    rhs.m_capacity = 0;
    rhs.m_owned = true;
  }
  ByteBuffer& operator=(ByteBuffer&& rhs) noexcept {
    if (m_data != nullptr && m_owned) {
      free(m_data);
    }
    m_data = rhs.m_data;
    m_length = rhs.m_length;
    m_capacity = rhs.m_capacity;
    m_owned = rhs.m_owned;
    rhs.m_data = nullptr;
    rhs.m_length = 0;
    rhs.m_capacity = 0;
    rhs.m_owned = true;
    return *this;
  }
  ByteBuffer(ByteBuffer const&) = delete;
  ByteBuffer& operator=(ByteBuffer const&) = delete;

  [[nodiscard]] uint8_t* data() noexcept { return m_data; }
  [[nodiscard]] const uint8_t* data() const noexcept { return m_data; }
  [[nodiscard]] size_t size() const noexcept { return m_length; }
  [[nodiscard]] bool empty() const noexcept { return m_length == 0; }

  void append(const void* data, size_t size) {
    resize(m_length + size, false);
    memcpy(m_data + m_length, data, size);
    m_length += size;
  }

  void append_zeroes(size_t size) {
    resize(m_length + size, true);
    m_length += size;
  }

  void clear() {
    if (m_data != nullptr && m_owned) {
      free(m_data);
    }
    m_data = nullptr;
    m_length = 0;
    m_capacity = 0;
    m_owned = true;
  }

  void reserve_extra(size_t size) { resize(m_length + size, true); }

private:
  uint8_t* m_data = nullptr;
  size_t m_length = 0;
  size_t m_capacity = 0;
  bool m_owned = true;

  void resize(size_t size, bool zeroed) {
    if (size == 0) {
      clear();
    } else if (m_data == nullptr) {
      if (zeroed) {
        m_data = static_cast<uint8_t*>(calloc(1, size));
      } else {
        m_data = static_cast<uint8_t*>(malloc(size));
      }
      m_owned = true;
    } else if (size > m_capacity) {
      // A non-owning buffer wraps mapped memory and cannot be grown.
      if (!m_owned) {
        abort();
      }
      m_data = static_cast<uint8_t*>(realloc(m_data, size));
      if (zeroed) {
        memset(m_data + m_capacity, 0, size - m_capacity);
      }
    } else {
      return;
    }
    m_capacity = size;
  }
};
} // namespace aurora
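// Illustrative usage sketch for the hashing helpers and ByteBuffer. This is
// caller-side code, not part of the header; `ExampleKey` is a hypothetical type
// chosen so the padding static_assert in xxh3_hash passes:
//
//   struct ExampleKey {
//     uint32_t a;
//     uint32_t b;
//   };
//   static_assert(std::has_unique_object_representations_v<ExampleKey>);
//
//   ExampleKey key{1, 2};
//   aurora::HashType h = aurora::xxh3_hash(key);  // hashes the raw bytes of key
//
//   aurora::ByteBuffer buf;
//   buf.append(&key, sizeof(key)); // owned buffer grows via realloc
//   buf.append_zeroes(16);         // zero-filled padding at the tail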
namespace aurora::gfx {
extern wgpu::Buffer g_vertexBuffer;
extern wgpu::Buffer g_uniformBuffer;
extern wgpu::Buffer g_indexBuffer;
extern wgpu::Buffer g_storageBuffer;

using BindGroupRef = HashType;
using PipelineRef = HashType;
using SamplerRef = HashType;
using ShaderRef = HashType;

struct Range {
  uint32_t offset = 0;
  uint32_t size = 0;

  bool operator==(const Range& rhs) const { return memcmp(this, &rhs, sizeof(*this)) == 0; }
  bool operator!=(const Range& rhs) const { return !(*this == rhs); }
};

struct ClipRect {
  int32_t x;
  int32_t y;
  int32_t width;
  int32_t height;

  bool operator==(const ClipRect& rhs) const { return memcmp(this, &rhs, sizeof(*this)) == 0; }
  bool operator!=(const ClipRect& rhs) const { return !(*this == rhs); }
};

struct TextureRef;
using TextureHandle = std::shared_ptr<TextureRef>;

enum class ShaderType {
  Stream,
  Model,
};

void initialize();
void shutdown();

void begin_frame();
void end_frame(const wgpu::CommandEncoder& cmd);
void render(wgpu::CommandEncoder& cmd);
void render_pass(const wgpu::RenderPassEncoder& pass, uint32_t idx);
void map_staging_buffer();
void resolve_pass(TextureHandle texture, ClipRect rect, bool clear, Vec4 clearColor);

Range push_verts(const uint8_t* data, size_t length);
template <typename T>
static inline Range push_verts(ArrayRef<T> data) {
  return push_verts(reinterpret_cast<const uint8_t*>(data.data()), data.size() * sizeof(T));
}
Range push_indices(const uint8_t* data, size_t length);
template <typename T>
static inline Range push_indices(ArrayRef<T> data) {
  return push_indices(reinterpret_cast<const uint8_t*>(data.data()), data.size() * sizeof(T));
}
Range push_uniform(const uint8_t* data, size_t length);
template <typename T>
static inline Range push_uniform(const T& data) {
  return push_uniform(reinterpret_cast<const uint8_t*>(&data), sizeof(T));
}
Range push_storage(const uint8_t* data, size_t length);
template <typename T>
static inline Range push_storage(ArrayRef<T> data) {
  return push_storage(reinterpret_cast<const uint8_t*>(data.data()), data.size() * sizeof(T));
}
template <typename T>
static inline Range push_storage(const T& data) {
  return push_storage(reinterpret_cast<const uint8_t*>(&data), sizeof(T));
}
Range push_texture_data(const uint8_t* data, size_t length, uint32_t bytesPerRow, uint32_t rowsPerImage);

std::pair<ByteBuffer, Range> map_verts(size_t length);
std::pair<ByteBuffer, Range> map_indices(size_t length);
std::pair<ByteBuffer, Range> map_uniform(size_t length);
std::pair<ByteBuffer, Range> map_storage(size_t length);

template <typename State>
const State& get_state();
template <typename DrawData>
void push_draw_command(DrawData data);
template <typename DrawData>
void merge_draw_command(DrawData data);

template <typename PipelineConfig>
PipelineRef pipeline_ref(PipelineConfig config);
bool bind_pipeline(PipelineRef ref, const wgpu::RenderPassEncoder& pass);

BindGroupRef bind_group_ref(const wgpu::BindGroupDescriptor& descriptor);
const wgpu::BindGroup& find_bind_group(BindGroupRef id);

const wgpu::Sampler& sampler_ref(const wgpu::SamplerDescriptor& descriptor);

uint32_t align_uniform(uint32_t value);

void set_viewport(float left, float top, float width, float height, float znear, float zfar) noexcept;
void set_scissor(uint32_t x, uint32_t y, uint32_t w, uint32_t h) noexcept;
} // namespace aurora::gfx
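// Illustrative usage sketch of the per-frame staging API. The types `MyVert`
// and `MyUniforms` are hypothetical caller-side assumptions, not part of this
// header:
//
//   struct MyVert {
//     float x, y, z;
//   };
//   struct MyUniforms {
//     float mvp[16];
//   };
//
//   const std::array<MyVert, 3> tri = { /* ... */ };
//   const aurora::gfx::Range vertRange = aurora::gfx::push_verts(
//       reinterpret_cast<const uint8_t*>(tri.data()), tri.size() * sizeof(MyVert));
//   const aurora::gfx::Range uniRange = aurora::gfx::push_uniform(MyUniforms{ /* ... */ });
//
// Each returned Range records the byte offset and size of the copied data
// within the shared frame buffers (g_vertexBuffer / g_uniformBuffer); a draw
// command captures these Ranges and binds the buffers at those offsets when
// render_pass() replays the frame.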