aurora/lib/gfx/common.hpp

#pragma once
#include "../internal.hpp"
#include <type_traits>
#include <utility>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <webgpu/webgpu.h>
#include <xxhash_impl.h>
namespace aurora {
#if INTPTR_MAX == INT32_MAX
using HashType = XXH32_hash_t;
#else
using HashType = XXH64_hash_t;
#endif

static inline HashType xxh3_hash_s(const void* input, size_t len, HashType seed = 0) {
  return static_cast<HashType>(XXH3_64bits_withSeed(input, len, seed));
}

template <typename T>
static inline HashType xxh3_hash(const T& input, HashType seed = 0) {
  // Validate that the type has no padding bytes, which can easily cause
  // hash mismatches. This also disallows floats, but that's okay for us.
  static_assert(std::has_unique_object_representations_v<T>);
  return xxh3_hash_s(&input, sizeof(T), seed);
}
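
// Usage sketch (illustrative, not part of the original header): any
// trivially copyable, padding-free type can be hashed directly. The
// `ExampleKey` struct below is hypothetical.
//
//   struct ExampleKey {
//     uint32_t shader;
//     uint32_t flags;
//   };
//   const HashType key = xxh3_hash(ExampleKey{1u, 2u});
//
// A type with padding (e.g. a uint8_t member followed by a uint32_t) fails
// the static_assert above instead of silently hashing indeterminate bytes.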
class ByteBuffer {
public:
  ByteBuffer() noexcept = default;
  explicit ByteBuffer(size_t size) noexcept
  : m_data(static_cast<uint8_t*>(calloc(1, size))), m_length(size), m_capacity(size) {}
  explicit ByteBuffer(uint8_t* data, size_t size) noexcept
  : m_data(data), m_length(0), m_capacity(size), m_owned(false) {}

  ~ByteBuffer() noexcept {
    if (m_data != nullptr && m_owned) {
      free(m_data);
    }
  }

  ByteBuffer(ByteBuffer&& rhs) noexcept
  : m_data(rhs.m_data), m_length(rhs.m_length), m_capacity(rhs.m_capacity), m_owned(rhs.m_owned) {
    rhs.m_data = nullptr;
    rhs.m_length = 0;
    rhs.m_capacity = 0;
    rhs.m_owned = true;
  }
  ByteBuffer& operator=(ByteBuffer&& rhs) noexcept {
    if (this == &rhs) {
      return *this;
    }
    if (m_data != nullptr && m_owned) {
      free(m_data);
    }
    m_data = rhs.m_data;
    m_length = rhs.m_length;
    m_capacity = rhs.m_capacity;
    m_owned = rhs.m_owned;
    rhs.m_data = nullptr;
    rhs.m_length = 0;
    rhs.m_capacity = 0;
    rhs.m_owned = true;
    return *this;
  }
  ByteBuffer(ByteBuffer const&) = delete;
  ByteBuffer& operator=(ByteBuffer const&) = delete;

  [[nodiscard]] uint8_t* data() noexcept { return m_data; }
  [[nodiscard]] const uint8_t* data() const noexcept { return m_data; }
  [[nodiscard]] size_t size() const noexcept { return m_length; }
  [[nodiscard]] bool empty() const noexcept { return m_length == 0; }

  void append(const void* data, size_t size) {
    resize(m_length + size, false);
    memcpy(m_data + m_length, data, size);
    m_length += size;
  }

  void append_zeroes(size_t size) {
    resize(m_length + size, true);
    m_length += size;
  }

  void clear() {
    if (m_data != nullptr && m_owned) {
      free(m_data);
    }
    m_data = nullptr;
    m_length = 0;
    m_capacity = 0;
    m_owned = true;
  }

  void reserve_extra(size_t size) { resize(m_length + size, true); }

private:
  uint8_t* m_data = nullptr;
  size_t m_length = 0;
  size_t m_capacity = 0;
  bool m_owned = true;
  void resize(size_t size, bool zeroed) {
    if (size == 0) {
      clear();
    } else if (m_data == nullptr) {
      if (zeroed) {
        m_data = static_cast<uint8_t*>(calloc(1, size));
      } else {
        m_data = static_cast<uint8_t*>(malloc(size));
      }
      m_owned = true;
    } else if (size > m_capacity) {
      if (!m_owned) {
        // Growing a non-owned (externally mapped) buffer is not supported.
        abort();
      }
      m_data = static_cast<uint8_t*>(realloc(m_data, size));
      if (zeroed) {
        memset(m_data + m_capacity, 0, size - m_capacity);
      }
    } else {
      return;
    }
    m_capacity = size;
  }
};
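
// Usage sketch (illustrative, not part of the original header): an owning
// ByteBuffer allocates and grows on demand, while the (pointer, size)
// constructor wraps externally managed memory and resize() aborts rather
// than grow past its capacity.
//
//   ByteBuffer buf;
//   const uint32_t value = 42;
//   buf.append(&value, sizeof(value)); // allocates and copies 4 bytes
//   buf.append_zeroes(12);             // zero-pads to 16 bytes total
//   buf.clear();                       // frees the owned allocation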
} // namespace aurora

namespace aurora::gfx {
extern WGPUBuffer g_vertexBuffer;
extern WGPUBuffer g_uniformBuffer;
extern WGPUBuffer g_indexBuffer;
extern WGPUBuffer g_storageBuffer;

using BindGroupRef = HashType;
using PipelineRef = HashType;
using SamplerRef = HashType;
using ShaderRef = HashType;
struct Range {
  uint32_t offset = 0;
  uint32_t size = 0;

  bool operator==(const Range& rhs) const { return offset == rhs.offset && size == rhs.size; }
};
enum class ShaderType {
  Stream,
  Model,
};
void initialize();
void shutdown();
void begin_frame();
void end_frame(WGPUCommandEncoder cmd);
void render(WGPUCommandEncoder cmd);
void render_pass(WGPURenderPassEncoder pass, uint32_t idx);
void map_staging_buffer();
Range push_verts(const uint8_t* data, size_t length);
template <typename T>
static inline Range push_verts(ArrayRef<T> data) {
  return push_verts(reinterpret_cast<const uint8_t*>(data.data()), data.size() * sizeof(T));
}

Range push_indices(const uint8_t* data, size_t length);
template <typename T>
static inline Range push_indices(ArrayRef<T> data) {
  return push_indices(reinterpret_cast<const uint8_t*>(data.data()), data.size() * sizeof(T));
}

Range push_uniform(const uint8_t* data, size_t length);
template <typename T>
static inline Range push_uniform(const T& data) {
  return push_uniform(reinterpret_cast<const uint8_t*>(&data), sizeof(T));
}

Range push_storage(const uint8_t* data, size_t length);
template <typename T>
static inline Range push_storage(ArrayRef<T> data) {
  return push_storage(reinterpret_cast<const uint8_t*>(data.data()), data.size() * sizeof(T));
}
template <typename T>
static inline Range push_storage(const T& data) {
  return push_storage(reinterpret_cast<const uint8_t*>(&data), sizeof(T));
}
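
// Usage sketch (illustrative, not part of the original header): the typed
// overloads forward to the byte-oriented versions and return a Range
// (offset + size) into the corresponding per-frame buffer. `ExampleVert`
// and `ExampleUniform` are hypothetical padding-free types, and the
// ArrayRef construction below assumes a (pointer, count) constructor.
//
//   std::array<ExampleVert, 4> verts{/* ... */};
//   const Range vertRange = push_verts(ArrayRef<ExampleVert>{verts.data(), verts.size()});
//   const Range uniRange = push_uniform(ExampleUniform{/* ... */});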
Range push_texture_data(const uint8_t* data, size_t length, uint32_t bytesPerRow, uint32_t rowsPerImage);
std::pair<ByteBuffer, Range> map_verts(size_t length);
std::pair<ByteBuffer, Range> map_indices(size_t length);
std::pair<ByteBuffer, Range> map_uniform(size_t length);
std::pair<ByteBuffer, Range> map_storage(size_t length);
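
// Usage sketch (illustrative, not part of the original header): the map_*
// helpers appear to hand back a non-owning ByteBuffer over reserved space
// together with the Range it occupies, so callers can write data in place
// instead of staging it in a temporary allocation.
//
//   auto [buf, range] = map_verts(4 * sizeof(ExampleVert)); // ExampleVert is hypothetical
//   // fill buf.data() with up to 4 vertices, then reference `range` when drawing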
template <typename State>
const State& get_state();
template <typename DrawData>
void push_draw_command(DrawData data);
template <typename DrawData>
void merge_draw_command(DrawData data);
template <typename PipelineConfig>
PipelineRef pipeline_ref(PipelineConfig config);
bool bind_pipeline(PipelineRef ref, WGPURenderPassEncoder pass);
BindGroupRef bind_group_ref(const WGPUBindGroupDescriptor& descriptor);
WGPUBindGroup find_bind_group(BindGroupRef id);
WGPUSampler sampler_ref(const WGPUSamplerDescriptor& descriptor);
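
// Usage sketch (illustrative, not part of the original header): descriptors
// are deduplicated by hash, so callers keep only the returned ref and resolve
// it at draw time. The bind group layout and buffer binding below are
// hypothetical placeholders.
//
//   WGPUBindGroupEntry entry{.binding = 0, .buffer = g_uniformBuffer, .size = 256};
//   WGPUBindGroupDescriptor desc{.layout = exampleLayout, .entryCount = 1, .entries = &entry};
//   const BindGroupRef ref = bind_group_ref(desc);
//   // later, during rendering:
//   WGPUBindGroup group = find_bind_group(ref);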
uint32_t align_uniform(uint32_t value);
void set_viewport(float left, float top, float width, float height, float znear, float zfar) noexcept;
void set_scissor(uint32_t x, uint32_t y, uint32_t w, uint32_t h) noexcept;
} // namespace aurora::gfx