Reimplement GXCopyTex; simplify assertions

This commit is contained in:
Luke Street 2022-08-09 02:05:33 -04:00
parent 893cabe55a
commit c060e1da6b
24 changed files with 241 additions and 394 deletions

View File

@ -37,6 +37,7 @@ typedef struct {
float scale;
} AuroraWindowSize;
typedef struct SDL_Window SDL_Window;
typedef struct AuroraEvent AuroraEvent;
typedef void (*AuroraLogCallback)(AuroraLogLevel level, const char* message, unsigned int len);
@ -61,6 +62,7 @@ typedef struct {
typedef struct {
AuroraBackend backend;
const char* configPath;
SDL_Window* window;
AuroraWindowSize windowSize;
} AuroraInfo;

View File

@ -87,17 +87,11 @@ static AuroraInfo initialize(int argc, char* argv[], const AuroraConfig& config)
}
}
if (!windowCreated) {
Log.report(LOG_FATAL, FMT_STRING("Error creating window: {}"), SDL_GetError());
unreachable();
}
ASSERT(windowCreated, "Error creating window: {}", SDL_GetError());
// Initialize SDL_Renderer for ImGui when we can't use a Dawn backend
if (webgpu::g_backendType == wgpu::BackendType::Null) {
if (!window::create_renderer()) {
Log.report(LOG_FATAL, FMT_STRING("Failed to initialize SDL renderer: {}"), SDL_GetError());
unreachable();
}
ASSERT(window::create_renderer(), "Failed to initialize SDL renderer: {}", SDL_GetError());
}
window::show_window();
@ -117,6 +111,7 @@ static AuroraInfo initialize(int argc, char* argv[], const AuroraConfig& config)
return {
.backend = selectedBackend,
.configPath = g_config.configPath,
.window = window::get_sdl_window(),
.windowSize = size,
};
}

View File

@ -31,9 +31,8 @@ private:
void CreateSwapChainImpl() {
VkSurfaceKHR surface = VK_NULL_HANDLE;
if (SDL_Vulkan_CreateSurface(m_window, dawn::native::vulkan::GetInstance(m_device), &surface) != SDL_TRUE) {
Log.report(LOG_FATAL, FMT_STRING("Failed to create Vulkan surface: {}"), SDL_GetError());
}
ASSERT(SDL_Vulkan_CreateSurface(m_window, dawn::native::vulkan::GetInstance(m_device), &surface),
"Failed to create Vulkan surface: {}", SDL_GetError());
m_swapChainImpl = dawn::native::vulkan::CreateNativeSwapChainImpl(m_device, surface);
}
};

View File

@ -15,9 +15,7 @@ void GXSetIndTexCoordScale(GXIndTexStageID indStage, GXIndTexScale scaleS, GXInd
}
void GXSetIndTexMtx(GXIndTexMtxID id, const void* offset, s8 scaleExp) {
if (id < GX_ITM_0 || id > GX_ITM_2) {
Log.report(LOG_FATAL, FMT_STRING("invalid ind tex mtx ID {}"), id);
}
CHECK(id >= GX_ITM_0 && id <= GX_ITM_2, "invalid ind tex mtx ID {}", static_cast<int>(id));
update_gx_state(g_gxState.indTexMtxs[id - 1], {*reinterpret_cast<const aurora::Mat3x2<float>*>(offset), scaleExp});
}

View File

@ -1,6 +1,7 @@
#include "gx.hpp"
#include "../window.hpp"
#include "../webgpu/wgpu.hpp"
extern "C" {
GXRenderModeObj GXNtsc480IntDf = {
@ -24,14 +25,13 @@ void GXAdjustForOverscan(GXRenderModeObj* rmin, GXRenderModeObj* rmout, u16 hor,
void GXSetDispCopySrc(u16 left, u16 top, u16 wd, u16 ht) {}
void GXSetTexCopySrc(u16 left, u16 top, u16 wd, u16 ht) {
// TODO
}
// Records the source rectangle (in framebuffer pixels) to be used by the next GXCopyTex call.
void GXSetTexCopySrc(u16 left, u16 top, u16 wd, u16 ht) { g_gxState.texCopySrc = {left, top, wd, ht}; }
void GXSetDispCopyDst(u16 wd, u16 ht) {}
// Configures the destination for GXCopyTex. Only the format is stored;
// scaling between source and destination sizes is unimplemented, so the
// destination dimensions must match the rectangle set via GXSetTexCopySrc.
// NOTE(review): the `mipmap` parameter is currently ignored — confirm intended.
void GXSetTexCopyDst(u16 wd, u16 ht, GXTexFmt fmt, GXBool mipmap) {
CHECK(wd == g_gxState.texCopySrc.width && ht == g_gxState.texCopySrc.height, "Texture copy scaling unimplemented");
g_gxState.texCopyFmt = fmt;
}
// TODO GXSetDispCopyFrame2Field
@ -47,7 +47,23 @@ void GXSetDispCopyGamma(GXGamma gamma) {}
void GXCopyDisp(void* dest, GXBool clear) {}
// TODO move GXCopyTex here
// Resolves the current render pass into a GPU texture associated with `dest`.
// A cached texture is kept per destination pointer in g_gxState.copyTextures
// and reused across frames; it is (re)allocated whenever the copy-source
// rectangle size changes. When `clear` is set, the resolve also clears using
// the current GX clear color.
void GXCopyTex(void* dest, GXBool clear) {
// Source rectangle previously registered via GXSetTexCopySrc.
const auto& rect = g_gxState.texCopySrc;
const wgpu::Extent3D size{
.width = static_cast<uint32_t>(rect.width),
.height = static_cast<uint32_t>(rect.height),
.depthOrArrayLayers = 1,
};
aurora::gfx::TextureHandle handle;
// Look up the cached resolve target for this destination pointer.
const auto it = g_gxState.copyTextures.find(dest);
if (it == g_gxState.copyTextures.end() || it->second->size != size) {
// Missing or size mismatch: allocate a fresh render texture in the
// format set by GXSetTexCopyDst, and cache it for future copies.
handle = aurora::gfx::new_render_texture(rect.width, rect.height, g_gxState.texCopyFmt, "Resolved Texture");
g_gxState.copyTextures[dest] = handle;
} else {
handle = it->second;
}
// Queue the resolve of the current pass into the chosen texture.
aurora::gfx::resolve_pass(handle, rect, clear, g_gxState.clearColor);
}
// TODO GXGetYScaleFactor
// TODO GXGetNumXfbLines

View File

@ -15,14 +15,8 @@ void GXSetVtxDescv(GXVtxDescList* list) {
void GXClearVtxDesc() { g_gxState.vtxDesc.fill({}); }
void GXSetVtxAttrFmt(GXVtxFmt vtxfmt, GXAttr attr, GXCompCnt cnt, GXCompType type, u8 frac) {
if (vtxfmt < GX_VTXFMT0 || vtxfmt >= GX_MAX_VTXFMT) {
Log.report(LOG_FATAL, FMT_STRING("invalid vtxfmt {}"), vtxfmt);
unreachable();
}
if (attr < GX_VA_PNMTXIDX || attr >= GX_VA_MAX_ATTR) {
Log.report(LOG_FATAL, FMT_STRING("invalid attr {}"), attr);
unreachable();
}
CHECK(vtxfmt >= GX_VTXFMT0 && vtxfmt < GX_MAX_VTXFMT, "invalid vtxfmt {}", static_cast<int>(vtxfmt));
CHECK(attr >= GX_VA_PNMTXIDX && attr < GX_VA_MAX_ATTR, "invalid attr {}", static_cast<int>(attr));
auto& fmt = g_gxState.vtxFmts[vtxfmt].attrs[attr];
update_gx_state(fmt.cnt, cnt);
update_gx_state(fmt.type, type);
@ -42,10 +36,7 @@ void GXSetArray(GXAttr attr, const void* data, u32 size, u8 stride) {
// TODO move GXBegin, GXEnd here
void GXSetTexCoordGen2(GXTexCoordID dst, GXTexGenType type, GXTexGenSrc src, u32 mtx, GXBool normalize, u32 postMtx) {
if (dst < GX_TEXCOORD0 || dst > GX_TEXCOORD7) {
Log.report(LOG_FATAL, FMT_STRING("invalid tex coord {}"), dst);
unreachable();
}
CHECK(dst >= GX_TEXCOORD0 && dst <= GX_TEXCOORD7, "invalid tex coord {}", static_cast<int>(dst));
update_gx_state(g_gxState.tcgs[dst],
{type, src, static_cast<GXTexMtx>(mtx), static_cast<GXPTTexMtx>(postMtx), normalize});
}

View File

@ -152,10 +152,7 @@ void GXSetChanAmbColor(GXChannelID id, GXColor color) {
GXSetChanAmbColor(GX_ALPHA1, color);
return;
}
if (id < GX_COLOR0 || id > GX_ALPHA1) {
Log.report(LOG_FATAL, FMT_STRING("bad channel {}"), id);
unreachable();
}
CHECK(id >= GX_COLOR0 && id <= GX_ALPHA1, "bad channel {}", static_cast<int>(id));
update_gx_state(g_gxState.colorChannelState[id].ambColor, from_gx_color(color));
}
@ -169,10 +166,7 @@ void GXSetChanMatColor(GXChannelID id, GXColor color) {
GXSetChanMatColor(GX_ALPHA1, color);
return;
}
if (id < GX_COLOR0 || id > GX_ALPHA1) {
Log.report(LOG_FATAL, FMT_STRING("bad channel {}"), id);
unreachable();
}
CHECK(id >= GX_COLOR0 && id <= GX_ALPHA1, "bad channel {}", static_cast<int>(id));
update_gx_state(g_gxState.colorChannelState[id].matColor, from_gx_color(color));
}
@ -224,10 +218,7 @@ void GXSetChanCtrl(GXChannelID id, bool lightingEnabled, GXColorSrc ambSrc, GXCo
GXSetChanCtrl(GX_ALPHA1, lightingEnabled, ambSrc, matSrc, lightState, diffFn, attnFn);
return;
}
if (id < GX_COLOR0 || id > GX_ALPHA1) {
Log.report(LOG_FATAL, FMT_STRING("bad channel {}"), id);
unreachable();
}
CHECK(id >= GX_COLOR0 && id <= GX_ALPHA1, "bad channel {}", static_cast<int>(id));
auto& chan = g_gxState.colorChannelConfig[id];
update_gx_state(chan.lightingEnabled, lightingEnabled);
update_gx_state(chan.ambSrc, ambSrc);

View File

@ -52,10 +52,7 @@ void GXSetTevAlphaOp(GXTevStageID stageId, GXTevOp op, GXTevBias bias, GXTevScal
}
void GXSetTevColor(GXTevRegID id, GXColor color) {
if (id < GX_TEVPREV || id > GX_TEVREG2) {
Log.report(LOG_FATAL, FMT_STRING("bad tevreg {}"), id);
unreachable();
}
CHECK(id >= GX_TEVPREV && id < GX_MAX_TEVREG, "bad tevreg {}", static_cast<int>(id));
update_gx_state(g_gxState.colorRegs[id], from_gx_color(color));
}
@ -84,10 +81,7 @@ void GXSetTevOrder(GXTevStageID id, GXTexCoordID tcid, GXTexMapID tmid, GXChanne
void GXSetNumTevStages(u8 num) { update_gx_state(g_gxState.numTevStages, num); }
void GXSetTevKColor(GXTevKColorID id, GXColor color) {
if (id >= GX_MAX_KCOLOR) {
Log.report(LOG_FATAL, FMT_STRING("bad kcolor {}"), id);
unreachable();
}
CHECK(id >= GX_KCOLOR0 && id < GX_MAX_KCOLOR, "bad kcolor {}", static_cast<int>(id));
update_gx_state(g_gxState.kcolors[id], from_gx_color(color));
}
@ -103,9 +97,6 @@ void GXSetTevSwapMode(GXTevStageID stageId, GXTevSwapSel rasSel, GXTevSwapSel te
void GXSetTevSwapModeTable(GXTevSwapSel id, GXTevColorChan red, GXTevColorChan green, GXTevColorChan blue,
GXTevColorChan alpha) {
if (id < GX_TEV_SWAP0 || id >= GX_MAX_TEVSWAP) {
Log.report(LOG_FATAL, FMT_STRING("invalid tev swap sel {}"), id);
unreachable();
}
CHECK(id >= GX_TEV_SWAP0 && id < GX_MAX_TEVSWAP, "bad tev swap sel {}", static_cast<int>(id));
update_gx_state(g_gxState.tevSwapTable[id], {red, green, blue, alpha});
}

View File

@ -4,8 +4,6 @@
#include <absl/container/flat_hash_map.h>
static absl::flat_hash_map<void*, int> g_resolvedTexMap;
void GXInitTexObj(GXTexObj* obj_, const void* data, u16 width, u16 height, u32 format, GXTexWrapMode wrapS,
GXTexWrapMode wrapT, GXBool mipmap) {
memset(obj_, 0, sizeof(GXTexObj));
@ -27,8 +25,10 @@ void GXInitTexObj(GXTexObj* obj_, const void* data, u16 width, u16 height, u32 f
obj->doEdgeLod = false;
obj->maxAniso = GX_ANISO_4;
obj->tlut = GX_TLUT0;
if (g_resolvedTexMap.contains(data)) {
obj->dataInvalidated = false; // TODO hack
const auto it = g_gxState.copyTextures.find(data);
if (it != g_gxState.copyTextures.end()) {
obj->ref = it->second;
obj->dataInvalidated = false;
} else {
obj->dataInvalidated = true;
}
@ -55,8 +55,14 @@ void GXInitTexObjCI(GXTexObj* obj_, const void* data, u16 width, u16 height, GXC
obj->biasClamp = false;
obj->doEdgeLod = false;
obj->maxAniso = GX_ANISO_4;
const auto it = g_gxState.copyTextures.find(data);
if (it != g_gxState.copyTextures.end()) {
obj->ref = it->second;
obj->dataInvalidated = false;
} else {
obj->dataInvalidated = true;
}
}
void GXInitTexObjLOD(GXTexObj* obj_, GXTexFilter minFilt, GXTexFilter magFilt, float minLod, float maxLod,
float lodBias, GXBool biasClamp, GXBool doEdgeLod, GXAnisotropy maxAniso) {
@ -73,9 +79,15 @@ void GXInitTexObjLOD(GXTexObj* obj_, GXTexFilter minFilt, GXTexFilter magFilt, f
// Rebinds a texture object's backing data after initialization.
// If `data` identifies a GXCopyTex destination, the object references the
// resolved GPU texture directly (no CPU re-upload needed); otherwise the raw
// pointer is stored and the object is marked invalidated so the texel data
// gets (re)uploaded on next use.
void GXInitTexObjData(GXTexObj* obj_, const void* data) {
auto* obj = reinterpret_cast<GXTexObj_*>(obj_);
const auto it = g_gxState.copyTextures.find(data);
if (it != g_gxState.copyTextures.end()) {
// Copy-texture destination: share the resolved texture handle.
obj->ref = it->second;
obj->dataInvalidated = false;
} else {
obj->data = data;
obj->dataInvalidated = true; // force re-upload of the new CPU data
}
}
void GXInitTexObjWrapMode(GXTexObj* obj_, GXTexWrapMode wrapS, GXTexWrapMode wrapT) {
auto* obj = reinterpret_cast<GXTexObj_*>(obj_);
@ -184,6 +196,7 @@ void GXInitTlutObj(GXTlutObj* obj_, const void* data, GXTlutFmt format, u16 entr
memset(obj_, 0, sizeof(GXTlutObj));
GXTexFmt texFmt;
switch (format) {
DEFAULT_FATAL("invalid tlut format {}", static_cast<int>(format));
case GX_TL_IA8:
texFmt = GX_TF_IA8;
break;
@ -193,9 +206,6 @@ void GXInitTlutObj(GXTlutObj* obj_, const void* data, GXTlutFmt format, u16 entr
case GX_TL_RGB5A3:
texFmt = GX_TF_RGB5A3;
break;
default:
Log.report(LOG_FATAL, FMT_STRING("invalid tlut format {}"), format);
unreachable();
}
auto* obj = reinterpret_cast<GXTlutObj_*>(obj_);
obj->ref = aurora::gfx::new_static_texture_2d(
@ -224,8 +234,3 @@ void GXInvalidateTexAll() {
// TODO GXSetTexCoordScaleManually
// TODO GXSetTexCoordCylWrap
// TODO GXSetTexCoordBias
void GXCopyTex(void* dest, GXBool clear) {
// TODO
g_resolvedTexMap.emplace(dest, 0);
}

View File

@ -21,10 +21,7 @@ void GXSetProjection(const void* mtx_, GXProjectionType type) {
// TODO GXSetProjectionv
void GXLoadPosMtxImm(const void* mtx_, u32 id) {
if (id < GX_PNMTX0 || id > GX_PNMTX9) {
Log.report(LOG_FATAL, FMT_STRING("invalid pn mtx {}"), id);
unreachable();
}
CHECK(id >= GX_PNMTX0 && id <= GX_PNMTX9, "invalid pn mtx {}", static_cast<int>(id));
auto& state = g_gxState.pnMtx[id / 3];
#ifdef AURORA_NATIVE_MATRIX
const auto& mtx = *reinterpret_cast<const aurora::Mat4x4<float>*>(mtx_);
@ -38,10 +35,7 @@ void GXLoadPosMtxImm(const void* mtx_, u32 id) {
// TODO GXLoadPosMtxIndx
void GXLoadNrmMtxImm(const void* mtx_, u32 id) {
if (id < GX_PNMTX0 || id > GX_PNMTX9) {
Log.report(LOG_FATAL, FMT_STRING("invalid pn mtx {}"), id);
unreachable();
}
CHECK(id >= GX_PNMTX0 && id <= GX_PNMTX9, "invalid pn mtx {}", static_cast<int>(id));
auto& state = g_gxState.pnMtx[id / 3];
#ifdef AURORA_NATIVE_MATRIX
const auto& mtx = *reinterpret_cast<const aurora::Mat4x4<float>*>(mtx_);
@ -56,23 +50,15 @@ void GXLoadNrmMtxImm(const void* mtx_, u32 id) {
// TODO GXLoadNrmMtxIndx3x3
void GXSetCurrentMtx(u32 id) {
if (id < GX_PNMTX0 || id > GX_PNMTX9) {
Log.report(LOG_FATAL, FMT_STRING("invalid pn mtx {}"), id);
unreachable();
}
CHECK(id >= GX_PNMTX0 && id <= GX_PNMTX9, "invalid pn mtx {}", static_cast<int>(id));
update_gx_state(g_gxState.currentPnMtx, id / 3);
}
void GXLoadTexMtxImm(const void* mtx_, u32 id, GXTexMtxType type) {
if ((id < GX_TEXMTX0 || id > GX_IDENTITY) && (id < GX_PTTEXMTX0 || id > GX_PTIDENTITY)) {
Log.report(LOG_FATAL, FMT_STRING("invalid tex mtx {}"), id);
unreachable();
}
CHECK((id >= GX_TEXMTX0 && id <= GX_IDENTITY) || (id >= GX_PTTEXMTX0 && id <= GX_PTIDENTITY), "invalid tex mtx {}",
static_cast<int>(id));
if (id >= GX_PTTEXMTX0) {
if (type != GX_MTX3x4) {
Log.report(LOG_FATAL, FMT_STRING("invalid pt mtx type {}"), type);
unreachable();
}
CHECK(type == GX_MTX3x4, "invalid pt mtx type {}", type);
const auto idx = (id - GX_PTTEXMTX0) / 3;
#ifdef AURORA_NATIVE_MATRIX
const auto& mtx = *reinterpret_cast<const aurora::Mat4x4<float>*>(mtx_);

View File

@ -48,12 +48,7 @@ static std::optional<SStreamState> sStreamState;
static u16 lastVertexStart = 0;
void GXBegin(GXPrimitive primitive, GXVtxFmt vtxFmt, u16 nVerts) {
#ifndef NDEBUG
if (sStreamState) {
Log.report(LOG_FATAL, FMT_STRING("Stream began twice!"));
unreachable();
}
#endif
CHECK(!sStreamState, "Stream began twice!");
uint16_t vertexSize = 0;
for (GXAttr attr{}; const auto type : g_gxState.vtxDesc) {
if (type == GX_DIRECT) {
@ -63,32 +58,23 @@ void GXBegin(GXPrimitive primitive, GXVtxFmt vtxFmt, u16 nVerts) {
vertexSize += 16;
} else if (attr >= GX_VA_TEX0 && attr <= GX_VA_TEX7) {
vertexSize += 8;
} else {
Log.report(LOG_FATAL, FMT_STRING("don't know how to handle attr {}"), attr);
unreachable();
} else UNLIKELY {
FATAL("dont know how to handle attr {}", static_cast<int>(attr));
}
} else if (type == GX_INDEX8 || type == GX_INDEX16) {
vertexSize += 2;
}
attr = GXAttr(attr + 1);
}
if (vertexSize == 0) {
Log.report(LOG_FATAL, FMT_STRING("no vtx attributes enabled?"));
unreachable();
}
CHECK(vertexSize > 0, "no vtx attributes enabled?");
sStreamState.emplace(primitive, nVerts, vertexSize, g_gxState.stateDirty ? 0 : lastVertexStart);
}
static inline void check_attr_order(GXAttr attr) noexcept {
#ifndef NDEBUG
if (!sStreamState) {
Log.report(LOG_FATAL, FMT_STRING("Stream not started!"));
unreachable();
}
if (sStreamState->nextAttr != attr) {
Log.report(LOG_FATAL, FMT_STRING("bad attribute order: {}, expected {}"), attr, sStreamState->nextAttr);
unreachable();
}
CHECK(sStreamState, "Stream not started!");
CHECK(sStreamState->nextAttr == attr, "bad attribute order: {}, expected {}", static_cast<int>(attr),
static_cast<int>(sStreamState->nextAttr));
sStreamState->nextAttr = next_attr(attr + 1);
#endif
}

View File

@ -143,14 +143,8 @@ size_t g_lastIndexSize;
size_t g_lastStorageSize;
using CommandList = std::vector<Command>;
struct ClipRect {
int32_t x;
int32_t y;
int32_t width;
int32_t height;
};
struct RenderPass {
u32 resolveTarget = UINT32_MAX;
TextureHandle resolveTarget;
ClipRect resolveRect;
Vec4<float> clearColor{0.f, 0.f, 0.f, 0.f};
CommandList commands;
@ -158,7 +152,6 @@ struct RenderPass {
};
static std::vector<RenderPass> g_renderPasses;
static u32 g_currentRenderPass = UINT32_MAX;
std::vector<TextureHandle> g_resolvedTextures;
std::vector<TextureUpload> g_textureUploads;
static ByteBuffer g_serializedPipelines{};
@ -212,7 +205,8 @@ static PipelineRef find_pipeline(ShaderType type, const PipelineConfig& config,
}
static inline void push_command(CommandType type, const Command::Data& data) {
if (g_currentRenderPass == UINT32_MAX) {
if (g_currentRenderPass == UINT32_MAX)
UNLIKELY {
Log.report(LOG_WARNING, FMT_STRING("Dropping command {}"), magic_enum::enum_name(type));
return;
}
@ -225,16 +219,13 @@ static inline void push_command(CommandType type, const Command::Data& data) {
});
}
static inline Command& get_last_draw_command(ShaderType type) {
if (g_currentRenderPass == UINT32_MAX) {
Log.report(LOG_FATAL, FMT_STRING("No last command"));
unreachable();
}
CHECK(g_currentRenderPass != UINT32_MAX, "No last command");
auto& last = g_renderPasses[g_currentRenderPass].commands.back();
if (last.type != CommandType::Draw || last.data.draw.type != type) {
Log.report(LOG_FATAL, FMT_STRING("Last command invalid: {} {}, expected {} {}"), magic_enum::enum_name(last.type),
if (last.type != CommandType::Draw || last.data.draw.type != type)
UNLIKELY {
FATAL("Last command invalid: {} {}, expected {} {}", magic_enum::enum_name(last.type),
magic_enum::enum_name(last.data.draw.type), magic_enum::enum_name(CommandType::Draw),
magic_enum::enum_name(type));
unreachable();
}
return last;
}
@ -261,29 +252,13 @@ void set_scissor(uint32_t x, uint32_t y, uint32_t w, uint32_t h) noexcept {
}
}
// Memberwise equality for wgpu::Extent3D (the wgpu type does not provide one);
// used to detect when a cached resolve texture's size no longer matches.
static inline bool operator==(const wgpu::Extent3D& lhs, const wgpu::Extent3D& rhs) {
return lhs.width == rhs.width && lhs.height == rhs.height && lhs.depthOrArrayLayers == rhs.depthOrArrayLayers;
}
static inline bool operator!=(const wgpu::Extent3D& lhs, const wgpu::Extent3D& rhs) { return !(lhs == rhs); }
void resolve_color(const ClipRect& rect, uint32_t bind, GXTexFmt fmt, bool clear_depth) noexcept {
if (g_resolvedTextures.size() < bind + 1) {
g_resolvedTextures.resize(bind + 1);
}
const wgpu::Extent3D size{
.width = static_cast<uint32_t>(rect.width),
.height = static_cast<uint32_t>(rect.height),
.depthOrArrayLayers = 1,
};
if (!g_resolvedTextures[bind] || g_resolvedTextures[bind]->size != size) {
g_resolvedTextures[bind] = new_render_texture(rect.width, rect.height, fmt, "Resolved Texture");
}
auto& currentPass = g_renderPasses[g_currentRenderPass];
currentPass.resolveTarget = bind;
void resolve_pass(TextureHandle texture, ClipRect rect, bool clear, Vec4<float> clearColor) {
auto& currentPass = aurora::gfx::g_renderPasses[g_currentRenderPass];
currentPass.resolveTarget = std::move(texture);
currentPass.resolveRect = rect;
auto& newPass = g_renderPasses.emplace_back();
newPass.clearColor = gx::g_gxState.clearColor;
newPass.clear = false; // TODO
newPass.clearColor = clearColor;
newPass.clear = clear;
++g_currentRenderPass;
}
@ -298,14 +273,10 @@ void push_draw_command(stream::DrawData data) {
template <>
void merge_draw_command(stream::DrawData data) {
auto& last = get_last_draw_command(ShaderType::Stream).data.draw.stream;
if (last.vertRange.offset + last.vertRange.size != data.vertRange.offset) {
Log.report(LOG_FATAL, FMT_STRING("Invalid merge range: {} -> {}"), last.vertRange.offset + last.vertRange.size,
data.vertRange.offset);
}
if (last.indexRange.offset + last.indexRange.size != data.indexRange.offset) {
Log.report(LOG_FATAL, FMT_STRING("Invalid merge range: {} -> {}"), last.indexRange.offset + last.indexRange.size,
data.indexRange.offset);
}
CHECK(last.vertRange.offset + last.vertRange.size == data.vertRange.offset, "Invalid vertex merge range: {} -> {}",
last.vertRange.offset + last.vertRange.size, data.vertRange.offset);
CHECK(last.indexRange.offset + last.indexRange.size == data.indexRange.offset, "Invalid index merge range: {} -> {}",
last.indexRange.offset + last.indexRange.size, data.indexRange.offset);
last.vertRange.size += data.vertRange.size;
last.indexRange.size += data.indexRange.size;
last.indexCount += data.indexCount;
@ -343,10 +314,7 @@ static void pipeline_worker() {
// std::this_thread::sleep_for(std::chrono::milliseconds{1500});
{
std::scoped_lock lock{g_pipelineMutex};
if (!g_pipelines.try_emplace(cb.first, std::move(result)).second) {
Log.report(LOG_FATAL, FMT_STRING("Duplicate pipeline {}"), cb.first);
unreachable();
}
ASSERT(g_pipelines.try_emplace(cb.first, std::move(result)).second, "Duplicate pipeline {}", cb.first);
g_queuedPipelines.pop_front();
hasMore = !g_queuedPipelines.empty();
}
@ -473,7 +441,6 @@ void shutdown() {
gx::shutdown();
g_resolvedTextures.clear();
g_textureUploads.clear();
g_cachedBindGroups.clear();
g_cachedSamplers.clear();
@ -502,10 +469,8 @@ void map_staging_buffer() {
[](WGPUBufferMapAsyncStatus status, void* userdata) {
if (status == WGPUBufferMapAsyncStatus_DestroyedBeforeCallback) {
return;
} else if (status != WGPUBufferMapAsyncStatus_Success) {
Log.report(LOG_FATAL, FMT_STRING("Buffer mapping failed: {}"), status);
unreachable();
}
ASSERT(status == WGPUBufferMapAsyncStatus_Success, "Buffer mapping failed: {}", static_cast<int>(status));
*static_cast<bool*>(userdata) = true;
},
&bufferMapped);
@ -585,10 +550,8 @@ void end_frame(const wgpu::CommandEncoder& cmd) {
void render(wgpu::CommandEncoder& cmd) {
for (u32 i = 0; i < g_renderPasses.size(); ++i) {
const auto& passInfo = g_renderPasses[i];
bool finalPass = i == g_renderPasses.size() - 1;
if (finalPass && passInfo.resolveTarget != UINT32_MAX) {
Log.report(LOG_FATAL, FMT_STRING("Final render pass must not have resolve target"));
unreachable();
if (i == g_renderPasses.size() - 1) {
ASSERT(!passInfo.resolveTarget, "Final render pass must not have resolve target");
}
const std::array attachments{
wgpu::RenderPassColorAttachment{
@ -622,7 +585,7 @@ void render(wgpu::CommandEncoder& cmd) {
render_pass(pass, i);
pass.End();
if (passInfo.resolveTarget != UINT32_MAX) {
if (passInfo.resolveTarget) {
wgpu::ImageCopyTexture src{
.origin =
wgpu::Origin3D{
@ -635,9 +598,8 @@ void render(wgpu::CommandEncoder& cmd) {
} else {
src.texture = webgpu::g_frameBuffer.texture;
}
auto& target = g_resolvedTextures[passInfo.resolveTarget];
const wgpu::ImageCopyTexture dst{
.texture = target->texture,
.texture = passInfo.resolveTarget->texture,
};
const wgpu::Extent3D size{
.width = static_cast<uint32_t>(passInfo.resolveRect.width),
@ -804,10 +766,7 @@ const wgpu::BindGroup& find_bind_group(BindGroupRef id) {
return g_cachedBindGroups[id];
#else
const auto it = g_cachedBindGroups.find(id);
if (it == g_cachedBindGroups.end()) {
Log.report(LOG_FATAL, FMT_STRING("get_bind_group: failed to locate {}"), id);
unreachable();
}
CHECK(it != g_cachedBindGroups.end(), "get_bind_group: failed to locate {:x}", id);
return it->second;
#endif
}

View File

@ -2,6 +2,7 @@
#include "../internal.hpp"
#include <aurora/math.hpp>
#include <type_traits>
#include <utility>
#include <cstring>
@ -136,9 +137,23 @@ struct Range {
uint32_t offset = 0;
uint32_t size = 0;
inline bool operator==(const Range& rhs) { return offset == rhs.offset && size == rhs.size; }
bool operator==(const Range& rhs) const { return memcmp(this, &rhs, sizeof(*this)) == 0; }
bool operator!=(const Range& rhs) const { return !(*this == rhs); }
};
// Axis-aligned rectangle (origin + size) in framebuffer pixels, used for
// scissor/resolve regions.
struct ClipRect {
int32_t x;
int32_t y;
int32_t width;
int32_t height;
// memcmp equality is valid here because the struct is four int32_t with no
// padding (unique object representation).
bool operator==(const ClipRect& rhs) const { return memcmp(this, &rhs, sizeof(*this)) == 0; }
bool operator!=(const ClipRect& rhs) const { return !(*this == rhs); }
};
struct TextureRef;
using TextureHandle = std::shared_ptr<TextureRef>;
enum class ShaderType {
Stream,
Model,
@ -152,6 +167,7 @@ void end_frame(const wgpu::CommandEncoder& cmd);
void render(wgpu::CommandEncoder& cmd);
void render_pass(const wgpu::RenderPassEncoder& pass, uint32_t idx);
void map_staging_buffer();
void resolve_pass(TextureHandle texture, ClipRect rect, bool clear, Vec4<float> clearColor);
Range push_verts(const uint8_t* data, size_t length);
template <typename T>

View File

@ -25,6 +25,7 @@ const TextureBind& get_texture(GXTexMapID id) noexcept { return g_gxState.textur
static inline wgpu::BlendFactor to_blend_factor(GXBlendFactor fac, bool isDst) {
switch (fac) {
DEFAULT_FATAL("invalid blend factor {}", static_cast<int>(fac));
case GX_BL_ZERO:
return wgpu::BlendFactor::Zero;
case GX_BL_ONE:
@ -49,14 +50,12 @@ static inline wgpu::BlendFactor to_blend_factor(GXBlendFactor fac, bool isDst) {
return wgpu::BlendFactor::DstAlpha;
case GX_BL_INVDSTALPHA:
return wgpu::BlendFactor::OneMinusDstAlpha;
default:
Log.report(LOG_FATAL, FMT_STRING("invalid blend factor {}"), fac);
unreachable();
}
}
static inline wgpu::CompareFunction to_compare_function(GXCompare func) {
switch (func) {
DEFAULT_FATAL("invalid depth fn {}", static_cast<int>(func));
case GX_NEVER:
return wgpu::CompareFunction::Never;
case GX_LESS:
@ -73,9 +72,6 @@ static inline wgpu::CompareFunction to_compare_function(GXCompare func) {
return wgpu::CompareFunction::GreaterEqual;
case GX_ALWAYS:
return wgpu::CompareFunction::Always;
default:
Log.report(LOG_FATAL, FMT_STRING("invalid depth fn {}"), func);
unreachable();
}
}
@ -83,6 +79,7 @@ static inline wgpu::BlendState to_blend_state(GXBlendMode mode, GXBlendFactor sr
GXLogicOp op, u32 dstAlpha) {
wgpu::BlendComponent colorBlendComponent;
switch (mode) {
DEFAULT_FATAL("unsupported blend mode {}", static_cast<int>(mode));
case GX_BM_NONE:
colorBlendComponent = {
.operation = wgpu::BlendOperation::Add,
@ -106,6 +103,7 @@ static inline wgpu::BlendState to_blend_state(GXBlendMode mode, GXBlendFactor sr
break;
case GX_BM_LOGIC:
switch (op) {
DEFAULT_FATAL("unsupported logic op {}", static_cast<int>(op));
case GX_LO_CLEAR:
colorBlendComponent = {
.operation = wgpu::BlendOperation::Add,
@ -127,14 +125,8 @@ static inline wgpu::BlendState to_blend_state(GXBlendMode mode, GXBlendFactor sr
.dstFactor = wgpu::BlendFactor::One,
};
break;
default:
Log.report(LOG_FATAL, FMT_STRING("unsupported logic op {}"), op);
unreachable();
}
break;
default:
Log.report(LOG_FATAL, FMT_STRING("unsupported blend mode {}"), mode);
unreachable();
}
wgpu::BlendComponent alphaBlendComponent{
.operation = wgpu::BlendOperation::Add,
@ -168,17 +160,16 @@ static inline wgpu::ColorWriteMask to_write_mask(bool colorUpdate, bool alphaUpd
static inline wgpu::PrimitiveState to_primitive_state(GXPrimitive gx_prim, GXCullMode gx_cullMode) {
wgpu::PrimitiveTopology primitive = wgpu::PrimitiveTopology::TriangleList;
switch (gx_prim) {
DEFAULT_FATAL("unsupported primitive type {}", static_cast<int>(gx_prim));
case GX_TRIANGLES:
break;
case GX_TRIANGLESTRIP:
primitive = wgpu::PrimitiveTopology::TriangleStrip;
break;
default:
Log.report(LOG_FATAL, FMT_STRING("Unsupported primitive type {}"), gx_prim);
unreachable();
}
wgpu::CullMode cullMode = wgpu::CullMode::None;
switch (gx_cullMode) {
DEFAULT_FATAL("unsupported cull mode {}", static_cast<int>(gx_cullMode));
case GX_CULL_FRONT:
cullMode = wgpu::CullMode::Front;
break;
@ -187,9 +178,6 @@ static inline wgpu::PrimitiveState to_primitive_state(GXPrimitive gx_prim, GXCul
break;
case GX_CULL_NONE:
break;
default:
Log.report(LOG_FATAL, FMT_STRING("Unsupported cull mode {}"), gx_cullMode);
unreachable();
}
return {
.topology = primitive,
@ -404,6 +392,7 @@ Range build_uniform(const ShaderInfo& info) noexcept {
}
const auto& state = g_gxState;
switch (info.texMtxTypes[i]) {
DEFAULT_FATAL("unhandled tex mtx type {}", static_cast<int>(info.texMtxTypes[i]));
case GX_TG_MTX2x4:
if (std::holds_alternative<Mat4x2<float>>(state.texMtxs[i])) {
buf.append(&std::get<Mat4x2<float>>(state.texMtxs[i]), 32);
@ -416,23 +405,16 @@ Range build_uniform(const ShaderInfo& info) noexcept {
{0.f, 0.f},
};
buf.append(&mtx, 32);
} else {
Log.report(LOG_FATAL, FMT_STRING("expected 2x4 mtx in idx {}"), i);
unreachable();
}
} else
UNLIKELY FATAL("expected 2x4 mtx in idx {}", i);
break;
case GX_TG_MTX3x4:
if (std::holds_alternative<Mat4x4<float>>(g_gxState.texMtxs[i])) {
const auto& mat = std::get<Mat4x4<float>>(g_gxState.texMtxs[i]);
buf.append(&mat, 64);
} else {
Log.report(LOG_FATAL, FMT_STRING("expected 3x4 mtx in idx {}"), i);
buf.append(&Mat4x4_Identity, 64);
}
} else
UNLIKELY FATAL("expected 3x4 mtx in idx {}", i);
break;
default:
Log.report(LOG_FATAL, FMT_STRING("unhandled tex mtx type {}"), info.texMtxTypes[i]);
unreachable();
}
}
for (int i = 0; i < info.usesPTTexMtx.size(); ++i) {
@ -465,10 +447,7 @@ Range build_uniform(const ShaderInfo& info) noexcept {
continue;
}
const auto& tex = get_texture(static_cast<GXTexMapID>(i));
if (!tex) {
Log.report(LOG_FATAL, FMT_STRING("unbound texture {}"), i);
unreachable();
}
CHECK(tex, "unbound texture {}", i);
buf.append(&tex.texObj.lodBias, 4);
}
g_gxState.stateDirty = false;
@ -512,10 +491,7 @@ GXBindGroups build_bind_groups(const ShaderInfo& info, const ShaderConfig& confi
continue;
}
const auto& tex = g_gxState.textures[i];
if (!tex) {
Log.report(LOG_FATAL, FMT_STRING("unbound texture {}"), i);
unreachable();
}
CHECK(tex, "unbound texture {}", i);
samplerEntries[samplerCount] = {
.binding = samplerCount,
.sampler = sampler_ref(tex.get_descriptor()),
@ -530,13 +506,8 @@ GXBindGroups build_bind_groups(const ShaderInfo& info, const ShaderConfig& confi
const auto& texConfig = config.textureConfig[i];
if (is_palette_format(texConfig.loadFmt)) {
u32 tlut = tex.texObj.tlut;
if (tlut < GX_TLUT0 || tlut > GX_TLUT7) {
Log.report(LOG_FATAL, FMT_STRING("tlut out of bounds {}"), tlut);
unreachable();
} else if (!g_gxState.tluts[tlut].ref) {
Log.report(LOG_FATAL, FMT_STRING("tlut unbound {}"), tlut);
unreachable();
}
CHECK(tlut >= GX_TLUT0 && tlut <= GX_BIGTLUT3, "tlut out of bounds {}", tlut);
CHECK(g_gxState.tluts[tlut].ref, "tlut unbound {}", tlut);
textureEntries[textureCount] = {
.binding = textureCount,
.textureView = g_gxState.tluts[tlut].ref->view,
@ -702,24 +673,24 @@ void shutdown() noexcept {
item.ref.reset();
}
g_gxCachedShaders.clear();
g_gxState.copyTextures.clear();
}
} // namespace gx
static wgpu::AddressMode wgpu_address_mode(GXTexWrapMode mode) {
switch (mode) {
DEFAULT_FATAL("invalid wrap mode {}", static_cast<int>(mode));
case GX_CLAMP:
return wgpu::AddressMode::ClampToEdge;
case GX_REPEAT:
return wgpu::AddressMode::Repeat;
case GX_MIRROR:
return wgpu::AddressMode::MirrorRepeat;
default:
Log.report(LOG_FATAL, FMT_STRING("invalid wrap mode {}"), mode);
unreachable();
}
}
static std::pair<wgpu::FilterMode, wgpu::FilterMode> wgpu_filter_mode(GXTexFilter filter) {
switch (filter) {
DEFAULT_FATAL("invalid filter mode {}", static_cast<int>(filter));
case GX_NEAR:
return {wgpu::FilterMode::Nearest, wgpu::FilterMode::Linear};
case GX_LINEAR:
@ -732,22 +703,17 @@ static std::pair<wgpu::FilterMode, wgpu::FilterMode> wgpu_filter_mode(GXTexFilte
return {wgpu::FilterMode::Nearest, wgpu::FilterMode::Linear};
case GX_LIN_MIP_LIN:
return {wgpu::FilterMode::Linear, wgpu::FilterMode::Linear};
default:
Log.report(LOG_FATAL, FMT_STRING("invalid filter mode {}"), filter);
unreachable();
}
}
static u16 wgpu_aniso(GXAnisotropy aniso) {
switch (aniso) {
DEFAULT_FATAL("invalid aniso {}", static_cast<int>(aniso));
case GX_ANISO_1:
return 1;
case GX_ANISO_2:
return std::max<u16>(webgpu::g_graphicsConfig.textureAnisotropy / 2, 1);
case GX_ANISO_4:
return std::max<u16>(webgpu::g_graphicsConfig.textureAnisotropy, 1);
default:
Log.report(LOG_FATAL, FMT_STRING("invalid aniso mode {}"), aniso);
unreachable();
}
}
wgpu::SamplerDescriptor TextureBind::get_descriptor() const noexcept {

View File

@ -6,6 +6,7 @@
#include "../internal.hpp"
#include "texture.hpp"
#include <absl/container/flat_hash_map.h>
#include <type_traits>
#include <utility>
#include <variant>
@ -46,6 +47,7 @@ constexpr float GX_LARGE_NUMBER = -1048576.0f;
namespace aurora::gfx::gx {
constexpr u32 MaxTextures = GX_MAX_TEXMAP;
constexpr u32 MaxTluts = 20;
constexpr u32 MaxTevStages = GX_MAX_TEVSTAGE;
constexpr u32 MaxColorChannels = 4;
constexpr u32 MaxTevRegs = 4; // TEVPREV, TEVREG0-2
@ -151,6 +153,7 @@ struct TcgConfig {
u8 _p3 = 0;
bool operator==(const TcgConfig& rhs) const { return memcmp(this, &rhs, sizeof(*this)) == 0; }
bool operator!=(const TcgConfig& rhs) const { return !(*this == rhs); }
};
static_assert(std::has_unique_object_representations_v<TcgConfig>);
struct FogState {
@ -165,6 +168,7 @@ struct FogState {
return type == rhs.type && startZ == rhs.startZ && endZ == rhs.endZ && nearZ == rhs.nearZ && farZ == rhs.farZ &&
color == rhs.color;
}
bool operator!=(const FogState& rhs) const { return !(*this == rhs); }
};
struct TevSwap {
GXTevColorChan red = GX_CH_RED;
@ -173,6 +177,7 @@ struct TevSwap {
GXTevColorChan alpha = GX_CH_ALPHA;
bool operator==(const TevSwap& rhs) const { return memcmp(this, &rhs, sizeof(*this)) == 0; }
bool operator!=(const TevSwap& rhs) const { return !(*this == rhs); }
explicit operator bool() const { return !(*this == TevSwap{}); }
};
static_assert(std::has_unique_object_representations_v<TevSwap>);
@ -184,6 +189,7 @@ struct AlphaCompare {
u32 ref1;
bool operator==(const AlphaCompare& rhs) const { return memcmp(this, &rhs, sizeof(*this)) == 0; }
bool operator!=(const AlphaCompare& rhs) const { return !(*this == rhs); }
explicit operator bool() const { return comp0 != GX_ALWAYS || comp1 != GX_ALWAYS; }
};
static_assert(std::has_unique_object_representations_v<AlphaCompare>);
@ -192,6 +198,7 @@ struct IndTexMtxInfo {
s8 scaleExp;
bool operator==(const IndTexMtxInfo& rhs) const { return mtx == rhs.mtx && scaleExp == rhs.scaleExp; }
bool operator!=(const IndTexMtxInfo& rhs) const { return !(*this == rhs); }
};
struct VtxAttrFmt {
GXCompCnt cnt;
@ -216,6 +223,7 @@ struct Light {
bool operator==(const Light& rhs) const {
return pos == rhs.pos && dir == rhs.dir && color == rhs.color && cosAtt == rhs.cosAtt && distAtt == rhs.distAtt;
}
bool operator!=(const Light& rhs) const { return !(*this == rhs); }
};
static_assert(sizeof(Light) == 80);
struct AttrArray {
@ -227,6 +235,7 @@ struct AttrArray {
inline bool operator==(const AttrArray& lhs, const AttrArray& rhs) {
return lhs.data == rhs.data && lhs.size == rhs.size && lhs.stride == rhs.stride;
}
inline bool operator!=(const AttrArray& lhs, const AttrArray& rhs) { return !(lhs == rhs); }
struct GXState {
std::array<PnMtx, MaxPnMtx> pnMtx;
@ -251,7 +260,7 @@ struct GXState {
std::array<Light, GX::MaxLights> lights;
std::array<TevStage, MaxTevStages> tevStages;
std::array<TextureBind, MaxTextures> textures;
std::array<GXTlutObj_, MaxTextures> tluts;
std::array<GXTlutObj_, MaxTluts> tluts;
std::array<TexMtxVariant, MaxTexMtx> texMtxs;
std::array<Mat4x4<float>, MaxPTTexMtx> ptTexMtxs;
std::array<TcgConfig, MaxTexCoord> tcgs;
@ -266,6 +275,9 @@ struct GXState {
std::array<IndStage, MaxIndStages> indStages;
std::array<IndTexMtxInfo, MaxIndTexMtxs> indTexMtxs;
std::array<AttrArray, GX_VA_MAX_ATTR> arrays;
ClipRect texCopySrc;
GXTexFmt texCopyFmt;
absl::flat_hash_map<void*, TextureHandle> copyTextures;
bool depthCompare = true;
bool depthUpdate = true;
bool colorUpdate = true;

View File

@ -64,12 +64,8 @@ static void color_arg_reg_info(GXTevColorArg arg, const TevStage& stage, ShaderI
break;
case GX_CC_TEXC:
case GX_CC_TEXA:
if (stage.texCoordId == GX_TEXCOORD_NULL) {
Log.report(LOG_FATAL, FMT_STRING("texCoord not bound"));
}
if (stage.texMapId == GX_TEXMAP_NULL) {
Log.report(LOG_FATAL, FMT_STRING("texMap not bound"));
}
CHECK(stage.texCoordId != GX_TEXCOORD_NULL, "tex coord not bound");
CHECK(stage.texMapId != GX_TEXMAP_NULL, "tex map not bound");
info.sampledTexCoords.set(stage.texCoordId);
info.sampledTextures.set(stage.texMapId);
break;
@ -139,6 +135,7 @@ static bool formatHasAlpha(u32 format) {
static std::string color_arg_reg(GXTevColorArg arg, size_t stageIdx, const ShaderConfig& config,
const TevStage& stage) {
switch (arg) {
DEFAULT_FATAL("invalid color arg {}", static_cast<int>(arg));
case GX_CC_CPREV:
return "prev.rgb";
case GX_CC_APREV:
@ -156,53 +153,39 @@ static std::string color_arg_reg(GXTevColorArg arg, size_t stageIdx, const Shade
case GX_CC_A2:
return "vec3<f32>(tevreg2.a)";
case GX_CC_TEXC: {
if (stage.texMapId == GX_TEXMAP_NULL) {
Log.report(LOG_FATAL, FMT_STRING("unmapped texture for stage {}"), stageIdx);
unreachable();
} else if (stage.texMapId < GX_TEXMAP0 || stage.texMapId > GX_TEXMAP7) {
Log.report(LOG_FATAL, FMT_STRING("invalid texture {} for stage {}"), stage.texMapId, stageIdx);
unreachable();
}
CHECK(stage.texMapId != GX_TEXMAP_NULL, "unmapped texture for stage {}", stageIdx);
CHECK(stage.texMapId >= GX_TEXMAP0 && stage.texMapId <= GX_TEXMAP7, "invalid texture {} for stage {}",
static_cast<int>(stage.texMapId), stageIdx);
const auto& swap = config.tevSwapTable[stage.tevSwapTex];
return fmt::format(FMT_STRING("sampled{}.{}{}{}"), stageIdx, chan_comp(swap.red), chan_comp(swap.green),
chan_comp(swap.blue));
}
case GX_CC_TEXA: {
if (stage.texMapId == GX_TEXMAP_NULL) {
Log.report(LOG_FATAL, FMT_STRING("unmapped texture for stage {}"), stageIdx);
unreachable();
} else if (stage.texMapId < GX_TEXMAP0 || stage.texMapId > GX_TEXMAP7) {
Log.report(LOG_FATAL, FMT_STRING("invalid texture {} for stage {}"), stage.texMapId, stageIdx);
unreachable();
}
CHECK(stage.texMapId != GX_TEXMAP_NULL, "unmapped texture for stage {}", stageIdx);
CHECK(stage.texMapId >= GX_TEXMAP0 && stage.texMapId <= GX_TEXMAP7, "invalid texture {} for stage {}",
static_cast<int>(stage.texMapId), stageIdx);
const auto& swap = config.tevSwapTable[stage.tevSwapTex];
return fmt::format(FMT_STRING("vec3<f32>(sampled{}.{})"), stageIdx, chan_comp(swap.alpha));
}
case GX_CC_RASC: {
if (stage.channelId == GX_COLOR_NULL) {
Log.report(LOG_FATAL, FMT_STRING("unmapped color channel for stage {}"), stageIdx);
unreachable();
} else if (stage.channelId == GX_COLOR_ZERO) {
CHECK(stage.channelId != GX_COLOR_NULL, "unmapped color channel for stage {}", stageIdx);
if (stage.channelId == GX_COLOR_ZERO) {
return "vec3<f32>(0.0)";
} else if (stage.channelId < GX_COLOR0A0 || stage.channelId > GX_COLOR1A1) {
Log.report(LOG_FATAL, FMT_STRING("invalid color channel {} for stage {}"), stage.channelId, stageIdx);
unreachable();
}
CHECK(stage.channelId >= GX_COLOR0A0 && stage.channelId <= GX_COLOR1A1, "invalid color channel {} for stage {}",
static_cast<int>(stage.channelId), stageIdx);
u32 idx = stage.channelId - GX_COLOR0A0;
const auto& swap = config.tevSwapTable[stage.tevSwapRas];
return fmt::format(FMT_STRING("rast{}.{}{}{}"), idx, chan_comp(swap.red), chan_comp(swap.green),
chan_comp(swap.blue));
}
case GX_CC_RASA: {
if (stage.channelId == GX_COLOR_NULL) {
Log.report(LOG_FATAL, FMT_STRING("unmapped color channel for stage {}"), stageIdx);
unreachable();
} else if (stage.channelId == GX_COLOR_ZERO) {
CHECK(stage.channelId != GX_COLOR_NULL, "unmapped color channel for stage {}", stageIdx);
if (stage.channelId == GX_COLOR_ZERO) {
return "vec3<f32>(0.0)";
} else if (stage.channelId < GX_COLOR0A0 || stage.channelId > GX_COLOR1A1) {
Log.report(LOG_FATAL, FMT_STRING("invalid color channel {} for stage {}"), stage.channelId, stageIdx);
unreachable();
}
CHECK(stage.channelId >= GX_COLOR0A0 && stage.channelId <= GX_COLOR1A1, "invalid color channel {} for stage {}",
static_cast<int>(stage.channelId), stageIdx);
u32 idx = stage.channelId - GX_COLOR0A0;
const auto& swap = config.tevSwapTable[stage.tevSwapRas];
return fmt::format(FMT_STRING("vec3<f32>(rast{}.{})"), idx, chan_comp(swap.alpha));
@ -213,6 +196,7 @@ static std::string color_arg_reg(GXTevColorArg arg, size_t stageIdx, const Shade
return "vec3<f32>(0.5)";
case GX_CC_KONST: {
switch (stage.kcSel) {
DEFAULT_FATAL("invalid kcSel {}", static_cast<int>(stage.kcSel));
case GX_TEV_KCSEL_8_8:
return "vec3<f32>(1.0)";
case GX_TEV_KCSEL_7_8:
@ -269,16 +253,10 @@ static std::string color_arg_reg(GXTevColorArg arg, size_t stageIdx, const Shade
return "vec3<f32>(ubuf.kcolor2.a)";
case GX_TEV_KCSEL_K3_A:
return "vec3<f32>(ubuf.kcolor3.a)";
default:
Log.report(LOG_FATAL, FMT_STRING("invalid kcSel {}"), stage.kcSel);
unreachable();
}
}
case GX_CC_ZERO:
return "vec3<f32>(0.0)";
default:
Log.report(LOG_FATAL, FMT_STRING("invalid color arg {}"), arg);
unreachable();
}
}
@ -305,12 +283,8 @@ static void alpha_arg_reg_info(GXTevAlphaArg arg, const TevStage& stage, ShaderI
}
break;
case GX_CA_TEXA:
if (stage.texCoordId == GX_TEXCOORD_NULL) {
Log.report(LOG_FATAL, FMT_STRING("texCoord not bound"));
}
if (stage.texMapId == GX_TEXMAP_NULL) {
Log.report(LOG_FATAL, FMT_STRING("texMap not bound"));
}
CHECK(stage.texCoordId != GX_TEXCOORD_NULL, "tex coord not bound");
CHECK(stage.texMapId != GX_TEXMAP_NULL, "tex map not bound");
info.sampledTexCoords.set(stage.texCoordId);
info.sampledTextures.set(stage.texMapId);
break;
@ -357,6 +331,7 @@ static void alpha_arg_reg_info(GXTevAlphaArg arg, const TevStage& stage, ShaderI
static std::string alpha_arg_reg(GXTevAlphaArg arg, size_t stageIdx, const ShaderConfig& config,
const TevStage& stage) {
switch (arg) {
DEFAULT_FATAL("invalid alpha arg {}", static_cast<int>(arg));
case GX_CA_APREV:
return "prev.a";
case GX_CA_A0:
@ -366,32 +341,26 @@ static std::string alpha_arg_reg(GXTevAlphaArg arg, size_t stageIdx, const Shade
case GX_CA_A2:
return "tevreg2.a";
case GX_CA_TEXA: {
if (stage.texMapId == GX_TEXMAP_NULL) {
Log.report(LOG_FATAL, FMT_STRING("unmapped texture for stage {}"), stageIdx);
unreachable();
} else if (stage.texMapId < GX_TEXMAP0 || stage.texMapId > GX_TEXMAP7) {
Log.report(LOG_FATAL, FMT_STRING("invalid texture {} for stage {}"), stage.texMapId, stageIdx);
unreachable();
}
CHECK(stage.texMapId != GX_TEXMAP_NULL, "unmapped texture for stage {}", stageIdx);
CHECK(stage.texMapId >= GX_TEXMAP0 && stage.texMapId <= GX_TEXMAP7, "invalid texture {} for stage {}",
static_cast<int>(stage.texMapId), stageIdx);
const auto& swap = config.tevSwapTable[stage.tevSwapTex];
return fmt::format(FMT_STRING("sampled{}.{}"), stageIdx, chan_comp(swap.alpha));
}
case GX_CA_RASA: {
if (stage.channelId == GX_COLOR_NULL) {
Log.report(LOG_FATAL, FMT_STRING("unmapped color channel for stage {}"), stageIdx);
unreachable();
} else if (stage.channelId == GX_COLOR_ZERO) {
CHECK(stage.channelId != GX_COLOR_NULL, "unmapped color channel for stage {}", stageIdx);
if (stage.channelId == GX_COLOR_ZERO) {
return "0.0";
} else if (stage.channelId < GX_COLOR0A0 || stage.channelId > GX_COLOR1A1) {
Log.report(LOG_FATAL, FMT_STRING("invalid color channel {} for stage {}"), stage.channelId, stageIdx);
unreachable();
}
CHECK(stage.channelId >= GX_COLOR0A0 && stage.channelId <= GX_COLOR1A1, "invalid color channel {} for stage {}",
static_cast<int>(stage.channelId), stageIdx);
u32 idx = stage.channelId - GX_COLOR0A0;
const auto& swap = config.tevSwapTable[stage.tevSwapRas];
return fmt::format(FMT_STRING("rast{}.{}"), idx, chan_comp(swap.alpha));
}
case GX_CA_KONST: {
switch (stage.kaSel) {
DEFAULT_FATAL("invalid kaSel {}", static_cast<int>(stage.kaSel));
case GX_TEV_KASEL_8_8:
return "1.0";
case GX_TEV_KASEL_7_8:
@ -440,48 +409,39 @@ static std::string alpha_arg_reg(GXTevAlphaArg arg, size_t stageIdx, const Shade
return "ubuf.kcolor2.a";
case GX_TEV_KASEL_K3_A:
return "ubuf.kcolor3.a";
default:
Log.report(LOG_FATAL, FMT_STRING("invalid kaSel {}"), stage.kaSel);
unreachable();
}
}
case GX_CA_ZERO:
return "0.0";
default:
Log.report(LOG_FATAL, FMT_STRING("invalid alpha arg {}"), arg);
unreachable();
}
}
static std::string_view tev_op(GXTevOp op) {
switch (op) {
DEFAULT_FATAL("unimplemented tev op {}", static_cast<int>(op));
case GX_TEV_ADD:
return ""sv;
case GX_TEV_SUB:
return "-"sv;
default:
Log.report(LOG_FATAL, FMT_STRING("TODO {}"), op);
unreachable();
}
}
static std::string_view tev_bias(GXTevBias bias) {
switch (bias) {
DEFAULT_FATAL("invalid tev bias {}", static_cast<int>(bias));
case GX_TB_ZERO:
return ""sv;
case GX_TB_ADDHALF:
return " + 0.5"sv;
case GX_TB_SUBHALF:
return " - 0.5"sv;
default:
Log.report(LOG_FATAL, FMT_STRING("invalid bias {}"), bias);
unreachable();
}
}
static std::string alpha_compare(GXCompare comp, u8 ref, bool& valid) {
const float fref = ref / 255.f;
switch (comp) {
DEFAULT_FATAL("invalid alpha comp {}", static_cast<int>(comp));
case GX_NEVER:
return "false"s;
case GX_LESS:
@ -499,14 +459,12 @@ static std::string alpha_compare(GXCompare comp, u8 ref, bool& valid) {
case GX_ALWAYS:
valid = false;
return "true"s;
default:
Log.report(LOG_FATAL, FMT_STRING("invalid compare {}"), comp);
unreachable();
}
}
static std::string_view tev_scale(GXTevScale scale) {
switch (scale) {
DEFAULT_FATAL("invalid tev scale {}", static_cast<int>(scale));
case GX_CS_SCALE_1:
return ""sv;
case GX_CS_SCALE_2:
@ -515,9 +473,6 @@ static std::string_view tev_scale(GXTevScale scale) {
return " * 4.0"sv;
case GX_CS_DIVIDE_2:
return " / 2.0"sv;
default:
Log.report(LOG_FATAL, FMT_STRING("invalid scale {}"), scale);
unreachable();
}
}
@ -528,8 +483,7 @@ static inline std::string vtx_attr(const ShaderConfig& config, GXAttr attr) {
// Default normal
return "vec3<f32>(1.0, 0.0, 0.0)"s;
}
Log.report(LOG_FATAL, FMT_STRING("unmapped attr {}"), attr);
unreachable();
UNLIKELY FATAL("unmapped vtx attr {}", static_cast<int>(attr));
}
if (attr == GX_VA_POS) {
return "in_pos"s;
@ -545,8 +499,7 @@ static inline std::string vtx_attr(const ShaderConfig& config, GXAttr attr) {
const auto idx = attr - GX_VA_TEX0;
return fmt::format(FMT_STRING("in_tex{}_uv"), idx);
}
Log.report(LOG_FATAL, FMT_STRING("unhandled attr {}"), attr);
unreachable();
UNLIKELY FATAL("unhandled vtx attr {}", static_cast<int>(attr));
}
static inline std::string texture_conversion(const TextureConfig& tex, u32 stageIdx, u32 texMapId) {
@ -705,12 +658,7 @@ wgpu::ShaderModule build_shader(const ShaderConfig& config, const ShaderInfo& in
const auto hash = xxh3_hash(config);
const auto it = g_gxCachedShaders.find(hash);
if (it != g_gxCachedShaders.end()) {
#ifndef NDEBUG
if (g_gxCachedShaderConfigs[hash] != config) {
Log.report(LOG_FATAL, FMT_STRING("Shader collision!"));
unreachable();
}
#endif
CHECK(g_gxCachedShaderConfigs[hash] == config, "Shader collision! {:x}", hash);
return it->second.first;
}
@ -868,6 +816,7 @@ wgpu::ShaderModule build_shader(const ShaderConfig& config, const ShaderInfo& in
{
std::string outReg;
switch (stage.colorOp.outReg) {
DEFAULT_FATAL("invalid colorOp outReg {}", static_cast<int>(stage.colorOp.outReg));
case GX_TEVPREV:
outReg = "prev";
break;
@ -880,8 +829,6 @@ wgpu::ShaderModule build_shader(const ShaderConfig& config, const ShaderInfo& in
case GX_TEVREG2:
outReg = "tevreg2";
break;
default:
Log.report(LOG_FATAL, FMT_STRING("invalid colorOp outReg {}"), stage.colorOp.outReg);
}
std::string op = fmt::format(
FMT_STRING("(({4}mix({0}, {1}, {2}) + {3}){5}){6}"), color_arg_reg(stage.colorPass.a, idx, config, stage),
@ -897,6 +844,7 @@ wgpu::ShaderModule build_shader(const ShaderConfig& config, const ShaderInfo& in
{
std::string outReg;
switch (stage.alphaOp.outReg) {
DEFAULT_FATAL("invalid alphaOp outReg {}", static_cast<int>(stage.alphaOp.outReg));
case GX_TEVPREV:
outReg = "prev.a";
break;
@ -909,8 +857,6 @@ wgpu::ShaderModule build_shader(const ShaderConfig& config, const ShaderInfo& in
case GX_TEVREG2:
outReg = "tevreg2.a";
break;
default:
Log.report(LOG_FATAL, FMT_STRING("invalid alphaOp outReg {}"), stage.alphaOp.outReg);
}
std::string op = fmt::format(
FMT_STRING("(({4}mix({0}, {1}, {2}) + {3}){5}){6}"), alpha_arg_reg(stage.alphaPass.a, idx, config, stage),
@ -1029,7 +975,7 @@ wgpu::ShaderModule build_shader(const ShaderConfig& config, const ShaderInfo& in
attn = max(0.0, cos_attn / dist_attn);)"""));
} else if (cc.attnFn == GX_AF_SPEC) {
diffFn = GX_DF_NONE;
Log.report(LOG_FATAL, FMT_STRING("AF_SPEC unimplemented"));
FATAL("AF_SPEC unimplemented");
}
if (diffFn == GX_DF_NONE) {
lightDiffFn = "1.0";
@ -1116,10 +1062,8 @@ wgpu::ShaderModule build_shader(const ShaderConfig& config, const ShaderInfo& in
vtxXfrAttrs += fmt::format(FMT_STRING("\n var tc{} = vec4<f32>(in_pos, 1.0);"), i);
} else if (tcg.src == GX_TG_NRM) {
vtxXfrAttrs += fmt::format(FMT_STRING("\n var tc{} = vec4<f32>(in_nrm, 1.0);"), i);
} else {
Log.report(LOG_FATAL, FMT_STRING("unhandled tcg src {} for "), tcg.src);
unreachable();
}
} else
UNLIKELY FATAL("unhandled tcg src {}", static_cast<int>(tcg.src));
if (tcg.mtx == GX_IDENTITY) {
vtxXfrAttrs += fmt::format(FMT_STRING("\n var tc{0}_tmp = tc{0}.xyz;"), i);
} else {
@ -1152,6 +1096,7 @@ wgpu::ShaderModule build_shader(const ShaderConfig& config, const ShaderInfo& in
std::string_view suffix;
if (!is_palette_format(texConfig.copyFmt)) {
switch (texConfig.loadFmt) {
DEFAULT_FATAL("unimplemented palette format {}", static_cast<int>(texConfig.loadFmt));
case GX_TF_C4:
suffix = "I4"sv;
break;
@ -1161,9 +1106,6 @@ wgpu::ShaderModule build_shader(const ShaderConfig& config, const ShaderInfo& in
// case GX_TF_C14X2:
// suffix = "I14X2";
// break;
default:
Log.report(LOG_FATAL, FMT_STRING("Unsupported palette format {}"), texConfig.loadFmt);
unreachable();
}
}
fragmentFnPre +=
@ -1179,15 +1121,13 @@ wgpu::ShaderModule build_shader(const ShaderConfig& config, const ShaderInfo& in
for (int i = 0; i < info.usesTexMtx.size(); ++i) {
if (info.usesTexMtx.test(i)) {
switch (info.texMtxTypes[i]) {
DEFAULT_FATAL("unhandled tex mtx type {}", static_cast<int>(info.texMtxTypes[i]));
case GX_TG_MTX2x4:
uniBufAttrs += fmt::format(FMT_STRING("\n texmtx{}: mat4x2<f32>,"), i);
break;
case GX_TG_MTX3x4:
uniBufAttrs += fmt::format(FMT_STRING("\n texmtx{}: mat4x3<f32>,"), i);
break;
default:
Log.report(LOG_FATAL, FMT_STRING("unhandled tex mtx type {}"), info.texMtxTypes[i]);
unreachable();
}
}
}
@ -1210,6 +1150,7 @@ wgpu::ShaderModule build_shader(const ShaderConfig& config, const ShaderInfo& in
fragmentFn += "\n // Fog\n var fogF = clamp((ubuf.fog.a / (ubuf.fog.b - in.pos.z)) - ubuf.fog.c, 0.0, 1.0);";
switch (config.fogType) {
DEFAULT_FATAL("invalid fog type {}", static_cast<int>(config.fogType));
case GX_FOG_PERSP_LIN:
case GX_FOG_ORTHO_LIN:
fragmentFn += "\n var fogZ = fogF;";
@ -1232,9 +1173,6 @@ wgpu::ShaderModule build_shader(const ShaderConfig& config, const ShaderInfo& in
"\n fogF = 1.0 - fogF;"
"\n var fogZ = exp2(-8.0 * fogF * fogF);";
break;
default:
Log.report(LOG_FATAL, FMT_STRING("invalid fog type {}"), config.fogType);
unreachable();
}
fragmentFn += "\n prev = vec4<f32>(mix(prev.rgb, ubuf.fog.color.rgb, clamp(fogZ, 0.0, 1.0)), prev.a);";
}
@ -1274,6 +1212,7 @@ wgpu::ShaderModule build_shader(const ShaderConfig& config, const ShaderInfo& in
if (comp0Valid || comp1Valid) {
fragmentFn += "\n // Alpha compare";
switch (config.alphaCompare.op) {
DEFAULT_FATAL("invalid alpha compare op {}", static_cast<int>(config.alphaCompare.op));
case GX_AOP_AND:
fragmentFn += fmt::format(FMT_STRING("\n if (!({} && {})) {{ discard; }}"), comp0, comp1);
break;
@ -1286,9 +1225,6 @@ wgpu::ShaderModule build_shader(const ShaderConfig& config, const ShaderInfo& in
case GX_AOP_XNOR:
fragmentFn += fmt::format(FMT_STRING("\n if (({} ^^ {})) {{ discard; }}"), comp0, comp1);
break;
default:
Log.report(LOG_FATAL, FMT_STRING("invalid alpha compare op {}"), config.alphaCompare.op);
unreachable();
}
}
}

View File

@ -66,11 +66,14 @@ static u32 prepare_vtx_buffer(ByteBuffer& buf, GXVtxFmt vtxfmt, const u8* ptr, u
for (int attr = 0; attr < GX_VA_MAX_ATTR; attr++) {
const auto& attrFmt = g_gxState.vtxFmts[vtxfmt].attrs[attr];
switch (g_gxState.vtxDesc[attr]) {
DEFAULT_FATAL("unhandled attribute type {}", static_cast<int>(g_gxState.vtxDesc[attr]));
case GX_NONE:
break;
case GX_DIRECT:
#define COMBINE(val1, val2, val3) (((val1) << 16) | ((val2) << 8) | (val3))
switch (COMBINE(attr, attrFmt.cnt, attrFmt.type)) {
DEFAULT_FATAL("not handled: attr {}, cnt {}, type {}", static_cast<int>(attr), static_cast<int>(attrFmt.cnt),
static_cast<int>(attrFmt.type));
case COMBINE(GX_VA_POS, GX_POS_XYZ, GX_F32):
case COMBINE(GX_VA_NRM, GX_NRM_XYZ, GX_F32):
attrArrays[attr].count = 3;
@ -118,9 +121,6 @@ static u32 prepare_vtx_buffer(ByteBuffer& buf, GXVtxFmt vtxfmt, const u8* ptr, u
vtxSize += 4;
outVtxSize += 16;
break;
default:
Log.report(LOG_FATAL, FMT_STRING("not handled: attr {}, cnt {}, type {}"), attr, attrFmt.cnt, attrFmt.type);
break;
}
#undef COMBINE
break;
@ -134,8 +134,6 @@ static u32 prepare_vtx_buffer(ByteBuffer& buf, GXVtxFmt vtxfmt, const u8* ptr, u
outVtxSize += 2;
indexedAttrs[attr] = true;
break;
default:
Log.report(LOG_FATAL, FMT_STRING("unhandled attribute type {}"), g_gxState.vtxDesc[attr]);
}
}
// Align to 4
@ -263,9 +261,8 @@ static u16 prepare_idx_buffer(ByteBuffer& buf, GXPrimitive prim, u16 vtxStart, u
}
numIndices += 3;
}
} else {
Log.report(LOG_FATAL, FMT_STRING("Unsupported primitive type {}"), static_cast<u32>(prim));
}
} else
UNLIKELY FATAL("unsupported primitive type {}", static_cast<u32>(prim));
return numIndices;
}
@ -293,6 +290,7 @@ void queue_surface(const u8* dlStart, u32 dlSize) noexcept {
u8 opcode = cmd & GX_OPCODE_MASK;
switch (opcode) {
DEFAULT_FATAL("unimplemented opcode: {}", opcode);
case GX_NOP:
continue;
case GX_LOAD_BP_REG:
@ -315,10 +313,7 @@ void queue_surface(const u8* dlStart, u32 dlSize) noexcept {
case GX_DRAW_LINES:
case GX_DRAW_LINE_STRIP:
case GX_DRAW_POINTS:
Log.report(LOG_FATAL, FMT_STRING("unimplemented prim type: {}"), opcode);
break;
default:
Log.report(LOG_FATAL, FMT_STRING("unimplemented opcode: {}"), opcode);
FATAL("unimplemented prim type: {}", opcode);
break;
}
}
@ -415,6 +410,7 @@ wgpu::RenderPipeline create_pipeline(const State& state, [[maybe_unused]] const
}
const auto attr = static_cast<GXAttr>(i);
switch (attr) {
DEFAULT_FATAL("unhandled direct attr {}", i);
case GX_VA_POS:
case GX_VA_NRM:
vtxAttrs[shaderLocation] = wgpu::VertexAttribute{
@ -448,8 +444,6 @@ wgpu::RenderPipeline create_pipeline(const State& state, [[maybe_unused]] const
};
offset += 8;
break;
default:
Log.report(LOG_FATAL, FMT_STRING("unhandled direct attr {}"), i);
}
++shaderLocation;
}

View File

@ -21,6 +21,7 @@ struct TextureFormatInfo {
};
static TextureFormatInfo format_info(wgpu::TextureFormat format) {
switch (format) {
DEFAULT_FATAL("unimplemented texture format {}", magic_enum::enum_name(format));
case wgpu::TextureFormat::R8Unorm:
return {1, 1, 1, false};
case wgpu::TextureFormat::R16Sint:
@ -30,9 +31,6 @@ static TextureFormatInfo format_info(wgpu::TextureFormat format) {
return {1, 1, 4, false};
case wgpu::TextureFormat::BC1RGBAUnorm:
return {4, 4, 8, true};
default:
Log.report(LOG_FATAL, FMT_STRING("format_info: unimplemented format {}"), magic_enum::enum_name(format));
unreachable();
}
}
static wgpu::Extent3D physical_size(wgpu::Extent3D size, TextureFormatInfo info) {
@ -67,11 +65,8 @@ TextureHandle new_static_texture_2d(uint32_t width, uint32_t height, uint32_t mi
const uint32_t heightBlocks = physicalSize.height / info.blockHeight;
const uint32_t bytesPerRow = widthBlocks * info.blockSize;
const uint32_t dataSize = bytesPerRow * heightBlocks * mipSize.depthOrArrayLayers;
if (offset + dataSize > data.size()) {
Log.report(LOG_FATAL, FMT_STRING("new_static_texture_2d[{}]: expected at least {} bytes, got {}"), label,
CHECK(offset + dataSize <= data.size(), "new_static_texture_2d[{}]: expected at least {} bytes, got {}", label,
offset + dataSize, data.size());
unreachable();
}
const wgpu::ImageCopyTexture dstView{
.texture = ref.texture,
.mipLevel = mip,
@ -176,11 +171,8 @@ void write_texture(const TextureRef& ref, ArrayRef<uint8_t> data) noexcept {
const uint32_t heightBlocks = physicalSize.height / info.blockHeight;
const uint32_t bytesPerRow = widthBlocks * info.blockSize;
const uint32_t dataSize = bytesPerRow * heightBlocks * mipSize.depthOrArrayLayers;
if (offset + dataSize > data.size()) {
Log.report(LOG_FATAL, FMT_STRING("write_texture: expected at least {} bytes, got {}"), offset + dataSize,
CHECK(offset + dataSize <= data.size(), "write_texture: expected at least {} bytes, got {}", offset + dataSize,
data.size());
unreachable();
}
// auto dstView = wgpu::ImageCopyTexture{
// .texture = ref.texture,
// .mipLevel = mip,

View File

@ -35,8 +35,6 @@ struct TextureRef {
, isRenderTexture(isRenderTexture) {}
};
using TextureHandle = std::shared_ptr<TextureRef>;
TextureHandle new_static_texture_2d(uint32_t width, uint32_t height, uint32_t mips, u32 format, ArrayRef<uint8_t> data,
const char* label) noexcept;
TextureHandle new_dynamic_texture_2d(uint32_t width, uint32_t height, uint32_t mips, u32 format,

View File

@ -569,9 +569,7 @@ ByteBuffer BuildRGBA8FromCMPR(uint32_t width, uint32_t height, uint32_t mips, Ar
ByteBuffer convert_texture(u32 format, uint32_t width, uint32_t height, uint32_t mips, ArrayRef<uint8_t> data) {
switch (format) {
default:
Log.report(LOG_FATAL, FMT_STRING("convert_texture: unknown format supplied {}"), format);
unreachable();
DEFAULT_FATAL("convert_texture: unknown texture format {}", format);
case GX_TF_R8_PC:
case GX_TF_RGBA8_PC:
return {}; // No conversion
@ -588,8 +586,7 @@ ByteBuffer convert_texture(u32 format, uint32_t width, uint32_t height, uint32_t
case GX_TF_C8:
return BuildC8FromGCN(width, height, mips, data);
case GX_TF_C14X2:
Log.report(LOG_FATAL, FMT_STRING("convert_texture: C14X2 unimplemented"));
unreachable();
FATAL("convert_texture: C14X2 unimplemented");
case GX_TF_RGB565:
return BuildRGB565FromGCN(width, height, mips, data);
case GX_TF_RGB5A3:

View File

@ -32,6 +32,29 @@ using namespace std::string_view_literals;
#define ALIGN(x, a) (((x) + ((a)-1)) & ~((a)-1))
#endif
#if !defined(__has_cpp_attribute)
#define __has_cpp_attribute(name) 0
#endif
#if __has_cpp_attribute(unlikely)
#define UNLIKELY [[unlikely]]
#else
#define UNLIKELY
#endif
#define FATAL(msg, ...) \
{ \
Log.report(LOG_FATAL, FMT_STRING(msg), ##__VA_ARGS__); \
unreachable(); \
}
#define ASSERT(cond, msg, ...) \
if (!(cond)) \
UNLIKELY FATAL(msg, ##__VA_ARGS__)
#ifdef NDEBUG
#define CHECK
#else
#define CHECK(cond, msg, ...) ASSERT(cond, msg, ##__VA_ARGS__)
#endif
#define DEFAULT_FATAL(msg, ...) UNLIKELY default: FATAL(msg, ##__VA_ARGS__)
namespace aurora {
extern AuroraConfig g_config;
@ -72,8 +95,6 @@ public:
template <size_t N>
constexpr ArrayRef(const std::array<T, N>& arr) : ptr(arr.data()), length(arr.size()) {}
ArrayRef(const std::vector<T>& vec) : ptr(vec.data()), length(vec.size()) {}
// template <size_t N>
// ArrayRef(const rstl::reserved_vector<T, N>& vec) : ptr(vec.data()), length(vec.size()) {}
const T* data() const { return ptr; }
size_t size() const { return length; }

View File

@ -268,8 +268,7 @@ void create_copy_bind_group() {
}
static void error_callback(WGPUErrorType type, char const* message, void* userdata) {
Log.report(LOG_FATAL, FMT_STRING("WebGPU error {}: {}"), magic_enum::enum_name(static_cast<WGPUErrorType>(type)),
message);
FATAL("WebGPU error {}: {}", static_cast<int>(type), message);
}
#ifndef WEBGPU_DAWN
@ -378,9 +377,7 @@ bool initialize(AuroraBackend auroraBackend) {
.label = "Surface",
};
g_surface = wgpu::Surface::Acquire(wgpuInstanceCreateSurface(g_instance.Get(), &surfaceDescriptor));
if (!g_surface) {
Log.report(LOG_FATAL, FMT_STRING("Failed to initialize surface"));
}
ASSERT(g_surface, "Failed to initialize surface");
const WGPURequestAdapterOptions options{
.compatibleSurface = g_surface.Get(),
.powerPreference = WGPUPowerPreference_HighPerformance,

View File

@ -2,3 +2,8 @@
#ifdef EMSCRIPTEN
#include <emscripten.h>
#endif
static inline bool operator==(const wgpu::Extent3D& lhs, const wgpu::Extent3D& rhs) {
return lhs.width == rhs.width && lhs.height == rhs.height && lhs.depthOrArrayLayers == rhs.depthOrArrayLayers;
}
static inline bool operator!=(const wgpu::Extent3D& lhs, const wgpu::Extent3D& rhs) { return !(lhs == rhs); }

View File

@ -109,10 +109,7 @@ static void set_window_icon() noexcept {
}
auto* iconSurface = SDL_CreateRGBSurfaceFrom(g_config.iconRGBA8, g_config.iconWidth, g_config.iconHeight, 32,
4 * g_config.iconWidth, 0x000000ff, 0x0000ff00, 0x00ff0000, 0xff000000);
if (iconSurface == nullptr) {
Log.report(LOG_FATAL, FMT_STRING("Failed to create icon surface: {}"), SDL_GetError());
unreachable();
}
ASSERT(iconSurface != nullptr, "Failed to create icon surface: {}", SDL_GetError());
SDL_SetWindowIcon(g_window, iconSurface);
SDL_FreeSurface(iconSurface);
}
@ -197,10 +194,7 @@ void show_window() {
}
bool initialize() {
if (SDL_Init(SDL_INIT_EVERYTHING & ~SDL_INIT_HAPTIC) < 0) {
Log.report(LOG_FATAL, FMT_STRING("Error initializing SDL: {}"), SDL_GetError());
unreachable();
}
ASSERT(SDL_Init(SDL_INIT_EVERYTHING & ~SDL_INIT_HAPTIC) == 0, "Error initializing SDL: {}", SDL_GetError());
#if !defined(_WIN32) && !defined(__APPLE__)
SDL_SetHint(SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR, "0");