GX Backend work

This commit is contained in:
Jack Andersen 2015-10-13 16:16:21 -10:00
parent 9a47c3dd6d
commit 2148bc2459
6 changed files with 654 additions and 81 deletions

View File

@ -1,16 +1,22 @@
#ifndef HECLBACKEND_HPP
#define HECLBACKEND_HPP
#include "HECL/Frontend.hpp"
namespace HECL
{
namespace Frontend {struct IR;}
namespace Backend
{
using IR = Frontend::IR;
using Diagnostics = Frontend::Diagnostics;
using SourceLocation = Frontend::SourceLocation;
using ArithmeticOp = IR::Instruction::ArithmeticOpType;
/** Root interface for HECL shader backends; consumes frontend IR to rebuild target state.
 *  Diff residue removed: only the (IR, Diagnostics) overload belongs here. */
class IBackend
{
public:
    /* Polymorphic base: virtual dtor so deleting through IBackend* is safe */
    virtual ~IBackend() = default;
    /** Clear backend state and translate `ir`, reporting errors through `diag` */
    virtual void reset(const IR& ir, Diagnostics& diag)=0;
};
}

View File

@ -2,7 +2,6 @@
#define HECLBACKEND_GX_HPP
#include "Backend.hpp"
#include "HECL/Frontend.hpp"
#include <Athena/Types.hpp>
#include <stdint.h>
#include <stdlib.h>
@ -185,25 +184,37 @@ struct GX : IBackend
/** One GX texture-coordinate generator slot (max 8 per material).
 *  Members carry in-class defaults; diff residue of the old uninitialized
 *  declarations removed. */
struct TexCoordGen
{
    TexGenSrc m_src = TG_TEX0;
    TexMtx m_mtx = IDENTITY;

    /* Not actually part of GX, but a way to relate out-of-band
     * texmtx animation parameters */
    std::string m_gameFunction;
    std::vector<atVec4f> m_gameArgs;
};
unsigned m_tcgCount = 0;
TexCoordGen m_tcgs;
TexCoordGen m_tcgs[8];
unsigned m_texMtxCount = 0;
TexCoordGen* m_texMtxRefs[8];
/** One GX TEV combiner stage (max 16). Defaults describe a pass-through
 *  stage: out = 0*0 + CPREV into TEVPREV, konstants at 1, no texture bound.
 *  Diff residue of the old uninitialized member declarations removed. */
struct TEVStage
{
    TevOp m_op = TEV_ADD;
    TevColorArg m_color[4] = {CC_ZERO, CC_ZERO, CC_ZERO, CC_CPREV};
    TevAlphaArg m_alpha[4] = {CA_ZERO, CA_ZERO, CA_ZERO, CA_APREV};
    TevKColorSel m_kColor = TEV_KCSEL_1;
    TevKAlphaSel m_kAlpha = TEV_KASEL_1;
    TevRegID m_regOut = TEVPREV;
    /* -1 == no texture map / texgen bound to this stage */
    int m_texMapIdx = -1;
    int m_texGenIdx = -1;
    /* Convenience Links */
    TEVStage* m_prev = nullptr;
    TEVStage* m_next = nullptr;
};
unsigned m_tevCount = 0;
TEVStage m_tevs;
TEVStage m_tevs[16];
enum BlendFactor
{
@ -232,10 +243,19 @@ struct GX : IBackend
color[0] = uint8_t(std::min(std::max(vec.vec[0] * 255.f, 0.f), 255.f));
color[1] = uint8_t(std::min(std::max(vec.vec[1] * 255.f, 0.f), 255.f));
color[2] = uint8_t(std::min(std::max(vec.vec[2] * 255.f, 0.f), 255.f));
color[3] = uint8_t(std::min(std::max(vec.vec[3] * 255.f, 0.f), 255.f));
color[3] = 0;
return *this;
}
Color& operator=(uint8_t val)
{
    /* Broadcast one scalar into all four channels (r, g, b, a) */
    for (int ch = 0; ch < 4; ++ch)
        color[ch] = val;
    return *this;
}
Color(const atVec4f& vec) {*this = vec;}
Color(uint8_t val) {*this = val;}
bool operator==(const Color& other) const {return num == other.num;}
bool operator!=(const Color& other) const {return num != other.num;}
uint8_t operator[](size_t idx) const {return color[idx];}
@ -244,12 +264,48 @@ struct GX : IBackend
unsigned m_kcolorCount = 0;
Color m_kcolors[4];
void reset(const Frontend::IR& ir);
int m_alphaTraceStage = -1;
void reset(const IR& ir, Diagnostics& diag);
private:
unsigned addKColor(const Color& color);
void RecursiveTraceColor(const Frontend::IR::Instruction& inst);
void RecursiveTraceAlpha(const Frontend::IR::Instruction& inst);
/* Discriminated union returned by the recursive IR tracers: records what
 * kind of TEV input a traced IR subtree resolved to. `type` selects which
 * union member is valid; readers MUST check `type` before accessing. */
struct TraceResult
{
enum
{
TraceInvalid,
TraceTEVStage,
TraceTEVColorArg,
TraceTEVAlphaArg,
TraceTEVKColorSel,
TraceTEVKAlphaSel
} type;
union
{
GX::TEVStage* tevStage;
GX::TevColorArg tevColorArg;
GX::TevAlphaArg tevAlphaArg;
GX::TevKColorSel tevKColorSel;
GX::TevKAlphaSel tevKAlphaSel;
};
/* One converting constructor per payload kind keeps the tag and the
 * active union member in sync at the single point of construction */
TraceResult() : type(TraceInvalid) {}
TraceResult(GX::TEVStage* stage) : type(TraceTEVStage), tevStage(stage) {}
TraceResult(GX::TevColorArg arg) : type(TraceTEVColorArg), tevColorArg(arg) {}
TraceResult(GX::TevAlphaArg arg) : type(TraceTEVAlphaArg), tevAlphaArg(arg) {}
TraceResult(GX::TevKColorSel arg) : type(TraceTEVKColorSel), tevKColorSel(arg) {}
TraceResult(GX::TevKAlphaSel arg) : type(TraceTEVKAlphaSel), tevKAlphaSel(arg) {}
};
unsigned addKColor(Diagnostics& diag, const SourceLocation& loc, const Color& color);
unsigned addKAlpha(Diagnostics& diag, const SourceLocation& loc, float alpha);
unsigned addTexCoordGen(Diagnostics& diag, const SourceLocation& loc,
TexGenSrc src, TexMtx mtx);
TEVStage& addTEVStage(Diagnostics& diag, const SourceLocation& loc);
TraceResult RecursiveTraceColor(const IR& ir, Diagnostics& diag,
const IR::Instruction& inst);
TraceResult RecursiveTraceAlpha(const IR& ir, Diagnostics& diag,
const IR::Instruction& inst);
unsigned RecursiveTraceTexGen(const IR& ir, Diagnostics& diag, const IR::Instruction& inst, TexMtx mtx);
};
}

View File

@ -5,6 +5,7 @@
#include <vector>
#include <forward_list>
#include <Athena/Types.hpp>
#include <HECL/HECL.hpp>
namespace HECL
{
@ -26,6 +27,8 @@ public:
void setName(const std::string& name) {m_name = name;}
void reportParserErr(const SourceLocation& l, const char* format, ...);
void reportLexerErr(const SourceLocation& l, const char* format, ...);
void reportCompileErr(const SourceLocation& l, const char* format, ...);
void reportBackendErr(const SourceLocation& l, const char* format, ...);
};
class Parser
@ -87,6 +90,7 @@ struct IR
{
OpType m_op = OpNone;
RegID m_target = -1;
SourceLocation m_loc;
struct
{
@ -120,7 +124,50 @@ struct IR
size_t m_instIdx;
} m_swizzle;
Instruction(OpType type) : m_op(type) {}
Instruction(OpType type, const SourceLocation& loc) : m_op(type), m_loc(loc) {}
/* Number of operand (child) instructions this instruction references.
 * Fatal-logs for op types that carry no children; the trailing return
 * is unreachable in practice and only satisfies the compiler. */
ssize_t getChildCount() const
{
switch (m_op)
{
case OpCall:
return m_call.m_argInstIdxs.size();
case OpArithmetic:
return 2;
case OpSwizzle:
return 1;
default:
LogModule.report(LogVisor::FatalError, "invalid op type");
}
return -1;
}
/* Resolve child operand `idx` of this instruction within `ir`'s instruction
 * pool. Children are stored as indices into ir.m_instructions, not pointers.
 * Fatal-logs on out-of-range idx or on op types without children. */
const IR::Instruction& getChildInst(const IR& ir, size_t idx) const
{
switch (m_op)
{
case OpCall:
/* .at() additionally bounds-checks the stored index */
return ir.m_instructions.at(m_call.m_argInstIdxs.at(idx));
case OpArithmetic:
if (idx > 1)
LogModule.report(LogVisor::FatalError, "arithmetic child idx must be 0 or 1");
return ir.m_instructions.at(m_arithmetic.m_instIdxs[idx]);
case OpSwizzle:
if (idx > 0)
LogModule.report(LogVisor::FatalError, "swizzle child idx must be 0");
return ir.m_instructions.at(m_swizzle.m_instIdx);
default:
LogModule.report(LogVisor::FatalError, "invalid op type");
}
/* Unreachable; report() above is fatal */
return *this;
}
/* Immediate vector payload; only valid for OpLoadImm instructions
 * (fatal-logs otherwise). */
const atVec4f& getImmVec() const
{
if (m_op != OpLoadImm)
LogModule.report(LogVisor::FatalError, "invalid op type");
return m_loadImm.m_immVec;
}
};
size_t m_regCount = 0;
@ -151,15 +198,15 @@ class Lexer
OperationNode* m_root = nullptr;
/* Helper for relinking operator precedence */
static void ReconnectArithmetic(OperationNode* sn, OperationNode** lastSub, OperationNode** newSub);
void ReconnectArithmetic(OperationNode* sn, OperationNode** lastSub, OperationNode** newSub) const;
/* Recursive IR compile funcs */
static void RecursiveFuncCompile(IR& ir, const Lexer::OperationNode* funcNode, IR::RegID target);
static void RecursiveGroupCompile(IR& ir, const Lexer::OperationNode* groupNode, IR::RegID target);
static void EmitVec3(IR& ir, const Lexer::OperationNode* funcNode, IR::RegID target);
static void EmitVec4(IR& ir, const Lexer::OperationNode* funcNode, IR::RegID target);
static void EmitArithmetic(IR& ir, const Lexer::OperationNode* arithNode, IR::RegID target);
static void EmitVectorSwizzle(IR& ir, const Lexer::OperationNode* swizNode, IR::RegID target);
void RecursiveFuncCompile(IR& ir, const Lexer::OperationNode* funcNode, IR::RegID target) const;
void RecursiveGroupCompile(IR& ir, const Lexer::OperationNode* groupNode, IR::RegID target) const;
void EmitVec3(IR& ir, const Lexer::OperationNode* funcNode, IR::RegID target) const;
void EmitVec4(IR& ir, const Lexer::OperationNode* funcNode, IR::RegID target) const;
void EmitArithmetic(IR& ir, const Lexer::OperationNode* arithNode, IR::RegID target) const;
void EmitVectorSwizzle(IR& ir, const Lexer::OperationNode* swizNode, IR::RegID target) const;
public:
void reset();
@ -183,6 +230,8 @@ public:
return m_lexer.compileIR();
}
Diagnostics& getDiagnostics() {return m_diag;}
Frontend() : m_parser(m_diag), m_lexer(m_diag) {}
};

View File

@ -1,54 +1,471 @@
#include <LogVisor/LogVisor.hpp>
#include "HECL/Backend/GX.hpp"
static LogVisor::LogModule Log("HECL::GX");
#include <map>
namespace HECL
{
namespace Backend
{
unsigned GX::addKColor(const Color& color)
unsigned GX::addKColor(Diagnostics& diag, const SourceLocation& loc, const Color& color)
{
for (unsigned i=0 ; i<m_kcolorCount ; ++i)
if (m_kcolors[i] == color)
return i;
if (m_kcolorCount >= 4)
Log.report(LogVisor::FatalError, "GX KColor overflow");
diag.reportBackendErr(loc, "GX KColor overflow");
m_kcolors[m_kcolorCount] = color;
return m_kcolorCount++;
}
void GX::RecursiveTraceColor(const Frontend::IR::Instruction& inst)
/* Register a konstant alpha value, returning the kcolor register index it
 * lives in. Packs alphas into the alpha channel of existing kcolor slots:
 * the RGB-only assignment (operator=(atVec4f)) leaves color[3] == 0, which
 * this treats as a vacant alpha slot to claim. Reports a fatal backend
 * error via diag on overflow of the 4 kcolor registers. */
unsigned GX::addKAlpha(Diagnostics& diag, const SourceLocation& loc, float alpha)
{
/* Quantize [0,1] float alpha to an 8-bit channel value (clamped) */
uint8_t ai = uint8_t(std::min(std::max(alpha * 255.f, 0.f), 255.f));
for (unsigned i=0 ; i<m_kcolorCount ; ++i)
{
if (m_kcolors[i].color[3] == ai)
return i;
else if (m_kcolors[i].color[3] == 0)
{
/* Vacant alpha slot; claim it for this value */
m_kcolors[i].color[3] = ai;
return i;
}
}
if (m_kcolorCount >= 4)
diag.reportBackendErr(loc, "GX KColor overflow");
/* operator=(uint8_t) broadcasts ai into all four channels of the new slot */
m_kcolors[m_kcolorCount] = ai;
return m_kcolorCount++;
}
/* Register a texcoord generator with the given source and matrix, returning
 * its slot index. Identical (src, mtx) pairs are deduplicated. Reports a
 * fatal backend error via diag when all 8 generator slots are in use. */
unsigned GX::addTexCoordGen(Diagnostics& diag, const SourceLocation& loc,
TexGenSrc src, TexMtx mtx)
{
/* Reuse an identical generator if one is already registered */
for (unsigned i=0 ; i<m_tcgCount ; ++i)
{
TexCoordGen& tcg = m_tcgs[i];
if (tcg.m_src == src && tcg.m_mtx == mtx)
return i;
}
/* GX supports at most 8 texcoord generators */
if (m_tcgCount >= 8)
diag.reportBackendErr(loc, "GX TexCoordGen overflow");
GX::TexCoordGen& newTcg = m_tcgs[m_tcgCount];
newTcg.m_src = src;
newTcg.m_mtx = mtx;
return m_tcgCount++;
}
/* Append a fresh TEV stage and return a reference to it. Reports a fatal
 * backend error via diag when all 16 hardware stages are in use. */
GX::TEVStage& GX::addTEVStage(Diagnostics& diag, const SourceLocation& loc)
{
/* GX supports at most 16 TEV stages */
if (m_tevCount >= 16)
diag.reportBackendErr(loc, "GX TEV stage overflow");
GX::TEVStage& newTEV = m_tevs[m_tevCount];
if (m_tevCount)
{
/* Maintain the prev/next convenience chain between consecutive stages */
newTEV.m_prev = &m_tevs[m_tevCount-1];
newTEV.m_prev->m_next = &newTEV;
}
++m_tevCount;
return newTEV;
}
/** Resolve an IR call subtree describing a texcoord source into a texcoord
 *  generator slot, returning its index. Built-ins: UV(layerIdx), Normal,
 *  View. Any other name is treated as a game-specific animation function
 *  wrapping an inner source; its remaining immediate args are captured
 *  out-of-band on the generator and a dedicated TEXMTX slot is assigned. */
unsigned GX::RecursiveTraceTexGen(const IR& ir, Diagnostics& diag, const IR::Instruction& inst, TexMtx mtx)
{
    if (inst.m_op != IR::OpCall)
        diag.reportBackendErr(inst.m_loc, "TexCoordGen resolution requires function");
    const std::string& tcgName = inst.m_call.m_name;
    if (!tcgName.compare("UV"))
    {
        if (inst.getChildCount() < 1)
            diag.reportBackendErr(inst.m_loc, "TexCoordGen UV(layerIdx) requires one argument");
        const IR::Instruction& idxInst = inst.getChildInst(ir, 0);
        const atVec4f& idxImm = idxInst.getImmVec();
        return addTexCoordGen(diag, inst.m_loc, TexGenSrc(TG_TEX0 + unsigned(idxImm.vec[0])), mtx);
    }
    else if (!tcgName.compare("Normal"))
        return addTexCoordGen(diag, inst.m_loc, TG_NRM, mtx);
    else if (!tcgName.compare("View"))
        return addTexCoordGen(diag, inst.m_loc, TG_POS, mtx);

    /* Otherwise treat as game-specific function */
    /* Guard the recursive source argument; .at(0) in getChildInst would
     * otherwise throw instead of reporting a proper diagnostic */
    if (inst.getChildCount() < 1)
        diag.reportBackendErr(inst.m_loc, "TexCoordGen functions require a source argument");
    /* m_texMtxRefs has 8 slots; guard before writing */
    if (m_texMtxCount >= 8)
        diag.reportBackendErr(inst.m_loc, "GX TexMtx overflow");
    const IR::Instruction& tcgSrcInst = inst.getChildInst(ir, 0);
    /* Each animated generator gets its own TEXMTX slot (3 rows apart) */
    unsigned idx = RecursiveTraceTexGen(ir, diag, tcgSrcInst, TexMtx(TEXMTX0 + m_texMtxCount * 3));
    GX::TexCoordGen& tcg = m_tcgs[idx];
    m_texMtxRefs[m_texMtxCount] = &tcg;
    ++m_texMtxCount;
    tcg.m_gameFunction = tcgName;
    tcg.m_gameArgs.clear();
    /* Remaining children are immediate parameters for the game function */
    for (ssize_t i=1 ; i<inst.getChildCount() ; ++i)
    {
        const IR::Instruction& ci = inst.getChildInst(ir, i);
        tcg.m_gameArgs.push_back(ci.getImmVec());
    }
    return idx;
}
/** Recursively lower the color side of the IR expression tree into TEV
 *  stages/arguments. Returns a TraceResult describing what the subtree
 *  resolved to. Diff residue removed (duplicate case labels, old "texture"
 *  comparison, old Log.report default case); also fixes a union-misuse bug
 *  in the ColorArg*Stage multiply branch (read bTrace.tevColorArg while
 *  bTrace held the stage pointer — the arg is in aTrace). */
GX::TraceResult GX::RecursiveTraceColor(const IR& ir, Diagnostics& diag, const IR::Instruction& inst)
{
    switch (inst.m_op)
    {
    case IR::OpCall:
    {
        const std::string& name = inst.m_call.m_name;
        if (!name.compare("Texture"))
        {
            TEVStage& newStage = addTEVStage(diag, inst.m_loc);
            if (inst.getChildCount() < 2)
                diag.reportBackendErr(inst.m_loc, "Texture(map, texgen) requires 2 arguments");
            const IR::Instruction& mapInst = inst.getChildInst(ir, 0);
            const atVec4f& mapImm = mapInst.getImmVec();
            newStage.m_texMapIdx = unsigned(mapImm.vec[0]);
            newStage.m_color[0] = CC_TEXC;
            const IR::Instruction& tcgInst = inst.getChildInst(ir, 1);
            newStage.m_texGenIdx = RecursiveTraceTexGen(ir, diag, tcgInst, IDENTITY);
            return TraceResult(&newStage);
        }
        else if (!name.compare("ColorReg"))
        {
            if (inst.getChildCount() < 1)
                diag.reportBackendErr(inst.m_loc, "ColorReg(idx) requires one argument");
            const IR::Instruction& idxInst = inst.getChildInst(ir, 0);
            unsigned idx = unsigned(idxInst.getImmVec().vec[0]);
            /* Color reg enums are interleaved with alpha regs; stride 2 */
            return TraceResult(GX::TevColorArg(CC_C0 + idx * 2));
        }
        else if (!name.compare("Lighting"))
        {
            return TraceResult(CC_RASC);
        }
        else
            diag.reportBackendErr(inst.m_loc, "GX backend unable to interpret '%s'", name.c_str());
        break;
    }
    case IR::OpLoadImm:
    {
        const atVec4f& vec = inst.m_loadImm.m_immVec;
        /* Hardware-constant shortcuts avoid burning a kcolor register */
        if (vec.vec[0] == 0.f && vec.vec[1] == 0.f && vec.vec[2] == 0.f)
            return TraceResult(CC_ZERO);
        else if (vec.vec[0] == 1.f && vec.vec[1] == 1.f && vec.vec[2] == 1.f)
            return TraceResult(CC_ONE);
        unsigned idx = addKColor(diag, inst.m_loc, vec);
        return TraceResult(TevKColorSel(TEV_KCSEL_K0 + idx));
    }
    case IR::OpArithmetic:
    {
        ArithmeticOp op = inst.m_arithmetic.m_op;
        const IR::Instruction& aInst = inst.getChildInst(ir, 0);
        TraceResult aTrace = RecursiveTraceColor(ir, diag, aInst);
        const IR::Instruction& bInst = inst.getChildInst(ir, 1);
        TraceResult bTrace = RecursiveTraceColor(ir, diag, bInst);
        switch (op)
        {
        case ArithmeticOp::ArithmeticOpAdd:
        {
            if (aTrace.type == TraceResult::TraceTEVStage &&
                bTrace.type == TraceResult::TraceTEVStage)
            {
                TEVStage* a = aTrace.tevStage;
                TEVStage* b = bTrace.tevStage;
                /* b's default CC_CPREV d-input already adds a's output */
                if (b->m_prev != a)
                    diag.reportBackendErr(inst.m_loc, "TEV stages must have monotonic progression");
                return TraceResult(b);
            }
            break;
        }
        case ArithmeticOp::ArithmeticOpSubtract:
        {
            if (aTrace.type == TraceResult::TraceTEVStage &&
                bTrace.type == TraceResult::TraceTEVStage)
            {
                TEVStage* a = aTrace.tevStage;
                TEVStage* b = bTrace.tevStage;
                if (b->m_prev != a)
                    diag.reportBackendErr(inst.m_loc, "TEV stages must have monotonic progression");
                b->m_op = TEV_SUB;
                return TraceResult(b);
            }
            break;
        }
        case ArithmeticOp::ArithmeticOpMultiply:
        {
            /* Multiply is expressed via the TEV c-input (lerp slot):
             * out = a*(1-c) + b*c + d; moving the operand into b and the
             * multiplier into c with a=d=0 yields b*c */
            if (aTrace.type == TraceResult::TraceTEVStage &&
                bTrace.type == TraceResult::TraceTEVStage)
            {
                TEVStage* a = aTrace.tevStage;
                TEVStage* b = bTrace.tevStage;
                if (b->m_prev != a)
                    diag.reportBackendErr(inst.m_loc, "TEV stages must have monotonic progression");
                if (a->m_color[2] != CC_ZERO)
                    diag.reportBackendErr(inst.m_loc, "unable to modify TEV stage for multiply combine");
                b->m_color[1] = b->m_color[0];
                b->m_color[0] = CC_ZERO;
                b->m_color[2] = CC_CPREV;
                b->m_color[3] = CC_ZERO;
                return TraceResult(b);
            }
            else if (aTrace.type == TraceResult::TraceTEVStage &&
                     bTrace.type == TraceResult::TraceTEVColorArg)
            {
                TEVStage* a = aTrace.tevStage;
                if (a->m_color[1] != CC_ZERO)
                    diag.reportBackendErr(inst.m_loc, "unable to modify TEV stage for multiply combine");
                a->m_color[1] = a->m_color[0];
                a->m_color[0] = CC_ZERO;
                a->m_color[2] = bTrace.tevColorArg;
                return TraceResult(a);
            }
            else if (aTrace.type == TraceResult::TraceTEVColorArg &&
                     bTrace.type == TraceResult::TraceTEVStage)
            {
                TEVStage* b = bTrace.tevStage;
                if (b->m_color[1] != CC_ZERO)
                    diag.reportBackendErr(inst.m_loc, "unable to modify TEV stage for multiply combine");
                b->m_color[1] = b->m_color[0];
                b->m_color[0] = CC_ZERO;
                /* BUGFIX: the color arg lives in aTrace in this branch;
                 * reading bTrace.tevColorArg aliased the stage pointer */
                b->m_color[2] = aTrace.tevColorArg;
                return TraceResult(b);
            }
            else if (aTrace.type == TraceResult::TraceTEVStage &&
                     bTrace.type == TraceResult::TraceTEVKColorSel)
            {
                TEVStage* a = aTrace.tevStage;
                if (a->m_kColor != TEV_KCSEL_1)
                    diag.reportBackendErr(inst.m_loc, "unable to modify TEV stage for KColor combine");
                if (a->m_color[1] != CC_ZERO)
                    diag.reportBackendErr(inst.m_loc, "unable to modify TEV stage for multiply combine");
                a->m_color[1] = a->m_color[0];
                a->m_color[0] = CC_ZERO;
                a->m_color[2] = CC_KONST;
                a->m_kColor = bTrace.tevKColorSel;
                return TraceResult(a);
            }
            else if (aTrace.type == TraceResult::TraceTEVKColorSel &&
                     bTrace.type == TraceResult::TraceTEVStage)
            {
                TEVStage* b = bTrace.tevStage;
                if (b->m_kColor != TEV_KCSEL_1)
                    diag.reportBackendErr(inst.m_loc, "unable to modify TEV stage for KColor combine");
                if (b->m_color[1] != CC_ZERO)
                    diag.reportBackendErr(inst.m_loc, "unable to modify TEV stage for multiply combine");
                b->m_color[1] = b->m_color[0];
                b->m_color[0] = CC_ZERO;
                b->m_color[2] = CC_KONST;
                b->m_kColor = aTrace.tevKColorSel;
                return TraceResult(b);
            }
            break;
        }
        default:
            diag.reportBackendErr(inst.m_loc, "invalid arithmetic op");
        }
        diag.reportBackendErr(inst.m_loc, "unable to convert arithmetic to TEV stage");
    }
    default:
        diag.reportBackendErr(inst.m_loc, "invalid color op");
    }
    return TraceResult();
}
void GX::RecursiveTraceAlpha(const Frontend::IR::Instruction& inst)
GX::TraceResult GX::RecursiveTraceAlpha(const IR& ir, Diagnostics& diag, const IR::Instruction& inst)
{
switch (inst.m_op)
{
case IR::OpCall:
{
const std::string& name = inst.m_call.m_name;
if (!name.compare("Texture"))
{
if (inst.getChildCount() < 2)
diag.reportBackendErr(inst.m_loc, "Texture(map, texgen) requires 2 arguments");
const IR::Instruction& mapInst = inst.getChildInst(ir, 0);
const atVec4f& mapImm = mapInst.getImmVec();
unsigned mapIdx = unsigned(mapImm.vec[0]);
int foundStage = -1;
for (int i=0 ; i<m_tevCount ; ++i)
{
TEVStage& testStage = m_tevs[i];
if (testStage.m_texMapIdx == mapIdx && i > m_alphaTraceStage)
{
foundStage = i;
break;
}
}
if (foundStage >= 0)
{
m_alphaTraceStage = foundStage;
TEVStage& stage = m_tevs[foundStage];
stage.m_alpha[0] = CA_TEXA;
return TraceResult(&stage);
}
TEVStage& newStage = addTEVStage(diag, inst.m_loc);
newStage.m_texMapIdx = mapIdx;
newStage.m_alpha[0] = CA_TEXA;
const IR::Instruction& tcgInst = inst.getChildInst(ir, 1);
newStage.m_texGenIdx = RecursiveTraceTexGen(ir, diag, tcgInst, IDENTITY);
return TraceResult(&newStage);
}
else if (!name.compare("ColorReg"))
{
const IR::Instruction& idxInst = inst.getChildInst(ir, 0);
unsigned idx = unsigned(idxInst.getImmVec().vec[0]);
return TraceResult(GX::TevAlphaArg(CA_A0 + idx));
}
else if (!name.compare("Lighting"))
{
return TraceResult(CA_RASA);
}
else
diag.reportBackendErr(inst.m_loc, "GX backend unable to interpret '%s'", name.c_str());
break;
}
case IR::OpLoadImm:
{
const atVec4f& vec = inst.m_loadImm.m_immVec;
if (vec.vec[0] == 0.f)
return TraceResult(CA_ZERO);
unsigned idx = addKAlpha(diag, inst.m_loc, vec.vec[0]);
return TraceResult(TevKAlphaSel(TEV_KASEL_K0_A + idx));
}
case IR::OpArithmetic:
{
ArithmeticOp op = inst.m_arithmetic.m_op;
const IR::Instruction& aInst = inst.getChildInst(ir, 0);
TraceResult aTrace = RecursiveTraceAlpha(ir, diag, aInst);
const IR::Instruction& bInst = inst.getChildInst(ir, 1);
TraceResult bTrace = RecursiveTraceAlpha(ir, diag, bInst);
switch (op)
{
case ArithmeticOp::ArithmeticOpAdd:
{
if (aTrace.type == TraceResult::TraceTEVStage &&
bTrace.type == TraceResult::TraceTEVStage)
{
TEVStage* a = aTrace.tevStage;
TEVStage* b = bTrace.tevStage;
if (b->m_prev != a)
diag.reportBackendErr(inst.m_loc, "TEV stages must have monotonic progression");
return TraceResult(b);
}
break;
}
case ArithmeticOp::ArithmeticOpSubtract:
{
if (aTrace.type == TraceResult::TraceTEVStage &&
bTrace.type == TraceResult::TraceTEVStage)
{
TEVStage* a = aTrace.tevStage;
TEVStage* b = bTrace.tevStage;
if (b->m_prev != a)
diag.reportBackendErr(inst.m_loc, "TEV stages must have monotonic progression");
if (b->m_op != TEV_SUB)
diag.reportBackendErr(inst.m_loc, "unable to integrate alpha subtraction into stage chain");
return TraceResult(b);
}
break;
}
case ArithmeticOp::ArithmeticOpMultiply:
{
if (aTrace.type == TraceResult::TraceTEVStage &&
bTrace.type == TraceResult::TraceTEVStage)
{
TEVStage* a = aTrace.tevStage;
TEVStage* b = bTrace.tevStage;
if (b->m_prev != a)
diag.reportBackendErr(inst.m_loc, "TEV stages must have monotonic progression");
if (a->m_alpha[2] != CA_ZERO)
diag.reportBackendErr(inst.m_loc, "unable to modify TEV stage for multiply combine");
b->m_alpha[1] = b->m_alpha[0];
b->m_alpha[0] = CA_ZERO;
b->m_alpha[2] = CA_APREV;
b->m_alpha[3] = CA_ZERO;
return TraceResult(b);
}
else if (aTrace.type == TraceResult::TraceTEVStage &&
bTrace.type == TraceResult::TraceTEVColorArg)
{
TEVStage* a = aTrace.tevStage;
if (a->m_alpha[1] != CA_ZERO)
diag.reportBackendErr(inst.m_loc, "unable to modify TEV stage for multiply combine");
a->m_alpha[1] = a->m_alpha[0];
a->m_alpha[0] = CA_ZERO;
a->m_alpha[2] = bTrace.tevAlphaArg;
return TraceResult(a);
}
else if (aTrace.type == TraceResult::TraceTEVColorArg &&
bTrace.type == TraceResult::TraceTEVStage)
{
TEVStage* b = bTrace.tevStage;
if (b->m_alpha[1] != CA_ZERO)
diag.reportBackendErr(inst.m_loc, "unable to modify TEV stage for multiply combine");
b->m_alpha[1] = b->m_alpha[0];
b->m_alpha[0] = CA_ZERO;
b->m_alpha[2] = bTrace.tevAlphaArg;
return TraceResult(b);
}
else if (aTrace.type == TraceResult::TraceTEVStage &&
bTrace.type == TraceResult::TraceTEVKColorSel)
{
TEVStage* a = aTrace.tevStage;
if (a->m_kAlpha != TEV_KASEL_1)
diag.reportBackendErr(inst.m_loc, "unable to modify TEV stage for KAlpha combine");
if (a->m_alpha[1] != CA_ZERO)
diag.reportBackendErr(inst.m_loc, "unable to modify TEV stage for multiply combine");
a->m_alpha[1] = a->m_alpha[0];
a->m_alpha[0] = CA_ZERO;
a->m_alpha[2] = CA_KONST;
a->m_kAlpha = bTrace.tevKAlphaSel;
return TraceResult(a);
}
else if (aTrace.type == TraceResult::TraceTEVKColorSel &&
bTrace.type == TraceResult::TraceTEVStage)
{
TEVStage* b = bTrace.tevStage;
if (b->m_kAlpha != TEV_KASEL_1)
diag.reportBackendErr(inst.m_loc, "unable to modify TEV stage for KAlpha combine");
if (b->m_alpha[1] != CA_ZERO)
diag.reportBackendErr(inst.m_loc, "unable to modify TEV stage for multiply combine");
b->m_alpha[1] = b->m_alpha[0];
b->m_alpha[0] = CA_ZERO;
b->m_alpha[2] = CA_KONST;
b->m_kAlpha = aTrace.tevKAlphaSel;
return TraceResult(b);
}
break;
}
default:
diag.reportBackendErr(inst.m_loc, "invalid arithmetic op");
}
diag.reportBackendErr(inst.m_loc, "unable to convert arithmetic to TEV stage");
}
default:
diag.reportBackendErr(inst.m_loc, "invalid alpha op");
}
return TraceResult();
}
void GX::reset(const Frontend::IR& ir)
void GX::reset(const IR& ir, Diagnostics& diag)
{
m_tevCount = 0;
m_tcgCount = 0;
m_texMtxCount = 0;
m_kcolorCount = 0;
m_alphaTraceStage = -1;
/* Final instruction is the root call by hecl convention */
const Frontend::IR::Instruction& rootCall = ir.m_instructions.back();
const IR::Instruction& rootCall = ir.m_instructions.back();
bool doAlpha = false;
if (!rootCall.m_call.m_name.compare("HECLOpaque"))
{
@ -69,16 +486,16 @@ void GX::reset(const Frontend::IR& ir)
}
/* Follow Color Chain */
const Frontend::IR::Instruction& colorRoot =
const IR::Instruction& colorRoot =
ir.m_instructions.at(rootCall.m_call.m_argInstIdxs.at(0));
RecursiveTraceColor(colorRoot);
RecursiveTraceColor(ir, diag, colorRoot);
/* Follow Alpha Chain */
if (doAlpha)
{
const Frontend::IR::Instruction& alphaRoot =
const IR::Instruction& alphaRoot =
ir.m_instructions.at(rootCall.m_call.m_argInstIdxs.at(1));
RecursiveTraceAlpha(alphaRoot);
RecursiveTraceAlpha(ir, diag, alphaRoot);
}
}

View File

@ -61,5 +61,49 @@ void Diagnostics::reportLexerErr(const SourceLocation& l, const char* fmt, ...)
free(result);
}
/** Format and fatally report a compile-phase error at source location `l`.
 *  Fixes the _WIN32 path: _vscprintf's return excludes the NUL terminator,
 *  so malloc(length)/vsnprintf(..., length, ...) truncated the final
 *  character; also the consumed va_list was reused without va_copy (UB). */
void Diagnostics::reportCompileErr(const SourceLocation& l, const char* fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    char* result = nullptr;
#ifdef _WIN32
    /* _vscprintf consumes `ap`; format from a fresh copy */
    va_list ap2;
    va_copy(ap2, ap);
    int length = _vscprintf(fmt, ap);
    /* +1 for the NUL terminator excluded from _vscprintf's count */
    result = (char*)malloc(length + 1);
    vsnprintf(result, length + 1, fmt, ap2);
    va_end(ap2);
#else
    vasprintf(&result, fmt, ap);
#endif
    va_end(ap);
    if (LogVisor::XtermColor)
        LogModule.report(LogVisor::FatalError, RED "Error compiling" NORMAL " '%s' " YELLOW "@%d:%d " NORMAL "\n%s",
                         m_name.c_str(), l.line, l.col, result);
    else
        LogModule.report(LogVisor::FatalError, "Error compiling '%s' @%d:%d\n%s",
                         m_name.c_str(), l.line, l.col, result);
    free(result);
}
/** Format and fatally report a backend-phase error at source location `l`.
 *  Fixes the _WIN32 path: _vscprintf's return excludes the NUL terminator,
 *  so malloc(length)/vsnprintf(..., length, ...) truncated the final
 *  character; also the consumed va_list was reused without va_copy (UB). */
void Diagnostics::reportBackendErr(const SourceLocation& l, const char* fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    char* result = nullptr;
#ifdef _WIN32
    /* _vscprintf consumes `ap`; format from a fresh copy */
    va_list ap2;
    va_copy(ap2, ap);
    int length = _vscprintf(fmt, ap);
    /* +1 for the NUL terminator excluded from _vscprintf's count */
    result = (char*)malloc(length + 1);
    vsnprintf(result, length + 1, fmt, ap2);
    va_end(ap2);
#else
    vasprintf(&result, fmt, ap);
#endif
    va_end(ap);
    if (LogVisor::XtermColor)
        LogModule.report(LogVisor::FatalError, RED "Backend error" NORMAL " in '%s' " YELLOW "@%d:%d " NORMAL "\n%s",
                         m_name.c_str(), l.line, l.col, result);
    else
        LogModule.report(LogVisor::FatalError, "Backend error in '%s' @%d:%d\n%s",
                         m_name.c_str(), l.line, l.col, result);
    free(result);
}
}
}

View File

@ -25,7 +25,7 @@ static IR::Instruction::ArithmeticOpType ArithType(int aChar)
}
}
void Lexer::ReconnectArithmetic(OperationNode* sn, OperationNode** lastSub, OperationNode** newSub)
void Lexer::ReconnectArithmetic(OperationNode* sn, OperationNode** lastSub, OperationNode** newSub) const
{
sn->m_sub = sn->m_prev;
sn->m_prev = nullptr;
@ -314,13 +314,13 @@ void Lexer::consumeAllTokens(Parser& parser)
m_root = firstNode->m_next;
}
void Lexer::EmitVec3(IR& ir, const Lexer::OperationNode* funcNode, IR::RegID target)
void Lexer::EmitVec3(IR& ir, const Lexer::OperationNode* funcNode, IR::RegID target) const
{
/* Optimization case: if empty call, emit zero imm load */
const Lexer::OperationNode* gn = funcNode->m_sub;
if (!gn)
{
ir.m_instructions.emplace_back(IR::OpLoadImm);
ir.m_instructions.emplace_back(IR::OpLoadImm, funcNode->m_tok.m_location);
ir.m_instructions.back().m_loadImm.m_immVec = {};
return;
}
@ -340,7 +340,7 @@ void Lexer::EmitVec3(IR& ir, const Lexer::OperationNode* funcNode, IR::RegID tar
}
if (opt)
{
ir.m_instructions.emplace_back(IR::OpLoadImm);
ir.m_instructions.emplace_back(IR::OpLoadImm, funcNode->m_tok.m_location);
atVec4f& vec = ir.m_instructions.back().m_loadImm.m_immVec;
vec.vec[0] = imms[0]->m_tokenFloat;
vec.vec[1] = imms[1]->m_tokenFloat;
@ -353,13 +353,13 @@ void Lexer::EmitVec3(IR& ir, const Lexer::OperationNode* funcNode, IR::RegID tar
RecursiveFuncCompile(ir, funcNode, target);
}
void Lexer::EmitVec4(IR& ir, const Lexer::OperationNode* funcNode, IR::RegID target)
void Lexer::EmitVec4(IR& ir, const Lexer::OperationNode* funcNode, IR::RegID target) const
{
/* Optimization case: if empty call, emit zero imm load */
const Lexer::OperationNode* gn = funcNode->m_sub;
if (!gn)
{
ir.m_instructions.emplace_back(IR::OpLoadImm);
ir.m_instructions.emplace_back(IR::OpLoadImm, funcNode->m_tok.m_location);
ir.m_instructions.back().m_loadImm.m_immVec = {};
return;
}
@ -379,7 +379,7 @@ void Lexer::EmitVec4(IR& ir, const Lexer::OperationNode* funcNode, IR::RegID tar
}
if (opt)
{
ir.m_instructions.emplace_back(IR::OpLoadImm);
ir.m_instructions.emplace_back(IR::OpLoadImm, funcNode->m_tok.m_location);
atVec4f& vec = ir.m_instructions.back().m_loadImm.m_immVec;
vec.vec[0] = imms[0]->m_tokenFloat;
vec.vec[1] = imms[1]->m_tokenFloat;
@ -392,7 +392,7 @@ void Lexer::EmitVec4(IR& ir, const Lexer::OperationNode* funcNode, IR::RegID tar
RecursiveFuncCompile(ir, funcNode, target);
}
void Lexer::EmitArithmetic(IR& ir, const Lexer::OperationNode* arithNode, IR::RegID target)
void Lexer::EmitArithmetic(IR& ir, const Lexer::OperationNode* arithNode, IR::RegID target) const
{
/* Evaluate operands */
atVec4f* opt[2] = {nullptr};
@ -418,7 +418,7 @@ void Lexer::EmitArithmetic(IR& ir, const Lexer::OperationNode* arithNode, IR::Re
break;
case Parser::TokenNumLiteral:
{
ir.m_instructions.emplace_back(IR::OpLoadImm);
ir.m_instructions.emplace_back(IR::OpLoadImm, arithNode->m_tok.m_location);
IR::Instruction& inst = ir.m_instructions.back();
inst.m_target = tgt;
inst.m_loadImm.m_immVec.vec[0] = tok.m_tokenFloat;
@ -431,7 +431,7 @@ void Lexer::EmitArithmetic(IR& ir, const Lexer::OperationNode* arithNode, IR::Re
EmitVectorSwizzle(ir, on, tgt);
break;
default:
LogModule.report(LogVisor::FatalError, "invalid lexer node for IR");
m_diag.reportCompileErr(tok.m_location, "invalid lexer node for IR");
break;
};
argInsts[i] = ir.m_instructions.size() - 1;
@ -471,19 +471,19 @@ void Lexer::EmitArithmetic(IR& ir, const Lexer::OperationNode* arithNode, IR::Re
eval.vec[3] = opt[0]->vec[3] / opt[1]->vec[3];
break;
default:
LogModule.report(LogVisor::FatalError, "invalid arithmetic type");
m_diag.reportCompileErr(arithNode->m_tok.m_location, "invalid arithmetic type");
break;
}
ir.m_instructions.pop_back();
ir.m_instructions.pop_back();
ir.m_instructions.emplace_back(IR::OpLoadImm);
ir.m_instructions.emplace_back(IR::OpLoadImm, arithNode->m_tok.m_location);
IR::Instruction& inst = ir.m_instructions.back();
inst.m_target = target;
inst.m_loadImm.m_immVec = eval;
}
else
{
ir.m_instructions.emplace_back(IR::OpArithmetic);
ir.m_instructions.emplace_back(IR::OpArithmetic, arithNode->m_tok.m_location);
IR::Instruction& inst = ir.m_instructions.back();
inst.m_target = target;
inst.m_arithmetic.m_instIdxs[0] = argInsts[0];
@ -494,7 +494,7 @@ void Lexer::EmitArithmetic(IR& ir, const Lexer::OperationNode* arithNode, IR::Re
}
}
static int SwizzleCompIdx(char aChar)
static int SwizzleCompIdx(char aChar, Diagnostics& diag, const SourceLocation& loc)
{
switch (aChar)
{
@ -511,16 +511,16 @@ static int SwizzleCompIdx(char aChar)
case 'a':
return 3;
default:
LogModule.report(LogVisor::FatalError, "invalid swizzle char %c", aChar);
diag.reportCompileErr(loc, "invalid swizzle char %c", aChar);
}
return -1;
}
void Lexer::EmitVectorSwizzle(IR& ir, const Lexer::OperationNode* swizNode, IR::RegID target)
void Lexer::EmitVectorSwizzle(IR& ir, const Lexer::OperationNode* swizNode, IR::RegID target) const
{
const std::string& str = swizNode->m_tok.m_tokenString;
if (str.size() != 1 && str.size() != 3 && str.size() != 4)
LogModule.report(LogVisor::FatalError, "%d component swizzles not supported", int(str.size()));
m_diag.reportCompileErr(swizNode->m_tok.m_location, "%d component swizzles not supported", int(str.size()));
size_t instCount = ir.m_instructions.size();
const Lexer::OperationNode* on = swizNode->m_sub;
@ -540,7 +540,7 @@ void Lexer::EmitVectorSwizzle(IR& ir, const Lexer::OperationNode* swizNode, IR::
break;
case Parser::TokenNumLiteral:
{
ir.m_instructions.emplace_back(IR::OpLoadImm);
ir.m_instructions.emplace_back(IR::OpLoadImm, swizNode->m_tok.m_location);
IR::Instruction& inst = ir.m_instructions.back();
inst.m_target = target;
inst.m_loadImm.m_immVec.vec[0] = tok.m_tokenFloat;
@ -553,7 +553,7 @@ void Lexer::EmitVectorSwizzle(IR& ir, const Lexer::OperationNode* swizNode, IR::
EmitVectorSwizzle(ir, on, target);
break;
default:
LogModule.report(LogVisor::FatalError, "invalid lexer node for IR");
m_diag.reportCompileErr(tok.m_location, "invalid lexer node for IR");
break;
};
@ -561,46 +561,47 @@ void Lexer::EmitVectorSwizzle(IR& ir, const Lexer::OperationNode* swizNode, IR::
if (ir.m_instructions.back().m_op == IR::OpLoadImm && (ir.m_instructions.size() - instCount == 1))
{
atVec4f* opt = &ir.m_instructions.back().m_loadImm.m_immVec;
const SourceLocation& loc = ir.m_instructions.back().m_loc;
atVec4f eval;
switch (str.size())
{
case 1:
eval = {opt->vec[SwizzleCompIdx(str[0])]};
eval = {opt->vec[SwizzleCompIdx(str[0], m_diag, loc)]};
break;
case 3:
eval.vec[0] = opt->vec[SwizzleCompIdx(str[0])];
eval.vec[1] = opt->vec[SwizzleCompIdx(str[1])];
eval.vec[2] = opt->vec[SwizzleCompIdx(str[2])];
eval.vec[0] = opt->vec[SwizzleCompIdx(str[0], m_diag, loc)];
eval.vec[1] = opt->vec[SwizzleCompIdx(str[1], m_diag, loc)];
eval.vec[2] = opt->vec[SwizzleCompIdx(str[2], m_diag, loc)];
eval.vec[3] = 1.0;
break;
case 4:
eval.vec[0] = opt->vec[SwizzleCompIdx(str[0])];
eval.vec[1] = opt->vec[SwizzleCompIdx(str[1])];
eval.vec[2] = opt->vec[SwizzleCompIdx(str[2])];
eval.vec[3] = opt->vec[SwizzleCompIdx(str[3])];
eval.vec[0] = opt->vec[SwizzleCompIdx(str[0], m_diag, loc)];
eval.vec[1] = opt->vec[SwizzleCompIdx(str[1], m_diag, loc)];
eval.vec[2] = opt->vec[SwizzleCompIdx(str[2], m_diag, loc)];
eval.vec[3] = opt->vec[SwizzleCompIdx(str[3], m_diag, loc)];
break;
default:
break;
}
ir.m_instructions.pop_back();
ir.m_instructions.emplace_back(IR::OpLoadImm);
ir.m_instructions.emplace_back(IR::OpLoadImm, swizNode->m_tok.m_location);
IR::Instruction& inst = ir.m_instructions.back();
inst.m_target = target;
inst.m_loadImm.m_immVec = eval;
}
else
{
ir.m_instructions.emplace_back(IR::OpSwizzle);
ir.m_instructions.emplace_back(IR::OpSwizzle, swizNode->m_tok.m_location);
IR::Instruction& inst = ir.m_instructions.back();
inst.m_swizzle.m_instIdx = ir.m_instructions.size() - 2;
inst.m_target = target;
for (int i=0 ; i<str.size() ; ++i)
inst.m_swizzle.m_idxs[i] = SwizzleCompIdx(str[i]);
inst.m_swizzle.m_idxs[i] = SwizzleCompIdx(str[i], m_diag, swizNode->m_tok.m_location);
}
}
void Lexer::RecursiveGroupCompile(IR& ir, const Lexer::OperationNode* groupNode, IR::RegID target)
void Lexer::RecursiveGroupCompile(IR& ir, const Lexer::OperationNode* groupNode, IR::RegID target) const
{
IR::RegID tgt = target;
for (const Lexer::OperationNode* sn = groupNode->m_sub ; sn ; sn = sn->m_next, ++tgt)
@ -621,7 +622,7 @@ void Lexer::RecursiveGroupCompile(IR& ir, const Lexer::OperationNode* groupNode,
break;
case Parser::TokenNumLiteral:
{
ir.m_instructions.emplace_back(IR::OpLoadImm);
ir.m_instructions.emplace_back(IR::OpLoadImm, tok.m_location);
IR::Instruction& inst = ir.m_instructions.back();
inst.m_target = tgt;
inst.m_loadImm.m_immVec.vec[0] = tok.m_tokenFloat;
@ -637,7 +638,7 @@ void Lexer::RecursiveGroupCompile(IR& ir, const Lexer::OperationNode* groupNode,
EmitVectorSwizzle(ir, sn, tgt);
break;
default:
LogModule.report(LogVisor::FatalError, "invalid lexer node for IR");
m_diag.reportCompileErr(tok.m_location, "invalid lexer node for IR");
break;
};
}
@ -645,7 +646,7 @@ void Lexer::RecursiveGroupCompile(IR& ir, const Lexer::OperationNode* groupNode,
ir.m_regCount = tgt;
}
void Lexer::RecursiveFuncCompile(IR& ir, const Lexer::OperationNode* funcNode, IR::RegID target)
void Lexer::RecursiveFuncCompile(IR& ir, const Lexer::OperationNode* funcNode, IR::RegID target) const
{
IR::RegID tgt = target;
std::vector<size_t> instIdxs;
@ -654,7 +655,7 @@ void Lexer::RecursiveFuncCompile(IR& ir, const Lexer::OperationNode* funcNode, I
RecursiveGroupCompile(ir, gn, tgt);
instIdxs.push_back(ir.m_instructions.size() - 1);
}
ir.m_instructions.emplace_back(IR::OpCall);
ir.m_instructions.emplace_back(IR::OpCall, funcNode->m_tok.m_location);
IR::Instruction& inst = ir.m_instructions.back();
inst.m_call.m_name = funcNode->m_tok.m_tokenString;
inst.m_call.m_argInstIdxs = std::move(instIdxs);
@ -666,7 +667,7 @@ void Lexer::RecursiveFuncCompile(IR& ir, const Lexer::OperationNode* funcNode, I
IR Lexer::compileIR() const
{
if (!m_root)
LogModule.report(LogVisor::FatalError, "unable to compile HECL-IR for invalid source");
m_diag.reportCompileErr(SourceLocation(), "unable to compile HECL-IR for invalid source");
IR ir;
RecursiveFuncCompile(ir, m_root, 0);