diff --git a/hecl/.gitmodules b/hecl/.gitmodules
index 027041d3f..96015cd59 100644
--- a/hecl/.gitmodules
+++ b/hecl/.gitmodules
@@ -7,3 +7,6 @@
 [submodule "extern/LogVisor"]
 	path = extern/LogVisor
 	url = https://github.com/AxioDL/LogVisor.git
+[submodule "extern/libBoo"]
+	path = extern/libBoo
+	url = https://github.com/AxioDL/libBoo.git
diff --git a/hecl/CMakeLists.txt b/hecl/CMakeLists.txt
index ab70d8da2..fe5dfd0fe 100644
--- a/hecl/CMakeLists.txt
+++ b/hecl/CMakeLists.txt
@@ -19,9 +19,10 @@ set(LOG_VISOR_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/extern/LogVisor/include)
 set(LOG_VISOR_INCLUDE_DIR ${LOG_VISOR_INCLUDE_DIR} PARENT_SCOPE)
 set(SQUISH_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/extern/libSquish)
 set(SQUISH_INCLUDE_DIR ${SQUISH_INCLUDE_DIR} PARENT_SCOPE)
+set(BOO_INCLUDE_DIR extern/libBoo/include)
 add_subdirectory(bintoc)
 add_subdirectory(extern)
-include_directories(include ${LOG_VISOR_INCLUDE_DIR} ${ATHENA_INCLUDE_DIR})
+include_directories(include ${LOG_VISOR_INCLUDE_DIR} ${ATHENA_INCLUDE_DIR} ${BOO_INCLUDE_DIR})
 add_subdirectory(lib)
 add_subdirectory(blender)
 add_subdirectory(driver)
diff --git a/hecl/extern/CMakeLists.txt b/hecl/extern/CMakeLists.txt
index e5f282323..ff35928c8 100644
--- a/hecl/extern/CMakeLists.txt
+++ b/hecl/extern/CMakeLists.txt
@@ -1,3 +1,9 @@
+#disable libBoo for FreeBSD for the time being
+if(NOT ${CMAKE_SYSTEM_NAME} MATCHES "FreeBSD")
+add_subdirectory(libBoo)
+set(BOO_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/libBoo/include)
+endif()
+
 add_subdirectory(libSquish)
 add_subdirectory(xxhash)
 if (NOT TARGET LogVisor)
diff --git a/hecl/extern/LogVisor b/hecl/extern/LogVisor
index 4c2442df2..189e04797 160000
--- a/hecl/extern/LogVisor
+++ b/hecl/extern/LogVisor
@@ -1 +1 @@
-Subproject commit 4c2442df2d800fc25339d3d301d7d3691da7bafb
+Subproject commit 189e047977b138b711259ad84d94471f5d006ffb
diff --git a/hecl/extern/libBoo b/hecl/extern/libBoo
new file mode 160000
index 000000000..df72fca65
--- /dev/null
+++ b/hecl/extern/libBoo
@@ -0,0 +1 @@
+Subproject commit df72fca65d3a04a1b28957823e6db956ec68ecff
diff --git a/hecl/include/HECL/Backend/GLSL.hpp b/hecl/include/HECL/Backend/GLSL.hpp
new file mode 100644
index 000000000..cba7ba020
--- /dev/null
+++ b/hecl/include/HECL/Backend/GLSL.hpp
@@ -0,0 +1,62 @@
+#ifndef HECLBACKEND_GLSL_HPP
+#define HECLBACKEND_GLSL_HPP
+
+#include "Backend.hpp"
+#include
+#include
+#include
+#include
+#include
+
+namespace HECL
+{
+namespace Backend
+{
+
+struct GLSL : IBackend
+{
+    boo::BlendFactor m_blendSrc;
+    boo::BlendFactor m_blendDst;
+    std::string m_vertSource;
+    std::string m_fragSource;
+
+    size_t m_texSamplingCount = 0;
+    std::string m_texSamplings;
+
+    enum TexGenSrc
+    {
+        TG_POS,
+        TG_NRM,
+        TG_UV
+    };
+
+    struct TexCoordGen
+    {
+        TexGenSrc m_src;
+        int m_uvIdx = 0;
+        int m_mtx = -1;
+        std::string m_gameFunction;
+        std::vector m_gameArgs;
+    };
+    std::vector m_tcgs;
+    std::vector m_texMtxRefs;
+
+    void reset(const IR& ir, Diagnostics& diag);
+
+private:
+    unsigned addTexCoordGen(Diagnostics& diag, const SourceLocation& loc,
+                            TexGenSrc src, int uvIdx, int mtx);
+    std::string RecursiveTraceColor(const IR& ir, Diagnostics& diag,
+                                    const IR::Instruction& inst,
+                                    bool swizzleAlpha=false);
+    std::string RecursiveTraceAlpha(const IR& ir, Diagnostics& diag,
+                                    const IR::Instruction& inst);
+    unsigned RecursiveTraceTexGen(const IR& ir, Diagnostics& diag,
+                                  const IR::Instruction& inst,
+                                  int mtx);
+};
+
+}
+}
+
+#endif // HECLBACKEND_GLSL_HPP
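The header above only declares the new backend; the GLSL.cpp added further down still carries GX-style TEV-tracing code (note the TraceResult/TEVStage returns against the std::string declarations here), so actual GLSL text generation does not appear to be wired up yet. A minimal usage sketch of the intended interface, assuming IR and Diagnostics sit directly in the HECL namespace as Frontend.hpp declares them; none of this is part of the patch:

    // Sketch only -- drives the backend through the members declared in GLSL.hpp.
    #include <cstdio>
    #include "HECL/Frontend.hpp"
    #include "HECL/Backend/GLSL.hpp"

    void EmitShaders(const HECL::IR& ir, HECL::Diagnostics& diag)
    {
        HECL::Backend::GLSL glsl;
        glsl.reset(ir, diag);                             // walks the IR from its root call
        std::printf("%s\n", glsl.m_vertSource.c_str());   // generated vertex program
        std::printf("%s\n", glsl.m_fragSource.c_str());   // generated fragment program
        // m_blendSrc/m_blendDst hold the boo::BlendFactor pair picked from the
        // HECLOpaque/HECLAlpha/HECLAdditive root call.
    }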
diff --git a/hecl/include/HECL/Backend/GX.hpp b/hecl/include/HECL/Backend/GX.hpp
index 74bacd828..356d11e26 100644
--- a/hecl/include/HECL/Backend/GX.hpp
+++ b/hecl/include/HECL/Backend/GX.hpp
@@ -550,10 +550,6 @@ private:
     unsigned addTexCoordGen(Diagnostics& diag, const SourceLocation& loc,
                             TexGenSrc src, TexMtx mtx);
     TEVStage& addTEVStage(Diagnostics& diag, const SourceLocation& loc);
-    void PreTraceColor(const IR& ir, Diagnostics& diag,
-                       const IR::Instruction& inst);
-    void PreTraceAlpha(const IR& ir, Diagnostics& diag,
-                       const IR::Instruction& inst);
     TraceResult RecursiveTraceColor(const IR& ir, Diagnostics& diag,
                                     const IR::Instruction& inst,
                                     bool swizzleAlpha=false);
diff --git a/hecl/include/HECL/Frontend.hpp b/hecl/include/HECL/Frontend.hpp
index c601efb67..da221970a 100644
--- a/hecl/include/HECL/Frontend.hpp
+++ b/hecl/include/HECL/Frontend.hpp
@@ -5,6 +5,7 @@
 #include
 #include
 #include
+#include
 #include
 
 namespace HECL
@@ -111,9 +112,13 @@ public:
     Parser(Diagnostics& diag) : m_diag(diag) {}
 };
 
-struct IR
+using BigDNA = Athena::io::DNA;
+
+struct IR : BigDNA
 {
-    enum OpType
+    Delete _d;
+
+    enum OpType : uint8_t
     {
         OpNone,      /**< NOP */
         OpCall,      /**< Deferred function insertion for HECL backend using specified I/O regs */
@@ -122,26 +127,31 @@ struct IR
         OpSwizzle    /**< Vector insertion/extraction/swizzling operation */
     };
 
-    using RegID = int;
+    using RegID = atUint16;
 
-    struct Instruction
+    struct Instruction : BigDNA
     {
+        Delete _d;
+
         OpType m_op = OpNone;
-        RegID m_target = -1;
+        RegID m_target = RegID(-1);
         SourceLocation m_loc;
 
-        struct
+        struct Call : BigDNA
        {
-            std::string m_name;
-            std::vector m_argInstIdxs;
+            DECL_DNA
+            String<-1> m_name;
+            Value m_argInstCount;
+            Vector m_argInstIdxs;
         } m_call;
 
-        struct
+        struct LoadImm : BigDNA
         {
-            atVec4f m_immVec;
+            DECL_DNA
+            Value m_immVec;
         } m_loadImm;
 
-        enum ArithmeticOpType
+        enum ArithmeticOpType : uint8_t
         {
             ArithmeticOpNone,
             ArithmeticOpAdd,
@@ -150,16 +160,18 @@ struct IR
             ArithmeticOpDivide
         };
 
-        struct
+        struct Arithmetic : BigDNA
         {
-            ArithmeticOpType m_op = ArithmeticOpNone;
-            size_t m_instIdxs[2];
+            DECL_DNA
+            Value m_op = ArithmeticOpNone;
+            Value m_instIdxs[2];
         } m_arithmetic;
 
-        struct
+        struct Swizzle : BigDNA
         {
-            int m_idxs[4] = {-1, -1, -1, -1};
-            size_t m_instIdx;
+            DECL_DNA
+            Value m_idxs[4] = {-1, -1, -1, -1};
+            Value m_instIdx;
         } m_swizzle;
 
         Instruction(OpType type, const SourceLocation& loc) : m_op(type), m_loc(loc) {}
@@ -206,10 +218,104 @@ struct IR
             LogModule.report(LogVisor::FatalError, "invalid op type");
             return m_loadImm.m_immVec;
         }
+
+        void read(Athena::io::IStreamReader& reader)
+        {
+            m_op = OpType(reader.readUByte());
+            m_target = reader.readUint16Big();
+            switch (m_op)
+            {
+            default: break;
+            case OpCall:
+                m_call.read(reader);
+                break;
+            case OpLoadImm:
+                m_loadImm.read(reader);
+                break;
+            case OpArithmetic:
+                m_arithmetic.read(reader);
+                break;
+            case OpSwizzle:
+                m_swizzle.read(reader);
+                break;
+            }
+        }
+
+        void write(Athena::io::IStreamWriter& writer) const
+        {
+            writer.writeUByte(m_op);
+            writer.writeUint16Big(m_target);
+            switch (m_op)
+            {
+            default: break;
+            case OpCall:
+                m_call.write(writer);
+                break;
+            case OpLoadImm:
+                m_loadImm.write(writer);
+                break;
+            case OpArithmetic:
+                m_arithmetic.write(writer);
+                break;
+            case OpSwizzle:
+                m_swizzle.write(writer);
+                break;
+            }
+        }
+
+        size_t binarySize(size_t sz) const
+        {
+            sz += 3;
+            switch (m_op)
+            {
+            default: break;
+            case OpCall:
+                sz = m_call.binarySize(sz);
+                break;
+            case OpLoadImm:
+                sz = m_loadImm.binarySize(sz);
+                break;
+            case OpArithmetic:
+                sz = m_arithmetic.binarySize(sz);
+                break;
+            case OpSwizzle:
+                sz = m_swizzle.binarySize(sz);
+                break;
+            }
+            return sz;
+        }
+
+        Instruction(Athena::io::IStreamReader& reader) {read(reader);}
     };
 
-    size_t m_regCount = 0;
+    atUint16 m_regCount = 0;
     std::vector m_instructions;
+
+    void read(Athena::io::IStreamReader& reader)
+    {
+        m_regCount = reader.readUint16Big();
+        atUint16 instCount = reader.readUint16Big();
+        m_instructions.clear();
+        m_instructions.reserve(instCount);
+        for (atUint16 i=0 ; i
+
+namespace HECL
+{
+namespace Backend
+{
+
+unsigned GLSL::addTexCoordGen(Diagnostics&, const SourceLocation&,
+                              TexGenSrc src, int uvIdx, int mtx)
+{
+    for (unsigned i=0 ; i)
+
+            const IR::Instruction& tcgInst = inst.getChildInst(ir, 1);
+            unsigned texGenIdx = RecursiveTraceTexGen(ir, diag, tcgInst, IDENTITY);
+
+            return TraceResult(&newStage);
+        }
+        else if (!name.compare("ColorReg"))
+        {
+            const IR::Instruction& idxInst = inst.getChildInst(ir, 0);
+            unsigned idx = unsigned(idxInst.getImmVec().vec[0]);
+            if (swizzleAlpha)
+                m_aRegMask |= 1 << idx;
+            else
+                m_cRegMask |= 1 << idx;
+            return TraceResult(TevColorArg((swizzleAlpha ? CC_A0 : CC_C0) + idx * 2));
+        }
+        else if (!name.compare("Lighting"))
+        {
+            return TraceResult(swizzleAlpha ? CC_RASA : CC_RASC);
+        }
+        else
+            diag.reportBackendErr(inst.m_loc, "GX backend unable to interpret '%s'", name.c_str());
+        break;
+    }
+    case IR::OpLoadImm:
+    {
+        const atVec4f& vec = inst.m_loadImm.m_immVec;
+        if (vec.vec[0] == 0.f && vec.vec[1] == 0.f && vec.vec[2] == 0.f)
+            return TraceResult(CC_ZERO);
+        else if (vec.vec[0] == 1.f && vec.vec[1] == 1.f && vec.vec[2] == 1.f)
+            return TraceResult(CC_ONE);
+        unsigned idx = addKColor(diag, inst.m_loc, vec);
+        return TraceResult(TevKColorSel(TEV_KCSEL_K0 + idx));
+    }
+    case IR::OpArithmetic:
+    {
+        ArithmeticOp op = inst.m_arithmetic.m_op;
+        const IR::Instruction& aInst = inst.getChildInst(ir, 0);
+        const IR::Instruction& bInst = inst.getChildInst(ir, 1);
+        TraceResult aTrace;
+        TraceResult bTrace;
+        if (aInst.m_op != IR::OpArithmetic && bInst.m_op == IR::OpArithmetic)
+        {
+            bTrace = RecursiveTraceColor(ir, diag, bInst);
+            aTrace = RecursiveTraceColor(ir, diag, aInst);
+        }
+        else
+        {
+            aTrace = RecursiveTraceColor(ir, diag, aInst);
+            bTrace = RecursiveTraceColor(ir, diag, bInst);
+        }
+        if (aTrace.type == TraceResult::TraceTEVStage &&
+            bTrace.type == TraceResult::TraceTEVStage &&
+            getStageIdx(aTrace.tevStage) > getStageIdx(bTrace.tevStage))
+        {
+            TraceResult tmp = aTrace;
+            aTrace = bTrace;
+            bTrace = tmp;
+        }
+
+        TevKColorSel newKColor = TEV_KCSEL_1;
+        if (aTrace.type == TraceResult::TraceTEVKColorSel &&
+            bTrace.type == TraceResult::TraceTEVKColorSel)
+            diag.reportBackendErr(inst.m_loc, "unable to handle 2 KColors in one stage");
+        else if (aTrace.type == TraceResult::TraceTEVKColorSel)
+        {
+            newKColor = aTrace.tevKColorSel;
+            aTrace.type = TraceResult::TraceTEVColorArg;
+            aTrace.tevColorArg = CC_KONST;
+        }
+        else if (bTrace.type == TraceResult::TraceTEVKColorSel)
+        {
+            newKColor = bTrace.tevKColorSel;
+            bTrace.type = TraceResult::TraceTEVColorArg;
+            bTrace.tevColorArg = CC_KONST;
+        }
+
+        switch (op)
+        {
+        case ArithmeticOp::ArithmeticOpAdd:
+        {
+            if (aTrace.type == TraceResult::TraceTEVStage &&
+                bTrace.type == TraceResult::TraceTEVStage)
+            {
+                TEVStage* a = aTrace.tevStage;
+                TEVStage* b = bTrace.tevStage;
+                if (b->m_prev != a)
+                {
+                    a->m_cRegOut = TEVLAZY;
+                    b->m_color[3] = CC_LAZY;
+                    b->m_lazyCInIdx = m_cRegLazy;
+                    a->m_lazyCOutIdx = m_cRegLazy++;
+                }
+                else if (b == &m_tevs[m_tevCount-1] &&
+                         a->m_texMapIdx == b->m_texMapIdx && a->m_texGenIdx == b->m_texGenIdx &&
a->m_color[3] == CC_ZERO && b->m_color[0] != CC_ZERO) + { + a->m_color[3] = b->m_color[0]; + --m_tevCount; + return TraceResult(a); + } + else + b->m_color[3] = CC_CPREV; + return TraceResult(b); + } + else if (aTrace.type == TraceResult::TraceTEVStage && + bTrace.type == TraceResult::TraceTEVColorArg) + { + TEVStage* a = aTrace.tevStage; + if (a->m_color[3] != CC_ZERO) + diag.reportBackendErr(inst.m_loc, "unable to modify TEV stage for add combine"); + a->m_color[3] = bTrace.tevColorArg; + a->m_kColor = newKColor; + return TraceResult(a); + } + else if (aTrace.type == TraceResult::TraceTEVColorArg && + bTrace.type == TraceResult::TraceTEVStage) + { + TEVStage* b = bTrace.tevStage; + if (b->m_color[3] != CC_ZERO) + diag.reportBackendErr(inst.m_loc, "unable to modify TEV stage for add combine"); + b->m_color[3] = aTrace.tevColorArg; + b->m_kColor = newKColor; + return TraceResult(b); + } + break; + } + case ArithmeticOp::ArithmeticOpSubtract: + { + if (aTrace.type == TraceResult::TraceTEVStage && + bTrace.type == TraceResult::TraceTEVStage) + { + TEVStage* a = aTrace.tevStage; + TEVStage* b = bTrace.tevStage; + if (b->m_prev != a) + { + a->m_cRegOut = TEVLAZY; + b->m_color[3] = CC_LAZY; + b->m_lazyCInIdx = m_cRegLazy; + a->m_lazyCOutIdx = m_cRegLazy++; + } + else + b->m_color[3] = CC_CPREV; + b->m_cop = TEV_SUB; + return TraceResult(b); + } + else if (aTrace.type == TraceResult::TraceTEVStage && + bTrace.type == TraceResult::TraceTEVColorArg) + { + TEVStage* a = aTrace.tevStage; + if (a->m_color[3] != CC_ZERO) + diag.reportBackendErr(inst.m_loc, "unable to modify TEV stage for subtract combine"); + a->m_color[3] = bTrace.tevColorArg; + a->m_kColor = newKColor; + a->m_cop = TEV_SUB; + return TraceResult(a); + } + break; + } + case ArithmeticOp::ArithmeticOpMultiply: + { + if (aTrace.type == TraceResult::TraceTEVStage && + bTrace.type == TraceResult::TraceTEVStage) + { + TEVStage* a = aTrace.tevStage; + TEVStage* b = bTrace.tevStage; + if (b->m_color[2] != CC_ZERO) + diag.reportBackendErr(inst.m_loc, "unable to modify TEV stage for multiply combine"); + if (b->m_prev != a) + { + a->m_cRegOut = TEVLAZY; + b->m_color[2] = CC_LAZY; + b->m_lazyCInIdx = m_cRegLazy; + a->m_lazyCOutIdx = m_cRegLazy++; + } + else + b->m_color[2] = CC_CPREV; + b->m_color[1] = b->m_color[0]; + b->m_color[0] = CC_ZERO; + b->m_color[3] = CC_ZERO; + return TraceResult(b); + } + else if (aTrace.type == TraceResult::TraceTEVColorArg && + bTrace.type == TraceResult::TraceTEVColorArg) + { + TEVStage& stage = addTEVStage(diag, inst.m_loc); + stage.m_color[1] = aTrace.tevColorArg; + stage.m_color[2] = bTrace.tevColorArg; + stage.m_kColor = newKColor; + return TraceResult(&stage); + } + else if (aTrace.type == TraceResult::TraceTEVStage && + bTrace.type == TraceResult::TraceTEVColorArg) + { + TEVStage* a = aTrace.tevStage; + if (a->m_color[1] != CC_ZERO) + { + if (a->m_cRegOut != TEVPREV) + diag.reportBackendErr(inst.m_loc, "unable to modify TEV stage for multiply combine"); + TEVStage& stage = addTEVStage(diag, inst.m_loc); + stage.m_color[1] = CC_CPREV; + stage.m_color[2] = bTrace.tevColorArg; + stage.m_kColor = newKColor; + return TraceResult(&stage); + } + a->m_color[1] = a->m_color[0]; + a->m_color[0] = CC_ZERO; + a->m_color[2] = bTrace.tevColorArg; + a->m_kColor = newKColor; + return TraceResult(a); + } + else if (aTrace.type == TraceResult::TraceTEVColorArg && + bTrace.type == TraceResult::TraceTEVStage) + { + TEVStage* b = bTrace.tevStage; + if (b->m_color[1] != CC_ZERO) + { + if (b->m_cRegOut != TEVPREV) + 
diag.reportBackendErr(inst.m_loc, "unable to modify TEV stage for multiply combine"); + TEVStage& stage = addTEVStage(diag, inst.m_loc); + stage.m_color[1] = aTrace.tevColorArg; + stage.m_color[2] = CC_CPREV; + stage.m_kColor = newKColor; + return TraceResult(&stage); + } + b->m_color[1] = b->m_color[0]; + b->m_color[0] = CC_ZERO; + b->m_color[2] = bTrace.tevColorArg; + b->m_kColor = newKColor; + return TraceResult(b); + } + break; + } + default: + diag.reportBackendErr(inst.m_loc, "invalid arithmetic op"); + } + + diag.reportBackendErr(inst.m_loc, "unable to convert arithmetic to TEV stage"); + } + case IR::OpSwizzle: + { + if (inst.m_swizzle.m_idxs[0] == 3 && inst.m_swizzle.m_idxs[1] == 3 && + inst.m_swizzle.m_idxs[2] == 3 && inst.m_swizzle.m_idxs[3] == -1) + { + const IR::Instruction& cInst = inst.getChildInst(ir, 0); + if (cInst.m_op != IR::OpCall) + diag.reportBackendErr(inst.m_loc, "only functions accepted for alpha swizzle"); + return RecursiveTraceColor(ir, diag, cInst, true); + } + else + diag.reportBackendErr(inst.m_loc, "only alpha extract may be performed with swizzle operation"); + } + default: + diag.reportBackendErr(inst.m_loc, "invalid color op"); + } + + return TraceResult(); +} + +std::string GLSL::RecursiveTraceAlpha(const IR& ir, Diagnostics& diag, const IR::Instruction& inst) +{ + switch (inst.m_op) + { + case IR::OpCall: + { + const std::string& name = inst.m_call.m_name; + if (!name.compare("Texture")) + { + if (inst.getChildCount() < 2) + diag.reportBackendErr(inst.m_loc, "Texture(map, texgen) requires 2 arguments"); + + const IR::Instruction& mapInst = inst.getChildInst(ir, 0); + const atVec4f& mapImm = mapInst.getImmVec(); + unsigned mapIdx = unsigned(mapImm.vec[0]); + + int foundStage = -1; + for (int i=0 ; i m_alphaTraceStage) + { + foundStage = i; + break; + } + } + + if (foundStage >= 0) + { + m_alphaTraceStage = foundStage; + TEVStage& stage = m_tevs[foundStage]; + stage.m_alpha[0] = CA_TEXA; + return TraceResult(&stage); + } + + TEVStage& newStage = addTEVStage(diag, inst.m_loc); + newStage.m_color[3] = CC_CPREV; + + newStage.m_texMapIdx = mapIdx; + newStage.m_alpha[0] = CA_TEXA; + + const IR::Instruction& tcgInst = inst.getChildInst(ir, 1); + newStage.m_texGenIdx = RecursiveTraceTexGen(ir, diag, tcgInst, IDENTITY); + + return TraceResult(&newStage); + } + else if (!name.compare("ColorReg")) + { + const IR::Instruction& idxInst = inst.getChildInst(ir, 0); + unsigned idx = unsigned(idxInst.getImmVec().vec[0]); + m_aRegMask |= 1 << idx; + return TraceResult(TevAlphaArg(CA_A0 + idx)); + } + else if (!name.compare("Lighting")) + { + return TraceResult(CA_RASA); + } + else + diag.reportBackendErr(inst.m_loc, "GX backend unable to interpret '%s'", name.c_str()); + break; + } + case IR::OpLoadImm: + { + const atVec4f& vec = inst.m_loadImm.m_immVec; + if (vec.vec[0] == 0.f) + return TraceResult(CA_ZERO); + unsigned idx = addKAlpha(diag, inst.m_loc, vec.vec[0]); + return TraceResult(TevKAlphaSel(TEV_KASEL_K0_A + idx)); + } + case IR::OpArithmetic: + { + ArithmeticOp op = inst.m_arithmetic.m_op; + const IR::Instruction& aInst = inst.getChildInst(ir, 0); + const IR::Instruction& bInst = inst.getChildInst(ir, 1); + TraceResult aTrace; + TraceResult bTrace; + if (aInst.m_op != IR::OpArithmetic && bInst.m_op == IR::OpArithmetic) + { + bTrace = RecursiveTraceAlpha(ir, diag, bInst); + aTrace = RecursiveTraceAlpha(ir, diag, aInst); + } + else + { + aTrace = RecursiveTraceAlpha(ir, diag, aInst); + bTrace = RecursiveTraceAlpha(ir, diag, bInst); + } + + TevKAlphaSel newKAlpha = 
TEV_KASEL_1; + if (aTrace.type == TraceResult::TraceTEVKAlphaSel && + bTrace.type == TraceResult::TraceTEVKAlphaSel) + diag.reportBackendErr(inst.m_loc, "unable to handle 2 KAlphas in one stage"); + else if (aTrace.type == TraceResult::TraceTEVKAlphaSel) + { + newKAlpha = aTrace.tevKAlphaSel; + aTrace.type = TraceResult::TraceTEVAlphaArg; + aTrace.tevAlphaArg = CA_KONST; + } + else if (bTrace.type == TraceResult::TraceTEVKAlphaSel) + { + newKAlpha = bTrace.tevKAlphaSel; + bTrace.type = TraceResult::TraceTEVAlphaArg; + bTrace.tevAlphaArg = CA_KONST; + } + + switch (op) + { + case ArithmeticOp::ArithmeticOpAdd: + { + if (aTrace.type == TraceResult::TraceTEVStage && + bTrace.type == TraceResult::TraceTEVStage) + { + TEVStage* a = aTrace.tevStage; + TEVStage* b = bTrace.tevStage; + if (b->m_prev != a) + { + a->m_aRegOut = TEVLAZY; + b->m_alpha[3] = CA_LAZY; + if (a->m_lazyAOutIdx != -1) + b->m_lazyAInIdx = a->m_lazyAOutIdx; + else + { + b->m_lazyAInIdx = m_aRegLazy; + a->m_lazyAOutIdx = m_aRegLazy++; + } + } + else + b->m_alpha[3] = CA_APREV; + return TraceResult(b); + } + else if (aTrace.type == TraceResult::TraceTEVStage && + bTrace.type == TraceResult::TraceTEVAlphaArg) + { + TEVStage* a = aTrace.tevStage; + if (a->m_alpha[3] != CA_ZERO) + diag.reportBackendErr(inst.m_loc, "unable to modify TEV stage for add combine"); + a->m_alpha[3] = bTrace.tevAlphaArg; + a->m_kAlpha = newKAlpha; + return TraceResult(a); + } + else if (aTrace.type == TraceResult::TraceTEVAlphaArg && + bTrace.type == TraceResult::TraceTEVStage) + { + TEVStage* b = bTrace.tevStage; + if (b->m_alpha[3] != CA_ZERO) + diag.reportBackendErr(inst.m_loc, "unable to modify TEV stage for add combine"); + b->m_alpha[3] = aTrace.tevAlphaArg; + b->m_kAlpha = newKAlpha; + return TraceResult(b); + } + break; + } + case ArithmeticOp::ArithmeticOpSubtract: + { + if (aTrace.type == TraceResult::TraceTEVStage && + bTrace.type == TraceResult::TraceTEVStage) + { + TEVStage* a = aTrace.tevStage; + TEVStage* b = bTrace.tevStage; + if (b->m_aop != TEV_SUB) + diag.reportBackendErr(inst.m_loc, "unable to integrate alpha subtraction into stage chain"); + if (b->m_prev != a) + { + a->m_aRegOut = TEVLAZY; + b->m_alpha[3] = CA_LAZY; + if (a->m_lazyAOutIdx != -1) + b->m_lazyAInIdx = a->m_lazyAOutIdx; + else + { + b->m_lazyAInIdx = m_aRegLazy; + a->m_lazyAOutIdx = m_aRegLazy++; + } + } + else + b->m_alpha[3] = CA_APREV; + return TraceResult(b); + } + else if (aTrace.type == TraceResult::TraceTEVStage && + bTrace.type == TraceResult::TraceTEVAlphaArg) + { + TEVStage* a = aTrace.tevStage; + if (a->m_aop != TEV_SUB) + diag.reportBackendErr(inst.m_loc, "unable to integrate alpha subtraction into stage chain"); + if (a->m_alpha[3] != CA_ZERO) + diag.reportBackendErr(inst.m_loc, "unable to modify TEV stage for add combine"); + a->m_alpha[3] = bTrace.tevAlphaArg; + a->m_kAlpha = newKAlpha; + return TraceResult(a); + } + break; + } + case ArithmeticOp::ArithmeticOpMultiply: + { + if (aTrace.type == TraceResult::TraceTEVStage && + bTrace.type == TraceResult::TraceTEVStage) + { + TEVStage* a = aTrace.tevStage; + TEVStage* b = bTrace.tevStage; + if (b->m_alpha[2] != CA_ZERO) + diag.reportBackendErr(inst.m_loc, "unable to modify TEV stage for multiply combine"); + if (b->m_prev != a) + { + a->m_aRegOut = TEVLAZY; + b->m_alpha[2] = CA_LAZY; + b->m_lazyAInIdx = m_aRegLazy; + a->m_lazyAOutIdx = m_aRegLazy++; + } + else + b->m_alpha[2] = CA_APREV; + b->m_alpha[1] = b->m_alpha[0]; + b->m_alpha[0] = CA_ZERO; + b->m_alpha[3] = CA_ZERO; + return TraceResult(b); + } + else 
if (aTrace.type == TraceResult::TraceTEVAlphaArg && + bTrace.type == TraceResult::TraceTEVAlphaArg) + { + TEVStage& stage = addTEVStage(diag, inst.m_loc); + stage.m_color[3] = CC_CPREV; + stage.m_alpha[1] = aTrace.tevAlphaArg; + stage.m_alpha[2] = bTrace.tevAlphaArg; + stage.m_kAlpha = newKAlpha; + return TraceResult(&stage); + } + else if (aTrace.type == TraceResult::TraceTEVStage && + bTrace.type == TraceResult::TraceTEVAlphaArg) + { + TEVStage* a = aTrace.tevStage; + if (a->m_alpha[1] != CA_ZERO) + { + if (a->m_aRegOut != TEVPREV) + diag.reportBackendErr(inst.m_loc, "unable to modify TEV stage for multiply combine"); + TEVStage& stage = addTEVStage(diag, inst.m_loc); + stage.m_alpha[1] = CA_APREV; + stage.m_alpha[2] = bTrace.tevAlphaArg; + stage.m_kAlpha = newKAlpha; + return TraceResult(&stage); + } + a->m_alpha[1] = a->m_alpha[0]; + a->m_alpha[0] = CA_ZERO; + a->m_alpha[2] = bTrace.tevAlphaArg; + a->m_kAlpha = newKAlpha; + return TraceResult(a); + } + else if (aTrace.type == TraceResult::TraceTEVAlphaArg && + bTrace.type == TraceResult::TraceTEVStage) + { + TEVStage* b = bTrace.tevStage; + if (b->m_alpha[1] != CA_ZERO) + { + if (b->m_aRegOut != TEVPREV) + diag.reportBackendErr(inst.m_loc, "unable to modify TEV stage for multiply combine"); + TEVStage& stage = addTEVStage(diag, inst.m_loc); + stage.m_alpha[1] = aTrace.tevAlphaArg; + stage.m_alpha[2] = CA_APREV; + stage.m_kAlpha = newKAlpha; + return TraceResult(&stage); + } + b->m_alpha[1] = b->m_alpha[0]; + b->m_alpha[0] = CA_ZERO; + b->m_alpha[2] = bTrace.tevAlphaArg; + b->m_kAlpha = newKAlpha; + return TraceResult(b); + } + break; + } + default: + diag.reportBackendErr(inst.m_loc, "invalid arithmetic op"); + } + + diag.reportBackendErr(inst.m_loc, "unable to convert arithmetic to TEV stage"); + } + case IR::OpSwizzle: + { + if (inst.m_swizzle.m_idxs[0] == 3 && inst.m_swizzle.m_idxs[1] == 3 && + inst.m_swizzle.m_idxs[2] == 3 && inst.m_swizzle.m_idxs[3] == -1) + { + const IR::Instruction& cInst = inst.getChildInst(ir, 0); + if (cInst.m_op != IR::OpCall) + diag.reportBackendErr(inst.m_loc, "only functions accepted for alpha swizzle"); + return RecursiveTraceAlpha(ir, diag, cInst); + } + else + diag.reportBackendErr(inst.m_loc, "only alpha extract may be performed with swizzle operation"); + } + default: + diag.reportBackendErr(inst.m_loc, "invalid alpha op"); + } + + return TraceResult(); +} + +void GLSL::reset(const IR& ir, Diagnostics& diag) +{ + diag.setBackend("GLSL"); + m_vertSource.clear(); + m_fragSource.clear(); + + /* Final instruction is the root call by hecl convention */ + const IR::Instruction& rootCall = ir.m_instructions.back(); + bool doAlpha = false; + if (!rootCall.m_call.m_name.compare("HECLOpaque")) + { + m_blendSrc = boo::BlendFactorOne; + m_blendDst = boo::BlendFactorZero; + } + else if (!rootCall.m_call.m_name.compare("HECLAlpha")) + { + m_blendSrc = boo::BlendFactorSrcAlpha; + m_blendDst = boo::BlendFactorInvSrcAlpha; + doAlpha = true; + } + else if (!rootCall.m_call.m_name.compare("HECLAdditive")) + { + m_blendSrc = boo::BlendFactorSrcAlpha; + m_blendDst = boo::BlendFactorOne; + doAlpha = true; + } + else + { + diag.reportBackendErr(rootCall.m_loc, "GLSL backend doesn't handle '%s' root", + rootCall.m_call.m_name.c_str()); + return; + } + + /* Follow Color Chain */ + const IR::Instruction& colorRoot = + ir.m_instructions.at(rootCall.m_call.m_argInstIdxs.at(0)); + RecursiveTraceColor(ir, diag, colorRoot); + + /* Follow Alpha Chain */ + if (doAlpha) + { + const IR::Instruction& alphaRoot = + 
+            ir.m_instructions.at(rootCall.m_call.m_argInstIdxs.at(1));
+        RecursiveTraceAlpha(ir, diag, alphaRoot);
+    }
+}
+
+#endif
+
+}
+}
diff --git a/hecl/lib/CMakeLists.txt b/hecl/lib/CMakeLists.txt
index 1f1ee79e0..7f820eb5f 100644
--- a/hecl/lib/CMakeLists.txt
+++ b/hecl/lib/CMakeLists.txt
@@ -7,6 +7,8 @@ if(WIN32)
 list(APPEND PLAT_SRCS winsupport.cpp ../include/HECL/winsupport.hpp)
 endif()
 
+atdna(atdna_Frontend.cpp ../include/HECL/Frontend.hpp)
+
 add_library(HECLCommon
             HECL.cpp
             ProjectPath.cpp
@@ -14,8 +16,10 @@ add_library(HECLCommon
             ../include/HECL/HECL.hpp
             ../include/HECL/Backend/Backend.hpp
             ../include/HECL/Backend/GX.hpp
+            ../include/HECL/Backend/GLSL.hpp
             ../include/HECL/Frontend.hpp
             ../include/HECL/Database.hpp
             ../include/HECL/Runtime.hpp
+            atdna_Frontend.cpp
             ${PLAT_SRCS})
diff --git a/hecl/lib/Frontend/Lexer.cpp b/hecl/lib/Frontend/Lexer.cpp
index 928e984c5..bbb3fa82c 100644
--- a/hecl/lib/Frontend/Lexer.cpp
+++ b/hecl/lib/Frontend/Lexer.cpp
@@ -689,7 +689,7 @@ void Lexer::RecursiveGroupCompile(IR& ir, const Lexer::OperationNode* groupNode,
 void Lexer::RecursiveFuncCompile(IR& ir, const Lexer::OperationNode* funcNode, IR::RegID target) const
 {
     IR::RegID tgt = target;
-    std::vector instIdxs;
+    std::vector instIdxs;
     for (const Lexer::OperationNode* gn = funcNode->m_sub ; gn ; gn = gn->m_next, ++tgt)
     {
         RecursiveGroupCompile(ir, gn, tgt);
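With IR now deriving from Athena's DNA base (the BigDNA alias) and gaining read()/write()/binarySize(), a compiled program can be cached and reloaded without re-running the Lexer. A round-trip sketch using only the Athena stream interfaces the new methods take; the concrete reader/writer classes are whatever the caller already uses (assumption, not shown in this patch), and IR is assumed to sit directly in the HECL namespace as Frontend.hpp declares it:

    // Sketch only: cache and reload a compiled IR.
    #include "HECL/Frontend.hpp"

    void SaveIR(const HECL::IR& ir, Athena::io::IStreamWriter& w)
    {
        ir.write(w);      // big-endian, per the writeUint16Big/readUint16Big calls
    }

    void LoadIR(HECL::IR& ir, Athena::io::IStreamReader& r)
    {
        ir.read(r);       // clears and rebuilds m_instructions in place
    }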
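Each serialized instruction begins with a 3-byte header, one opcode byte plus a big-endian 16-bit target register, which is exactly the sz += 3 in Instruction::binarySize(); the rest is the per-op payload. A worked size example for a program holding a single OpLoadImm; the 16-byte payload figure assumes Value<atVec4f> writes four 32-bit floats, which this hunk does not spell out:

    // Worked size arithmetic (assumptions noted in the comments).
    #include <cstddef>

    size_t ExpectedLoadImmProgramSize()
    {
        const size_t irHeader   = 2 + 2;   // m_regCount + instruction count, both 16-bit
        const size_t instHeader = 3;       // opcode byte + big-endian target register
        const size_t immPayload = 4 * 4;   // assumed: atVec4f as four 32-bit floats
        return irHeader + instHeader + immPayload;   // 23 bytes
    }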
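Both backends walk the IR the same way: an OpCall instruction names a game function and refers to its argument instructions by index, getChildInst() resolves those indices back into ir.m_instructions, and the final instruction is the root call by hecl convention. A small debugging walker in that same style, using only the accessors visible in Frontend.hpp (illustration only, not part of the patch):

    // Illustration: dump an IR tree the way the RecursiveTrace* helpers traverse it.
    #include <cstdio>
    #include "HECL/Frontend.hpp"

    static void DumpInst(const HECL::IR& ir, const HECL::IR::Instruction& inst, int depth)
    {
        switch (inst.m_op)
        {
        case HECL::IR::OpCall:
            std::printf("%*scall %s\n", depth * 2, "", inst.m_call.m_name.c_str());
            for (int i = 0; i < int(inst.getChildCount()); ++i)
                DumpInst(ir, inst.getChildInst(ir, i), depth + 1);
            break;
        case HECL::IR::OpLoadImm:
        {
            const atVec4f& v = inst.m_loadImm.m_immVec;
            std::printf("%*simm (%g %g %g %g)\n", depth * 2, "",
                        v.vec[0], v.vec[1], v.vec[2], v.vec[3]);
            break;
        }
        default:
            std::printf("%*sop %d\n", depth * 2, "", int(inst.m_op));
            break;
        }
    }

    // Usage: DumpInst(ir, ir.m_instructions.back(), 0);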
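GLSL::reset() keys the blend mode off the root call name: HECLOpaque maps to One/Zero, HECLAlpha to SrcAlpha/InvSrcAlpha, and HECLAdditive to SrcAlpha/One. The same mapping restated as a standalone helper for reference when binding the output to a boo pipeline (sketch only; the GLSL.hpp include is just to pull in boo::BlendFactor the same way the header does, and unknown roots fall back to opaque here instead of reporting a backend error):

    #include <string>
    #include <utility>
    #include "HECL/Backend/GLSL.hpp"

    std::pair<boo::BlendFactor, boo::BlendFactor> RootBlendFactors(const std::string& root)
    {
        if (!root.compare("HECLAlpha"))
            return {boo::BlendFactorSrcAlpha, boo::BlendFactorInvSrcAlpha};
        if (!root.compare("HECLAdditive"))
            return {boo::BlendFactorSrcAlpha, boo::BlendFactorOne};
        return {boo::BlendFactorOne, boo::BlendFactorZero};   // HECLOpaque (default here)
    }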