D3D12: Indirect validation shader name cleanup and parameter merge

Bug: dawn:548
Change-Id: Id2bf9c54e10ba791e5878844a0fe98efd4fc85d6
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/87705
Reviewed-by: Corentin Wallez <cwallez@chromium.org>
Commit-Queue: Enrico Galli <enrico.galli@intel.com>
Kokoro: Kokoro <noreply+kokoro@google.com>
Author: Enrico Galli <enrico.galli@intel.com>
Committed: 2022-04-25 23:28:24 +00:00 by Dawn LUCI CQ
Parent: 857175e59b
Commit: b52d740227
12 changed files with 102 additions and 177 deletions
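
For orientation, the change has two halves. One is a pure rename in the indirect-draw validation path: client* identifiers become input* and validated* identifiers become output*. The other is a parameter merge in the D3D12 backend: the four-field FirstOffsetInfo is replaced by a single usesVertexOrInstanceIndex flag, because the call sites in this diff only ever test the OR of the two booleans. A minimal, self-contained sketch (illustrative types, not Dawn's headers) of that merge:

```cpp
#include <cstdint>

// Before: the shader reflection carried four fields.
struct FirstOffsetInfo {
    bool usesVertexIndex;
    uint32_t vertexIndexOffset;
    bool usesInstanceIndex;
    uint32_t instanceIndexOffset;
};

// The only question the D3D12 call sites asked:
bool UsesVertexOrInstanceIndex(const FirstOffsetInfo& info) {
    return info.usesVertexIndex || info.usesInstanceIndex;
}

// After: the compiled shader carries just that answer.
struct CompiledShader {
    bool usesVertexOrInstanceIndex;
};

int main() {
    FirstOffsetInfo before{true, 0, false, 4};
    CompiledShader after{UsesVertexOrInstanceIndex(before)};
    return after.usesVertexOrInstanceIndex ? 0 : 1;
}
```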

View File

@ -39,7 +39,7 @@ namespace dawn::native {
uint32_t maxDrawCallsPerIndirectValidationBatch,
uint64_t maxBatchOffsetRange,
IndirectDraw draw) {
const uint64_t newOffset = draw.clientBufferOffset;
const uint64_t newOffset = draw.inputBufferOffset;
auto it = mBatches.begin();
while (it != mBatches.end()) {
IndirectValidationBatch& batch = *it;
@ -186,7 +186,7 @@ namespace dawn::native {
}
IndirectDraw draw{};
draw.clientBufferOffset = indirectOffset;
draw.inputBufferOffset = indirectOffset;
draw.cmd = cmd;
it->second.AddIndirectDraw(mMaxDrawCallsPerBatch, mMaxBatchOffsetRange, draw);
}
@ -205,23 +205,23 @@ namespace dawn::native {
}
IndirectDraw draw{};
draw.clientBufferOffset = indirectOffset;
draw.inputBufferOffset = indirectOffset;
draw.cmd = cmd;
it->second.AddIndirectDraw(mMaxDrawCallsPerBatch, mMaxBatchOffsetRange, draw);
}
bool IndirectDrawMetadata::IndexedIndirectConfig::operator<(
const IndexedIndirectConfig& other) const {
return std::tie(clientIndirectBuffer, numIndexBufferElements, duplicateBaseVertexInstance,
drawType) < std::tie(other.clientIndirectBuffer,
return std::tie(inputIndirectBuffer, numIndexBufferElements, duplicateBaseVertexInstance,
drawType) < std::tie(other.inputIndirectBuffer,
other.numIndexBufferElements,
other.duplicateBaseVertexInstance, other.drawType);
}
bool IndirectDrawMetadata::IndexedIndirectConfig::operator==(
const IndexedIndirectConfig& other) const {
return std::tie(clientIndirectBuffer, numIndexBufferElements, duplicateBaseVertexInstance,
drawType) == std::tie(other.clientIndirectBuffer,
return std::tie(inputIndirectBuffer, numIndexBufferElements, duplicateBaseVertexInstance,
drawType) == std::tie(other.inputIndirectBuffer,
other.numIndexBufferElements,
other.duplicateBaseVertexInstance, other.drawType);
}
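
The comparison operators above exist because IndexedIndirectConfig keys the IndexedIndirectBufferValidationInfoMap used later in this change; the presence of operator< suggests an ordered map. A minimal sketch (stand-in field types, not Dawn's) of how std::tie yields the lexicographic ordering such a key needs:

```cpp
#include <cstdint>
#include <map>
#include <tuple>

enum class DrawType { NonIndexed, Indexed };

struct IndexedIndirectConfig {
    const void* inputIndirectBuffer;  // stand-in for BufferBase*
    uint64_t numIndexBufferElements;
    bool duplicateBaseVertexInstance;
    DrawType drawType;

    bool operator<(const IndexedIndirectConfig& other) const {
        // std::tie builds tuples of references; tuple::operator< compares the
        // members lexicographically, in declaration order.
        return std::tie(inputIndirectBuffer, numIndexBufferElements,
                        duplicateBaseVertexInstance, drawType) <
               std::tie(other.inputIndirectBuffer, other.numIndexBufferElements,
                        other.duplicateBaseVertexInstance, other.drawType);
    }
};

int main() {
    // Each distinct configuration gets its own validation-info entry.
    std::map<IndexedIndirectConfig, int> bufferInfoMap;
    bufferInfoMap[{nullptr, 128, false, DrawType::Indexed}] = 1;
    return static_cast<int>(bufferInfoMap.size()) - 1;  // 0 on success
}
```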

View File

@ -43,7 +43,7 @@ namespace dawn::native {
class IndirectDrawMetadata : public NonCopyable {
public:
struct IndirectDraw {
uint64_t clientBufferOffset;
uint64_t inputBufferOffset;
// This is a pointer to the command that should be populated with the validated
// indirect scratch buffer. It is only valid up until the encoded command buffer
// is submitted.
@ -97,7 +97,7 @@ namespace dawn::native {
Indexed,
};
struct IndexedIndirectConfig {
BufferBase* clientIndirectBuffer;
BufferBase* inputIndirectBuffer;
uint64_t numIndexBufferElements;
bool duplicateBaseVertexInstance;
DrawType drawType;

View File

@ -78,10 +78,10 @@ namespace dawn::native {
}
@group(0) @binding(0) var<storage, read> batch: BatchInfo;
@group(0) @binding(1) var<storage, read_write> clientParams: IndirectParams;
@group(0) @binding(2) var<storage, write> validatedParams: IndirectParams;
@group(0) @binding(1) var<storage, read_write> inputParams: IndirectParams;
@group(0) @binding(2) var<storage, write> outputParams: IndirectParams;
fn numIndirectParamsPerDrawCallClient() -> u32 {
fn numIndirectParamsPerDrawCallInput() -> u32 {
var numParams = kNumDrawIndirectParams;
// Indexed Draw has an extra parameter (firstIndex)
if (bool(batch.flags & kIndexedDraw)) {
@ -90,8 +90,8 @@ namespace dawn::native {
return numParams;
}
fn numIndirectParamsPerDrawCallValidated() -> u32 {
var numParams = numIndirectParamsPerDrawCallClient();
fn numIndirectParamsPerDrawCallOutput() -> u32 {
var numParams = numIndirectParamsPerDrawCallInput();
// 2 extra parameters for the duplicated first/baseVertex and firstInstance
if (bool(batch.flags & kDuplicateBaseVertexInstance)) {
numParams = numParams + 2u;
@ -100,31 +100,31 @@ namespace dawn::native {
}
fn fail(drawIndex: u32) {
let numParams = numIndirectParamsPerDrawCallValidated();
let numParams = numIndirectParamsPerDrawCallOutput();
let index = drawIndex * numParams;
for(var i = 0u; i < numParams; i = i + 1u) {
validatedParams.data[index + i] = 0u;
outputParams.data[index + i] = 0u;
}
}
fn pass(drawIndex: u32) {
let numClientParams = numIndirectParamsPerDrawCallClient();
var vIndex = drawIndex * numIndirectParamsPerDrawCallValidated();
let cIndex = batch.indirectOffsets[drawIndex];
let numInputParams = numIndirectParamsPerDrawCallInput();
var outIndex = drawIndex * numIndirectParamsPerDrawCallOutput();
let inIndex = batch.indirectOffsets[drawIndex];
// The first 2 parameters are reserved for the duplicated first/baseVertex and firstInstance
if (bool(batch.flags & kDuplicateBaseVertexInstance)) {
// first/baseVertex and firstInstance are always the last two parameters
let dupIndex = cIndex + numClientParams - 2u;
validatedParams.data[vIndex] = clientParams.data[dupIndex];
validatedParams.data[vIndex + 1u] = clientParams.data[dupIndex + 1u];
let dupIndex = inIndex + numInputParams - 2u;
outputParams.data[outIndex] = inputParams.data[dupIndex];
outputParams.data[outIndex + 1u] = inputParams.data[dupIndex + 1u];
vIndex = vIndex + 2u;
outIndex = outIndex + 2u;
}
for(var i = 0u; i < numClientParams; i = i + 1u) {
validatedParams.data[vIndex + i] = clientParams.data[cIndex + i];
for(var i = 0u; i < numInputParams; i = i + 1u) {
outputParams.data[outIndex + i] = inputParams.data[inIndex + i];
}
}
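
A host-side sketch (illustrative helper, not part of Dawn) of the per-draw layout the renamed pass() function writes: when the duplicate-base-vertex/instance flag is set, the last two input parameters are copied to the front of the output record, and then the full input command follows.

```cpp
#include <cstdint>
#include <vector>

// inputParams is one indirect command, e.g. drawIndexedIndirect's
// {indexCount, instanceCount, firstIndex, baseVertex, firstInstance}.
std::vector<uint32_t> BuildOutputDrawParams(const std::vector<uint32_t>& inputParams,
                                            bool duplicateBaseVertexInstance) {
    std::vector<uint32_t> output;
    if (duplicateBaseVertexInstance) {
        // first/baseVertex and firstInstance are always the last two parameters.
        const size_t dupIndex = inputParams.size() - 2;
        output.push_back(inputParams[dupIndex]);
        output.push_back(inputParams[dupIndex + 1]);
    }
    // Then the unmodified input command is appended.
    output.insert(output.end(), inputParams.begin(), inputParams.end());
    return output;
}

int main() {
    // {indexCount, instanceCount, firstIndex, baseVertex, firstInstance}
    std::vector<uint32_t> in{6, 1, 0, 10, 2};
    auto out = BuildOutputDrawParams(in, /*duplicateBaseVertexInstance=*/true);
    // out is {10, 2, 6, 1, 0, 10, 2}: two extra leading params, as the shader writes.
    return out.size() == 7 ? 0 : 1;
}
```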
@ -139,9 +139,9 @@ namespace dawn::native {
return;
}
let clientIndex = batch.indirectOffsets[id.x];
let inputIndex = batch.indirectOffsets[id.x];
// firstInstance is always the last parameter
let firstInstance = clientParams.data[clientIndex + numIndirectParamsPerDrawCallClient() - 1u];
let firstInstance = inputParams.data[inputIndex + numIndirectParamsPerDrawCallInput() - 1u];
if (firstInstance != 0u) {
fail(id.x);
return;
@ -159,7 +159,7 @@ namespace dawn::native {
return;
}
let firstIndex = clientParams.data[clientIndex + kFirstIndexEntry];
let firstIndex = inputParams.data[inputIndex + kFirstIndexEntry];
if (batch.numIndexBufferElementsHigh == 0u &&
batch.numIndexBufferElementsLow < firstIndex) {
fail(id.x);
@ -169,7 +169,7 @@ namespace dawn::native {
// Note that this subtraction may underflow, but only when
// numIndexBufferElementsHigh is 1u. The result is still correct in that case.
let maxIndexCount = batch.numIndexBufferElementsLow - firstIndex;
let indexCount = clientParams.data[clientIndex + kIndexCountEntry];
let indexCount = inputParams.data[inputIndex + kIndexCountEntry];
if (indexCount > maxIndexCount) {
fail(id.x);
return;
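
A C++ restatement (not Dawn code) of the range check in the hunk above. numIndexBufferElements is carried as two 32-bit halves because the WGSL here has no 64-bit integers; the surrounding shader (not fully shown in this hunk) is assumed to have already passed draws whose high half makes the range trivially large.

```cpp
#include <cstdint>

// Returns true if the draw's index range fits in the index buffer.
bool FirstIndexAndCountInRange(uint32_t numIndexBufferElementsLow,
                               uint32_t numIndexBufferElementsHigh,
                               uint32_t firstIndex,
                               uint32_t indexCount) {
    if (numIndexBufferElementsHigh == 0u && numIndexBufferElementsLow < firstIndex) {
        return false;  // firstIndex alone already exceeds the index buffer.
    }
    // As the shader comment notes, this subtraction may wrap, but only when
    // numIndexBufferElementsHigh is 1u; the wrapped 32-bit value is the bound
    // the shader compares indexCount against.
    const uint32_t maxIndexCount = numIndexBufferElementsLow - firstIndex;
    return indexCount <= maxIndexCount;
}

int main() {
    // e.g. a buffer of 100 indices: firstIndex 90 with indexCount 10 passes,
    // while indexCount 11 does not.
    return (FirstIndexAndCountInRange(100, 0, 90, 10) &&
            !FirstIndexAndCountInRange(100, 0, 90, 11)) ? 0 : 1;
}
```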
@ -244,17 +244,17 @@ namespace dawn::native {
uint64_t numIndexBufferElements;
uint64_t dataBufferOffset;
uint64_t dataSize;
uint64_t clientIndirectOffset;
uint64_t clientIndirectSize;
uint64_t validatedParamsOffset;
uint64_t validatedParamsSize;
uint64_t inputIndirectOffset;
uint64_t inputIndirectSize;
uint64_t outputParamsOffset;
uint64_t outputParamsSize;
BatchInfo* batchInfo;
};
struct Pass {
uint32_t flags;
BufferBase* clientIndirectBuffer;
uint64_t validatedParamsSize = 0;
BufferBase* inputIndirectBuffer;
uint64_t outputParamsSize = 0;
uint64_t batchDataSize = 0;
std::unique_ptr<void, void (*)(void*)> batchData{nullptr, std::free};
std::vector<Batch> batches;
@ -264,7 +264,7 @@ namespace dawn::native {
// single pass as possible. Batches can be grouped together as long as they're validating
// data from the same indirect buffer, but they may still be split into multiple passes if
// the number of draw calls in a pass would exceed some (very high) upper bound.
uint64_t validatedParamsSize = 0;
uint64_t outputParamsSize = 0;
std::vector<Pass> passes;
IndirectDrawMetadata::IndexedIndirectBufferValidationInfoMap& bufferInfoMap =
*indirectDrawMetadata->GetIndexedIndirectBufferValidationInfo();
@ -283,9 +283,9 @@ namespace dawn::native {
? kDrawIndexedIndirectSize
: kDrawIndirectSize;
uint64_t validatedIndirectSize = indirectDrawCommandSize;
uint64_t outputIndirectSize = indirectDrawCommandSize;
if (config.duplicateBaseVertexInstance) {
validatedIndirectSize += 2 * sizeof(uint32_t);
outputIndirectSize += 2 * sizeof(uint32_t);
}
for (const IndirectDrawMetadata::IndirectValidationBatch& batch :
@ -299,21 +299,20 @@ namespace dawn::native {
newBatch.metadata = &batch;
newBatch.numIndexBufferElements = config.numIndexBufferElements;
newBatch.dataSize = GetBatchDataSize(batch.draws.size());
newBatch.clientIndirectOffset = minOffsetAlignedDown;
newBatch.clientIndirectSize =
newBatch.inputIndirectOffset = minOffsetAlignedDown;
newBatch.inputIndirectSize =
batch.maxOffset + indirectDrawCommandSize - minOffsetAlignedDown;
newBatch.validatedParamsSize = batch.draws.size() * validatedIndirectSize;
newBatch.validatedParamsOffset =
Align(validatedParamsSize, minStorageBufferOffsetAlignment);
validatedParamsSize = newBatch.validatedParamsOffset + newBatch.validatedParamsSize;
if (validatedParamsSize > maxStorageBufferBindingSize) {
newBatch.outputParamsSize = batch.draws.size() * outputIndirectSize;
newBatch.outputParamsOffset =
Align(outputParamsSize, minStorageBufferOffsetAlignment);
outputParamsSize = newBatch.outputParamsOffset + newBatch.outputParamsSize;
if (outputParamsSize > maxStorageBufferBindingSize) {
return DAWN_INTERNAL_ERROR("Too many drawIndexedIndirect calls to validate");
}
Pass* currentPass = passes.empty() ? nullptr : &passes.back();
if (currentPass &&
currentPass->clientIndirectBuffer == config.clientIndirectBuffer) {
if (currentPass && currentPass->inputIndirectBuffer == config.inputIndirectBuffer) {
uint64_t nextBatchDataOffset =
Align(currentPass->batchDataSize, minStorageBufferOffsetAlignment);
uint64_t newPassBatchDataSize = nextBatchDataOffset + newBatch.dataSize;
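
The running outputParamsSize accumulation above aligns each batch's start offset up to the device's storage-buffer offset alignment before adding the batch's own size. A small sketch with assumed numbers (256-byte alignment, 5 u32 = 20 bytes per validated drawIndexedIndirect command), not Dawn code:

```cpp
#include <cstdint>
#include <cstdio>

uint64_t Align(uint64_t value, uint64_t alignment) {
    return (value + alignment - 1) / alignment * alignment;
}

int main() {
    const uint64_t minStorageBufferOffsetAlignment = 256;      // assumed device limit
    const uint64_t outputIndirectSize = 5 * sizeof(uint32_t);  // drawIndexedIndirect

    uint64_t outputParamsSize = 0;
    const uint64_t drawsPerBatch[] = {3, 40};
    for (uint64_t draws : drawsPerBatch) {
        const uint64_t batchOutputParamsOffset =
            Align(outputParamsSize, minStorageBufferOffsetAlignment);
        const uint64_t batchOutputParamsSize = draws * outputIndirectSize;
        outputParamsSize = batchOutputParamsOffset + batchOutputParamsSize;
        std::printf("batch at offset %llu, size %llu\n",
                    static_cast<unsigned long long>(batchOutputParamsOffset),
                    static_cast<unsigned long long>(batchOutputParamsSize));
    }
    // outputParamsSize is the total scratch capacity EnsureCapacity() must provide.
    return 0;
}
```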
@ -330,7 +329,7 @@ namespace dawn::native {
newBatch.dataBufferOffset = 0;
Pass newPass{};
newPass.clientIndirectBuffer = config.clientIndirectBuffer;
newPass.inputIndirectBuffer = config.inputIndirectBuffer;
newPass.batchDataSize = newBatch.dataSize;
newPass.batches.push_back(newBatch);
newPass.flags = 0;
@ -348,7 +347,7 @@ namespace dawn::native {
}
auto* const store = device->GetInternalPipelineStore();
ScratchBuffer& validatedParamsBuffer = store->scratchIndirectStorage;
ScratchBuffer& outputParamsBuffer = store->scratchIndirectStorage;
ScratchBuffer& batchDataBuffer = store->scratchStorage;
uint64_t requiredBatchDataBufferSize = 0;
@ -358,8 +357,8 @@ namespace dawn::native {
DAWN_TRY(batchDataBuffer.EnsureCapacity(requiredBatchDataBufferSize));
usageTracker->BufferUsedAs(batchDataBuffer.GetBuffer(), wgpu::BufferUsage::Storage);
DAWN_TRY(validatedParamsBuffer.EnsureCapacity(validatedParamsSize));
usageTracker->BufferUsedAs(validatedParamsBuffer.GetBuffer(), wgpu::BufferUsage::Indirect);
DAWN_TRY(outputParamsBuffer.EnsureCapacity(outputParamsSize));
usageTracker->BufferUsedAs(outputParamsBuffer.GetBuffer(), wgpu::BufferUsage::Indirect);
// Now we allocate and populate host-side batch data to be copied to the GPU.
for (Pass& pass : passes) {
@ -374,18 +373,18 @@ namespace dawn::native {
batch.batchInfo->flags = pass.flags;
uint32_t* indirectOffsets = reinterpret_cast<uint32_t*>(batch.batchInfo + 1);
uint64_t validatedParamsOffset = batch.validatedParamsOffset;
uint64_t outputParamsOffset = batch.outputParamsOffset;
for (auto& draw : batch.metadata->draws) {
// The shader uses this to index an array of u32, hence the division by 4 bytes.
*indirectOffsets++ = static_cast<uint32_t>(
(draw.clientBufferOffset - batch.clientIndirectOffset) / 4);
(draw.inputBufferOffset - batch.inputIndirectOffset) / 4);
draw.cmd->indirectBuffer = validatedParamsBuffer.GetBuffer();
draw.cmd->indirectOffset = validatedParamsOffset;
draw.cmd->indirectBuffer = outputParamsBuffer.GetBuffer();
draw.cmd->indirectOffset = outputParamsOffset;
if (pass.flags & kIndexedDraw) {
validatedParamsOffset += kDrawIndexedIndirectSize;
outputParamsOffset += kDrawIndexedIndirectSize;
} else {
validatedParamsOffset += kDrawIndirectSize;
outputParamsOffset += kDrawIndirectSize;
}
}
}
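
The loop above does two things per draw: it stores the draw's offset relative to the batch's slice of the input buffer as a u32 array index for the shader, and it rewrites the encoded draw command to read its parameters from the output scratch buffer. A sketch of the index conversion (illustrative, not Dawn code):

```cpp
#include <cstdint>

// The shader indexes an array<u32>, so the byte offset is divided by 4.
uint32_t ToShaderWordIndex(uint64_t drawInputBufferOffset,
                           uint64_t batchInputIndirectOffset) {
    return static_cast<uint32_t>((drawInputBufferOffset - batchInputIndirectOffset) / 4);
}

int main() {
    // e.g. a draw 40 bytes into the input indirect buffer, in a batch slice that
    // begins at byte 16, lands at word index 6 for the shader.
    return ToShaderWordIndex(/*drawInputBufferOffset=*/40,
                             /*batchInputIndirectOffset=*/16) == 6 ? 0 : 1;
}
```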
@ -402,12 +401,12 @@ namespace dawn::native {
bufferDataBinding.binding = 0;
bufferDataBinding.buffer = batchDataBuffer.GetBuffer();
BindGroupEntry& clientIndirectBinding = bindings[1];
clientIndirectBinding.binding = 1;
BindGroupEntry& inputIndirectBinding = bindings[1];
inputIndirectBinding.binding = 1;
BindGroupEntry& validatedParamsBinding = bindings[2];
validatedParamsBinding.binding = 2;
validatedParamsBinding.buffer = validatedParamsBuffer.GetBuffer();
BindGroupEntry& outputParamsBinding = bindings[2];
outputParamsBinding.binding = 2;
outputParamsBinding.buffer = outputParamsBuffer.GetBuffer();
BindGroupDescriptor bindGroupDescriptor = {};
bindGroupDescriptor.layout = layout.Get();
@ -426,15 +425,15 @@ namespace dawn::native {
Ref<ComputePassEncoder> passEncoder = commandEncoder->BeginComputePass();
passEncoder->APISetPipeline(pipeline);
clientIndirectBinding.buffer = pass.clientIndirectBuffer;
inputIndirectBinding.buffer = pass.inputIndirectBuffer;
for (const Batch& batch : pass.batches) {
bufferDataBinding.offset = batch.dataBufferOffset;
bufferDataBinding.size = batch.dataSize;
clientIndirectBinding.offset = batch.clientIndirectOffset;
clientIndirectBinding.size = batch.clientIndirectSize;
validatedParamsBinding.offset = batch.validatedParamsOffset;
validatedParamsBinding.size = batch.validatedParamsSize;
inputIndirectBinding.offset = batch.inputIndirectOffset;
inputIndirectBinding.size = batch.inputIndirectSize;
outputParamsBinding.offset = batch.outputParamsOffset;
outputParamsBinding.size = batch.outputParamsSize;
Ref<BindGroupBase> bindGroup;
DAWN_TRY_ASSIGN(bindGroup, device->CreateBindGroup(&bindGroupDescriptor));

View File

@ -145,8 +145,7 @@ namespace dawn::native::d3d12 {
RenderPipeline* pipeline,
uint32_t firstVertex,
uint32_t firstInstance) {
const FirstOffsetInfo& firstOffsetInfo = pipeline->GetFirstOffsetInfo();
if (!firstOffsetInfo.usesVertexIndex && !firstOffsetInfo.usesInstanceIndex) {
if (!pipeline->UsesVertexOrInstanceIndex()) {
return;
}
std::array<uint32_t, 2> offsets{firstVertex, firstInstance};

View File

@ -766,8 +766,7 @@ namespace dawn::native::d3d12 {
bool Device::ShouldDuplicateParametersForDrawIndirect(
const RenderPipelineBase* renderPipelineBase) const {
return ToBackend(renderPipelineBase)->GetFirstOffsetInfo().usesVertexIndex ||
ToBackend(renderPipelineBase)->GetFirstOffsetInfo().usesInstanceIndex;
return ToBackend(renderPipelineBase)->UsesVertexOrInstanceIndex();
}
} // namespace dawn::native::d3d12

View File

@ -367,7 +367,8 @@ namespace dawn::native::d3d12 {
*shaders[stage] = compiledShader[stage].GetD3D12ShaderBytecode();
}
mFirstOffsetInfo = compiledShader[SingleShaderStage::Vertex].firstOffsetInfo;
mUsesVertexOrInstanceIndex =
compiledShader[SingleShaderStage::Vertex].usesVertexOrInstanceIndex;
PipelineLayout* layout = ToBackend(GetLayout());
@ -455,8 +456,8 @@ namespace dawn::native::d3d12 {
return mPipelineState.Get();
}
const FirstOffsetInfo& RenderPipeline::GetFirstOffsetInfo() const {
return mFirstOffsetInfo;
bool RenderPipeline::UsesVertexOrInstanceIndex() const {
return mUsesVertexOrInstanceIndex;
}
void RenderPipeline::SetLabelImpl() {
@ -464,7 +465,7 @@ namespace dawn::native::d3d12 {
}
ComPtr<ID3D12CommandSignature> RenderPipeline::GetDrawIndirectCommandSignature() {
if (mFirstOffsetInfo.usesVertexIndex || mFirstOffsetInfo.usesInstanceIndex) {
if (mUsesVertexOrInstanceIndex) {
return ToBackend(GetLayout())
->GetDrawIndirectCommandSignatureWithInstanceVertexOffsets();
}
@ -473,7 +474,7 @@ namespace dawn::native::d3d12 {
}
ComPtr<ID3D12CommandSignature> RenderPipeline::GetDrawIndexedIndirectCommandSignature() {
if (mFirstOffsetInfo.usesVertexIndex || mFirstOffsetInfo.usesInstanceIndex) {
if (mUsesVertexOrInstanceIndex) {
return ToBackend(GetLayout())
->GetDrawIndexedIndirectCommandSignatureWithInstanceVertexOffsets();
}

View File

@ -38,7 +38,7 @@ namespace dawn::native::d3d12 {
D3D12_PRIMITIVE_TOPOLOGY GetD3D12PrimitiveTopology() const;
ID3D12PipelineState* GetPipelineState() const;
const FirstOffsetInfo& GetFirstOffsetInfo() const;
bool UsesVertexOrInstanceIndex() const;
// Dawn API
void SetLabelImpl() override;
@ -58,7 +58,7 @@ namespace dawn::native::d3d12 {
D3D12_PRIMITIVE_TOPOLOGY mD3d12PrimitiveTopology;
ComPtr<ID3D12PipelineState> mPipelineState;
FirstOffsetInfo mFirstOffsetInfo;
bool mUsesVertexOrInstanceIndex;
};
} // namespace dawn::native::d3d12

View File

@ -793,15 +793,7 @@ namespace dawn::native::d3d12 {
if (auto* data = transformOutputs.Get<tint::transform::FirstIndexOffset::Data>()) {
// TODO(dawn:549): Consider adding this information to the pipeline cache once we
// can store more than the shader blob in it.
compiledShader.firstOffsetInfo.usesVertexIndex = data->has_vertex_index;
if (compiledShader.firstOffsetInfo.usesVertexIndex) {
compiledShader.firstOffsetInfo.vertexIndexOffset = data->first_vertex_offset;
}
compiledShader.firstOffsetInfo.usesInstanceIndex = data->has_instance_index;
if (compiledShader.firstOffsetInfo.usesInstanceIndex) {
compiledShader.firstOffsetInfo.instanceIndexOffset =
data->first_instance_offset;
}
compiledShader.usesVertexOrInstanceIndex = data->has_vertex_or_instance_index;
}
}

View File

@ -29,13 +29,6 @@ namespace dawn::native::d3d12 {
class Device;
class PipelineLayout;
struct FirstOffsetInfo {
bool usesVertexIndex;
uint32_t vertexIndexOffset;
bool usesInstanceIndex;
uint32_t instanceIndexOffset;
};
// Manages a ref to one of the various representations of shader blobs and information used to
// emulate vertex/instance index starts
struct CompiledShader {
@ -44,7 +37,7 @@ namespace dawn::native::d3d12 {
ComPtr<IDxcBlob> compiledDXCShader;
D3D12_SHADER_BYTECODE GetD3D12ShaderBytecode() const;
FirstOffsetInfo firstOffsetInfo;
bool usesVertexOrInstanceIndex;
};
class ShaderModule final : public ShaderModuleBase {

View File

@ -42,14 +42,8 @@ FirstIndexOffset::BindingPoint::BindingPoint(uint32_t b, uint32_t g)
: binding(b), group(g) {}
FirstIndexOffset::BindingPoint::~BindingPoint() = default;
FirstIndexOffset::Data::Data(bool has_vtx_index,
bool has_inst_index,
uint32_t first_vtx_offset,
uint32_t first_inst_offset)
: has_vertex_index(has_vtx_index),
has_instance_index(has_inst_index),
first_vertex_offset(first_vtx_offset),
first_instance_offset(first_inst_offset) {}
FirstIndexOffset::Data::Data(bool has_vtx_or_inst_index)
: has_vertex_or_instance_index(has_vtx_or_inst_index) {}
FirstIndexOffset::Data::Data(const Data&) = default;
FirstIndexOffset::Data::~Data() = default;
@ -80,8 +74,7 @@ void FirstIndexOffset::Run(CloneContext& ctx,
std::unordered_map<const sem::Variable*, const char*> builtin_vars;
std::unordered_map<const sem::StructMember*, const char*> builtin_members;
bool has_vertex_index = false;
bool has_instance_index = false;
bool has_vertex_or_instance_index = false;
// Traverse the AST scanning for builtin accesses via variables (includes
// parameters) or structure member accesses.
@ -93,12 +86,12 @@ void FirstIndexOffset::Run(CloneContext& ctx,
if (builtin == ast::Builtin::kVertexIndex) {
auto* sem_var = ctx.src->Sem().Get(var);
builtin_vars.emplace(sem_var, kFirstVertexName);
has_vertex_index = true;
has_vertex_or_instance_index = true;
}
if (builtin == ast::Builtin::kInstanceIndex) {
auto* sem_var = ctx.src->Sem().Get(var);
builtin_vars.emplace(sem_var, kFirstInstanceName);
has_instance_index = true;
has_vertex_or_instance_index = true;
}
}
}
@ -110,29 +103,23 @@ void FirstIndexOffset::Run(CloneContext& ctx,
if (builtin == ast::Builtin::kVertexIndex) {
auto* sem_mem = ctx.src->Sem().Get(member);
builtin_members.emplace(sem_mem, kFirstVertexName);
has_vertex_index = true;
has_vertex_or_instance_index = true;
}
if (builtin == ast::Builtin::kInstanceIndex) {
auto* sem_mem = ctx.src->Sem().Get(member);
builtin_members.emplace(sem_mem, kFirstInstanceName);
has_instance_index = true;
has_vertex_or_instance_index = true;
}
}
}
}
}
// Byte offsets on the uniform buffer
uint32_t vertex_index_offset = 0;
uint32_t instance_index_offset = 0;
if (has_vertex_index || has_instance_index) {
if (has_vertex_or_instance_index) {
// Add uniform buffer members and calculate byte offsets
ast::StructMemberList members;
members.push_back(ctx.dst->Member(kFirstVertexName, ctx.dst->ty.u32()));
vertex_index_offset = 0;
members.push_back(ctx.dst->Member(kFirstInstanceName, ctx.dst->ty.u32()));
instance_index_offset = 4;
auto* struct_ = ctx.dst->Structure(ctx.dst->Sym(), std::move(members));
// Create a global to hold the uniform buffer
@ -172,8 +159,7 @@ void FirstIndexOffset::Run(CloneContext& ctx,
ctx.Clone();
outputs.Add<Data>(has_vertex_index, has_instance_index, vertex_index_offset,
instance_index_offset);
outputs.Add<Data>(has_vertex_or_instance_index);
}
} // namespace tint::transform
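
Why the per-field offsets could be dropped from Data: when either builtin is used, the transform always emits both uniform members in a fixed order, so firstVertex and firstInstance sit at constant byte offsets (0 and 4) that the backend no longer needs reported. A sketch of that layout (illustrative struct, not Tint's API):

```cpp
#include <cstddef>
#include <cstdint>

// Mirrors the uniform buffer the transform injects: two u32 members, declared
// in a fixed order, so the offsets are compile-time constants.
struct FirstIndexOffsetUniform {
    uint32_t firstVertex;    // byte offset 0
    uint32_t firstInstance;  // byte offset 4
};

static_assert(offsetof(FirstIndexOffsetUniform, firstVertex) == 0,
              "firstVertex is always at offset 0");
static_assert(offsetof(FirstIndexOffsetUniform, firstInstance) == 4,
              "firstInstance is always at offset 4");

int main() { return 0; }
```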

View File

@ -84,14 +84,9 @@ class FirstIndexOffset final : public Castable<FirstIndexOffset, Transform> {
/// Data holds information about shader usage and constant buffer offsets.
struct Data final : public Castable<Data, transform::Data> {
/// Constructor
/// @param has_vtx_index True if the shader uses vertex_index
/// @param has_inst_index True if the shader uses instance_index
/// @param first_vtx_offset Offset of first vertex into constant buffer
/// @param first_inst_offset Offset of first instance into constant buffer
Data(bool has_vtx_index,
bool has_inst_index,
uint32_t first_vtx_offset,
uint32_t first_inst_offset);
/// @param has_vtx_or_inst_index True if the shader uses vertex_index or
/// instance_index
explicit Data(bool has_vtx_or_inst_index);
/// Copy constructor
Data(const Data&);
@ -100,13 +95,7 @@ class FirstIndexOffset final : public Castable<FirstIndexOffset, Transform> {
~Data() override;
/// True if the shader uses vertex_index
const bool has_vertex_index;
/// True if the shader uses instance_index
const bool has_instance_index;
/// Offset of first vertex into constant buffer
const uint32_t first_vertex_offset;
/// Offset of first instance into constant buffer
const uint32_t first_instance_offset;
const bool has_vertex_or_instance_index;
};
/// Constructor

View File

@ -86,10 +86,7 @@ fn entry() -> @builtin(position) vec4<f32> {
auto* data = got.data.Get<FirstIndexOffset::Data>();
ASSERT_NE(data, nullptr);
EXPECT_EQ(data->has_vertex_index, false);
EXPECT_EQ(data->has_instance_index, false);
EXPECT_EQ(data->first_vertex_offset, 0u);
EXPECT_EQ(data->first_instance_offset, 0u);
EXPECT_EQ(data->has_vertex_or_instance_index, false);
}
TEST_F(FirstIndexOffsetTest, BasicModuleVertexIndex) {
@ -133,10 +130,7 @@ fn entry(@builtin(vertex_index) vert_idx : u32) -> @builtin(position) vec4<f32>
auto* data = got.data.Get<FirstIndexOffset::Data>();
ASSERT_NE(data, nullptr);
EXPECT_EQ(data->has_vertex_index, true);
EXPECT_EQ(data->has_instance_index, false);
EXPECT_EQ(data->first_vertex_offset, 0u);
EXPECT_EQ(data->first_instance_offset, 4u);
EXPECT_EQ(data->has_vertex_or_instance_index, true);
}
TEST_F(FirstIndexOffsetTest, BasicModuleVertexIndex_OutOfOrder) {
@ -180,10 +174,7 @@ fn test(vert_idx : u32) -> u32 {
auto* data = got.data.Get<FirstIndexOffset::Data>();
ASSERT_NE(data, nullptr);
EXPECT_EQ(data->has_vertex_index, true);
EXPECT_EQ(data->has_instance_index, false);
EXPECT_EQ(data->first_vertex_offset, 0u);
EXPECT_EQ(data->first_instance_offset, 4u);
EXPECT_EQ(data->has_vertex_or_instance_index, true);
}
TEST_F(FirstIndexOffsetTest, BasicModuleInstanceIndex) {
@ -227,10 +218,7 @@ fn entry(@builtin(instance_index) inst_idx : u32) -> @builtin(position) vec4<f32
auto* data = got.data.Get<FirstIndexOffset::Data>();
ASSERT_NE(data, nullptr);
EXPECT_EQ(data->has_vertex_index, false);
EXPECT_EQ(data->has_instance_index, true);
EXPECT_EQ(data->first_vertex_offset, 0u);
EXPECT_EQ(data->first_instance_offset, 4u);
EXPECT_EQ(data->has_vertex_or_instance_index, true);
}
TEST_F(FirstIndexOffsetTest, BasicModuleInstanceIndex_OutOfOrder) {
@ -274,10 +262,7 @@ fn test(inst_idx : u32) -> u32 {
auto* data = got.data.Get<FirstIndexOffset::Data>();
ASSERT_NE(data, nullptr);
EXPECT_EQ(data->has_vertex_index, false);
EXPECT_EQ(data->has_instance_index, true);
EXPECT_EQ(data->first_vertex_offset, 0u);
EXPECT_EQ(data->first_instance_offset, 4u);
EXPECT_EQ(data->has_vertex_or_instance_index, true);
}
TEST_F(FirstIndexOffsetTest, BasicModuleBothIndex) {
@ -333,10 +318,7 @@ fn entry(inputs : Inputs) -> @builtin(position) vec4<f32> {
auto* data = got.data.Get<FirstIndexOffset::Data>();
ASSERT_NE(data, nullptr);
EXPECT_EQ(data->has_vertex_index, true);
EXPECT_EQ(data->has_instance_index, true);
EXPECT_EQ(data->first_vertex_offset, 0u);
EXPECT_EQ(data->first_instance_offset, 4u);
EXPECT_EQ(data->has_vertex_or_instance_index, true);
}
TEST_F(FirstIndexOffsetTest, BasicModuleBothIndex_OutOfOrder) {
@ -392,10 +374,7 @@ fn test(instance_idx : u32, vert_idx : u32) -> u32 {
auto* data = got.data.Get<FirstIndexOffset::Data>();
ASSERT_NE(data, nullptr);
EXPECT_EQ(data->has_vertex_index, true);
EXPECT_EQ(data->has_instance_index, true);
EXPECT_EQ(data->first_vertex_offset, 0u);
EXPECT_EQ(data->first_instance_offset, 4u);
EXPECT_EQ(data->has_vertex_or_instance_index, true);
}
TEST_F(FirstIndexOffsetTest, NestedCalls) {
@ -447,10 +426,7 @@ fn entry(@builtin(vertex_index) vert_idx : u32) -> @builtin(position) vec4<f32>
auto* data = got.data.Get<FirstIndexOffset::Data>();
ASSERT_NE(data, nullptr);
EXPECT_EQ(data->has_vertex_index, true);
EXPECT_EQ(data->has_instance_index, false);
EXPECT_EQ(data->first_vertex_offset, 0u);
EXPECT_EQ(data->first_instance_offset, 4u);
EXPECT_EQ(data->has_vertex_or_instance_index, true);
}
TEST_F(FirstIndexOffsetTest, NestedCalls_OutOfOrder) {
@ -502,10 +478,7 @@ fn func1(vert_idx : u32) -> u32 {
auto* data = got.data.Get<FirstIndexOffset::Data>();
ASSERT_NE(data, nullptr);
EXPECT_EQ(data->has_vertex_index, true);
EXPECT_EQ(data->has_instance_index, false);
EXPECT_EQ(data->first_vertex_offset, 0u);
EXPECT_EQ(data->first_instance_offset, 4u);
EXPECT_EQ(data->has_vertex_or_instance_index, true);
}
TEST_F(FirstIndexOffsetTest, MultipleEntryPoints) {
@ -573,10 +546,7 @@ fn entry_c(@builtin(instance_index) inst_idx : u32) -> @builtin(position) vec4<f
auto* data = got.data.Get<FirstIndexOffset::Data>();
ASSERT_NE(data, nullptr);
EXPECT_EQ(data->has_vertex_index, true);
EXPECT_EQ(data->has_instance_index, true);
EXPECT_EQ(data->first_vertex_offset, 0u);
EXPECT_EQ(data->first_instance_offset, 4u);
EXPECT_EQ(data->has_vertex_or_instance_index, true);
}
TEST_F(FirstIndexOffsetTest, MultipleEntryPoints_OutOfOrder) {
@ -644,10 +614,7 @@ fn func(i : u32) -> u32 {
auto* data = got.data.Get<FirstIndexOffset::Data>();
ASSERT_NE(data, nullptr);
EXPECT_EQ(data->has_vertex_index, true);
EXPECT_EQ(data->has_instance_index, true);
EXPECT_EQ(data->first_vertex_offset, 0u);
EXPECT_EQ(data->first_instance_offset, 4u);
EXPECT_EQ(data->has_vertex_or_instance_index, true);
}
} // namespace