Remove explicit usage transition from the API and validation

This removes the following for both Buffer and Texture:
 - The builder's SetInitialUsage
 - The object's FreezeUsage and TransitionUsage methods
 - The CommandBuffer Transition<Object>Usage methods

All samples and tests are simplified as a result. This also obsoletes
the UsageValidationTest, which is removed.
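
For example, the Buffer builder pattern in the samples changes roughly as
follows (an illustrative sketch based on the sample code touched by this
patch; the size and usage flags are placeholders, not taken from any one
sample):

    // Before: usage had to be declared up front and frozen before use.
    nxt::Buffer buffer = device.CreateBufferBuilder()
        .SetAllowedUsage(nxt::BufferUsageBit::TransferDst | nxt::BufferUsageBit::Uniform)
        .SetInitialUsage(nxt::BufferUsageBit::TransferDst)  // removed by this patch
        .SetSize(256)
        .GetResult();
    buffer.FreezeUsage(nxt::BufferUsageBit::Uniform);        // removed by this patch

    // After: only the allowed usages are declared; transitions are implicit.
    nxt::Buffer buffer = device.CreateBufferBuilder()
        .SetAllowedUsage(nxt::BufferUsageBit::TransferDst | nxt::BufferUsageBit::Uniform)
        .SetSize(256)
        .GetResult();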

Some validation depended on the "current usage" and hasn't been
reintroduced for implicit transitions yet, so the following are not
caught as errors:
 - Buffers can be used while they are mapped.
 - Swapchain textures can be used after they have been presented.

Validation for these will involve collecting all the resources used by a
command buffer and will be done in a follow-up patch.

Author: Corentin Wallez
Date: 2018-07-09 15:15:07 +02:00
Committed by: Corentin Wallez
Parent: d2312e8138
Commit: d8c068fb4f
67 changed files with 143 additions and 1012 deletions

@@ -125,7 +125,6 @@ namespace {
.SetAllowedUsage(nxt::BufferUsageBit::Vertex | nxt::BufferUsageBit::Index)
.SetSize(256)
.GetResult();
-defaultBuffer.FreezeUsage(nxt::BufferUsageBit::Vertex | nxt::BufferUsageBit::Index);
for (const auto& bv : scene.bufferViews) {
const auto& iBufferViewID = bv.first;
@@ -151,7 +150,7 @@ namespace {
size_t iBufferViewSize =
iBufferView.byteLength ? iBufferView.byteLength :
(iBuffer.data.size() - iBufferView.byteOffset);
-auto oBuffer = utils::CreateFrozenBufferFromData(device, &iBuffer.data.at(iBufferView.byteOffset), static_cast<uint32_t>(iBufferViewSize), usage);
+auto oBuffer = utils::CreateBufferFromData(device, &iBuffer.data.at(iBufferView.byteOffset), static_cast<uint32_t>(iBufferViewSize), usage);
buffers[iBufferViewID] = std::move(oBuffer);
}
}
@@ -299,7 +298,6 @@ namespace {
auto uniformBuffer = device.CreateBufferBuilder()
.SetAllowedUsage(nxt::BufferUsageBit::TransferDst | nxt::BufferUsageBit::Uniform)
-.SetInitialUsage(nxt::BufferUsageBit::TransferDst)
.SetSize(sizeof(u_transform_block))
.GetResult();
@@ -440,13 +438,11 @@ namespace {
fprintf(stderr, "unsupported image.component %d\n", iImage.component);
}
-nxt::Buffer staging = utils::CreateFrozenBufferFromData(device, data, rowPitch * iImage.height, nxt::BufferUsageBit::TransferSrc);
+nxt::Buffer staging = utils::CreateBufferFromData(device, data, rowPitch * iImage.height, nxt::BufferUsageBit::TransferSrc);
auto cmdbuf = device.CreateCommandBufferBuilder()
-.TransitionTextureUsage(oTexture, nxt::TextureUsageBit::TransferDst)
.CopyBufferToTexture(staging, 0, rowPitch, oTexture, 0, 0, 0, iImage.width, iImage.height, 1, 0)
.GetResult();
queue.Submit(1, &cmdbuf);
-oTexture.FreezeUsage(nxt::TextureUsageBit::Sampled);
textures[iTextureID] = oTexture.CreateTextureViewBuilder().GetResult();
}
@@ -494,7 +490,6 @@ namespace {
}
}
const MaterialInfo& material = getMaterial(iPrim.material, strides[0], strides[1], strides[2]);
-material.uniformBuffer.TransitionUsage(nxt::BufferUsageBit::TransferDst);
// TODO(cwallez@google.com): This is updating the uniform buffer with a device-level command
// but the draw is queue level command that is pipelined. This causes bad rendering for models
// that use the same part multiple time.
@@ -502,7 +497,6 @@ namespace {
sizeof(u_transform_block),
reinterpret_cast<const uint8_t*>(&transforms));
cmd.SetRenderPipeline(material.pipeline);
-cmd.TransitionBufferUsage(material.uniformBuffer, nxt::BufferUsageBit::Uniform);
cmd.SetBindGroup(0, material.bindGroup0);
uint32_t vertexCount = 0;
@@ -591,7 +585,6 @@ namespace {
.GetResult();
queue.Submit(1, &commands);
-backbuffer.TransitionUsage(nxt::TextureUsageBit::Present);
swapchain.Present(backbuffer);
DoFlush();
}