Make the Vulkan backend support implicit barriers.

With this commit the Vulkan backend completely ignores the explicit
barrier commands passed from the frontend, and generates its own
pipeline barriers.

Right now it encodes each barrier just before the resources are used,
which is inefficient but will be optimized later.

This commit also makes the frontend command buffer validation perform
the checks necessary for implicit barriers (even though they are
redundant with the explicit-barrier checks), because the tracking they
require pre-computes the per-pass usage information that the Vulkan
backend consumes.

Tests for usage validation inside passes will be added once the concept
of transition is removed from the API.
Corentin Wallez 2018-07-10 18:03:22 +02:00 committed by Corentin Wallez
parent 117f2f0ad6
commit aa13be96e8
29 changed files with 409 additions and 77 deletions
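Before the per-file diffs, a rough sketch of the two-phase flow described above may help. This is a standalone illustration with simplified stand-in types, not the actual NXT classes:

#include <cstdint>
#include <map>

using Usage = uint32_t;  // stand-in for nxt::BufferUsageBit / nxt::TextureUsageBit
struct Resource;         // stand-in for BufferBase / TextureBase

// Phase 1 (frontend validation): accumulate the union of usages seen for
// each resource in a pass.
struct PassUsageTracker {
    std::map<Resource*, Usage> usages;
    void UsedAs(Resource* r, Usage u) { usages[r] |= u; }
};

// Phase 2 (Vulkan backend recording): replay the pre-computed list as one
// barrier per resource, just before the pass starts using it (the "encode
// each barrier just before the resources are used" strategy above).
template <typename RecordBarrierFn>
void TransitionForPass(const PassUsageTracker& pass, RecordBarrierFn recordBarrier) {
    for (const auto& it : pass.usages) {
        recordBarrier(it.first, it.second);
    }
}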

View File

@@ -364,6 +364,7 @@ list(APPEND BACKEND_SOURCES
${BACKEND_DIR}/InputState.h
${BACKEND_DIR}/RenderPipeline.cpp
${BACKEND_DIR}/RenderPipeline.h
${BACKEND_DIR}/PassResourceUsage.h
${BACKEND_DIR}/PerStage.cpp
${BACKEND_DIR}/PerStage.h
${BACKEND_DIR}/Pipeline.cpp

View File

@@ -122,6 +122,142 @@ namespace backend {
return true;
}
enum class PassType {
Render,
Compute,
};
// Helper class to encapsulate the logic of tracking per-resource usage during the
// validation of command buffer passes. It is used both to know if there are validation
// errors, and to get a list of resources used per pass for backends that need the
// information.
class PassResourceUsageTracker {
public:
void BufferUsedAs(BufferBase* buffer, nxt::BufferUsageBit usage) {
// std::map's operator[] will create the key and return 0 if the key didn't exist
// before.
nxt::BufferUsageBit& storedUsage = mBufferUsages[buffer];
if (usage == nxt::BufferUsageBit::Storage &&
storedUsage & nxt::BufferUsageBit::Storage) {
mStorageUsedMultipleTimes = true;
}
storedUsage |= usage;
}
void TextureUsedAs(TextureBase* texture, nxt::TextureUsageBit usage) {
// std::map's operator[] will create the key and return 0 if the key didn't exist
// before.
nxt::TextureUsageBit& storedUsage = mTextureUsages[texture];
if (usage == nxt::TextureUsageBit::Storage &&
storedUsage & nxt::TextureUsageBit::Storage) {
mStorageUsedMultipleTimes = true;
}
storedUsage |= usage;
}
// Performs the per-pass usage validation checks
bool AreUsagesValid(PassType pass) const {
// Storage resources cannot be used twice in the same compute pass
if (pass == PassType::Compute && mStorageUsedMultipleTimes) {
return false;
}
// Buffers can only be used as single-write or multiple read.
for (auto& it : mBufferUsages) {
BufferBase* buffer = it.first;
nxt::BufferUsageBit usage = it.second;
if (usage & ~buffer->GetAllowedUsage()) {
return false;
}
bool readOnly = (usage & kReadOnlyBufferUsages) == usage;
bool singleUse = nxt::HasZeroOrOneBits(usage);
if (!readOnly && !singleUse) {
return false;
}
}
// Textures can only be used as single-write or multiple read.
// TODO(cwallez@chromium.org): implement per-subresource tracking
for (auto& it : mTextureUsages) {
TextureBase* texture = it.first;
nxt::TextureUsageBit usage = it.second;
if (usage & ~texture->GetAllowedUsage()) {
return false;
}
// For textures the only read-only usage in a pass is Sampled, so checking the
// usage constraint simplifies to checking a single usage bit is set.
if (!nxt::HasZeroOrOneBits(usage)) {
return false;
}
}
return true;
}
// Returns the per-pass usage for use by backends for APIs with explicit barriers.
PassResourceUsage AcquireResourceUsage() {
PassResourceUsage result;
result.buffers.reserve(mBufferUsages.size());
result.bufferUsages.reserve(mBufferUsages.size());
result.textures.reserve(mTextureUsages.size());
result.textureUsages.reserve(mTextureUsages.size());
for (auto& it : mBufferUsages) {
result.buffers.push_back(it.first);
result.bufferUsages.push_back(it.second);
}
for (auto& it : mTextureUsages) {
result.textures.push_back(it.first);
result.textureUsages.push_back(it.second);
}
return result;
}
private:
std::map<BufferBase*, nxt::BufferUsageBit> mBufferUsages;
std::map<TextureBase*, nxt::TextureUsageBit> mTextureUsages;
bool mStorageUsedMultipleTimes = false;
};
void TrackBindGroupResourceUsage(BindGroupBase* group, PassResourceUsageTracker* tracker) {
const auto& layoutInfo = group->GetLayout()->GetBindingInfo();
for (uint32_t i : IterateBitSet(layoutInfo.mask)) {
nxt::BindingType type = layoutInfo.types[i];
switch (type) {
case nxt::BindingType::UniformBuffer: {
BufferBase* buffer = group->GetBindingAsBufferView(i)->GetBuffer();
tracker->BufferUsedAs(buffer, nxt::BufferUsageBit::Uniform);
} break;
case nxt::BindingType::StorageBuffer: {
BufferBase* buffer = group->GetBindingAsBufferView(i)->GetBuffer();
tracker->BufferUsedAs(buffer, nxt::BufferUsageBit::Storage);
} break;
case nxt::BindingType::SampledTexture: {
TextureBase* texture = group->GetBindingAsTextureView(i)->GetTexture();
tracker->TextureUsedAs(texture, nxt::TextureUsageBit::Sampled);
} break;
case nxt::BindingType::Sampler:
break;
}
}
}
} // namespace
// CommandBuffer
@@ -171,6 +307,12 @@ namespace backend {
return std::move(mIterator);
}
std::vector<PassResourceUsage> CommandBufferBuilder::AcquirePassResourceUsage() {
ASSERT(!mWerePassUsagesAcquired);
mWerePassUsagesAcquired = true;
return std::move(mPassResourceUsages);
}
CommandBufferBase* CommandBufferBuilder::GetResultImpl() {
MoveToIterator();
return mDevice->CreateCommandBuffer(this);
@@ -283,11 +425,19 @@ namespace backend {
}
bool CommandBufferBuilder::ValidateComputePass() {
PassResourceUsageTracker usageTracker;
Command type;
while (mIterator.NextCommandId(&type)) {
switch (type) {
case Command::EndComputePass: {
mIterator.NextCommand<EndComputePassCmd>();
if (!usageTracker.AreUsagesValid(PassType::Compute)) {
return false;
}
mPassResourceUsages.push_back(usageTracker.AcquireResourceUsage());
mState->EndPass();
return true;
} break;
@@ -322,6 +472,8 @@ namespace backend {
case Command::SetBindGroup: {
SetBindGroupCmd* cmd = mIterator.NextCommand<SetBindGroupCmd>();
TrackBindGroupResourceUsage(cmd->group.Get(), &usageTracker);
if (!mState->SetBindGroup(cmd->index, cmd->group.Get())) {
return false;
}
@@ -342,11 +494,30 @@ namespace backend {
return false;
}
PassResourceUsageTracker usageTracker;
// Track usage of the render pass attachments
for (uint32_t i : IterateBitSet(renderPass->GetColorAttachmentMask())) {
TextureBase* texture = renderPass->GetColorAttachment(i).view->GetTexture();
usageTracker.TextureUsedAs(texture, nxt::TextureUsageBit::OutputAttachment);
}
if (renderPass->HasDepthStencilAttachment()) {
TextureBase* texture = renderPass->GetDepthStencilAttachment().view->GetTexture();
usageTracker.TextureUsedAs(texture, nxt::TextureUsageBit::OutputAttachment);
}
Command type;
while (mIterator.NextCommandId(&type)) {
switch (type) {
case Command::EndRenderPass: {
mIterator.NextCommand<EndRenderPassCmd>();
if (!usageTracker.AreUsagesValid(PassType::Render)) {
return false;
}
mPassResourceUsages.push_back(usageTracker.AcquireResourceUsage());
mState->EndPass();
return true;
} break;
@@ -408,6 +579,8 @@ namespace backend {
case Command::SetBindGroup: {
SetBindGroupCmd* cmd = mIterator.NextCommand<SetBindGroupCmd>();
TrackBindGroupResourceUsage(cmd->group.Get(), &usageTracker);
if (!mState->SetBindGroup(cmd->index, cmd->group.Get())) {
return false;
}
@@ -415,6 +588,8 @@ namespace backend {
case Command::SetIndexBuffer: {
SetIndexBufferCmd* cmd = mIterator.NextCommand<SetIndexBufferCmd>();
usageTracker.BufferUsedAs(cmd->buffer.Get(), nxt::BufferUsageBit::Index);
if (!mState->SetIndexBuffer(cmd->buffer.Get())) {
return false;
}
@@ -426,6 +601,7 @@ namespace backend {
mIterator.NextData<uint32_t>(cmd->count);
for (uint32_t i = 0; i < cmd->count; ++i) {
usageTracker.BufferUsedAs(buffers[i].Get(), nxt::BufferUsageBit::Vertex);
mState->SetVertexBuffer(cmd->startSlot + i, buffers[i].Get());
}
} break;
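
The "single-write or multiple read" rule that AreUsagesValid enforces above can be checked in isolation. A minimal standalone sketch follows; the bit values are assumed for illustration and are not the real nxt::BufferUsageBit values:

#include <cassert>
#include <cstdint>

// Assumed bit values, for illustration only.
constexpr uint32_t TransferSrc = 0x01, TransferDst = 0x02, Index = 0x04,
                   Vertex = 0x08, Uniform = 0x10, Storage = 0x20;
constexpr uint32_t kReadOnlyBufferUsages = TransferSrc | Index | Vertex | Uniform;

// True when at most one bit is set, mirroring nxt::HasZeroOrOneBits.
constexpr bool HasZeroOrOneBits(uint32_t v) { return (v & (v - 1)) == 0; }

bool PassUsageIsValid(uint32_t usage) {
    bool readOnly = (usage & kReadOnlyBufferUsages) == usage;  // only read bits set
    bool singleUse = HasZeroOrOneBits(usage);                  // at most one usage
    return readOnly || singleUse;
}

int main() {
    assert(PassUsageIsValid(Uniform | Vertex));    // several reads: allowed
    assert(PassUsageIsValid(Storage));             // one write: allowed
    assert(!PassUsageIsValid(Storage | Uniform));  // write + read mix: rejected
}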

View File

@@ -19,6 +19,7 @@
#include "backend/Builder.h"
#include "backend/CommandAllocator.h"
#include "backend/PassResourceUsage.h"
#include "backend/RefCounted.h"
#include <memory>
@@ -59,6 +60,7 @@ namespace backend {
bool ValidateGetResult();
CommandIterator AcquireCommands();
std::vector<PassResourceUsage> AcquirePassResourceUsage();
// NXT API
void BeginComputePass();
@@ -144,6 +146,9 @@ namespace backend {
CommandIterator mIterator;
bool mWasMovedToIterator = false;
bool mWereCommandsAcquired = false;
bool mWerePassUsagesAcquired = false;
std::vector<PassResourceUsage> mPassResourceUsages;
};
} // namespace backend

View File

@@ -28,6 +28,7 @@
#include "common/BitSetIterator.h"
namespace backend {
CommandBufferStateTracker::CommandBufferStateTracker(CommandBufferBuilder* mBuilder)
: mBuilder(mBuilder) {
}

View File

@@ -24,6 +24,7 @@
#include <set>
namespace backend {
class CommandBufferStateTracker {
public:
explicit CommandBufferStateTracker(CommandBufferBuilder* builder);
@@ -97,6 +98,7 @@ namespace backend {
std::map<TextureBase*, nxt::TextureUsageBit> mMostRecentTextureUsages;
std::set<TextureBase*> mTexturesAttached;
};
} // namespace backend
#endif // BACKEND_COMMANDBUFFERSTATETRACKER_H

View File

@@ -0,0 +1,40 @@
// Copyright 2018 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_PASSRESOURCEUSAGE_H
#define BACKEND_PASSRESOURCEUSAGE_H
#include "nxt/nxtcpp.h"
#include <vector>
namespace backend {
class BufferBase;
class TextureBase;
// Which resources are used by a pass and how they are used. The command buffer validation
// pre-computes this information so that backends with explicit barriers don't have to
// re-compute it.
struct PassResourceUsage {
std::vector<BufferBase*> buffers;
std::vector<nxt::BufferUsageBit> bufferUsages;
std::vector<TextureBase*> textures;
std::vector<nxt::TextureUsageBit> textureUsages;
};
} // namespace backend
#endif // BACKEND_PASSRESOURCEUSAGE_H

View File

@@ -82,6 +82,8 @@ namespace backend {
return;
}
OnBeforePresent(texture);
mImplementation.Present(mImplementation.userData);
}

View File

@@ -42,6 +42,7 @@ namespace backend {
protected:
const nxtSwapChainImplementation& GetImplementation();
virtual TextureBase* GetNextTextureImpl(TextureBuilder* builder) = 0;
virtual void OnBeforePresent(TextureBase* texture) = 0;
private:
DeviceBase* mDevice = nullptr;

View File

@@ -28,6 +28,14 @@ namespace backend {
bool TextureFormatHasStencil(nxt::TextureFormat format);
bool TextureFormatHasDepthOrStencil(nxt::TextureFormat format);
static constexpr nxt::TextureUsageBit kReadOnlyTextureUsages =
nxt::TextureUsageBit::TransferSrc | nxt::TextureUsageBit::Sampled |
nxt::TextureUsageBit::Present;
static constexpr nxt::TextureUsageBit kWritableTextureUsages =
nxt::TextureUsageBit::TransferDst | nxt::TextureUsageBit::Storage |
nxt::TextureUsageBit::OutputAttachment;
class TextureBase : public RefCounted {
public:
TextureBase(TextureBuilder* builder);

View File

@@ -51,7 +51,12 @@ namespace backend { namespace d3d12 {
nxtSwapChainImplementation CreateNativeSwapChainImpl(nxtDevice device, HWND window) {
Device* backendDevice = reinterpret_cast<Device*>(device);
return CreateSwapChainImplementation(new NativeSwapChainImpl(backendDevice, window));
nxtSwapChainImplementation impl;
impl = CreateSwapChainImplementation(new NativeSwapChainImpl(backendDevice, window));
impl.textureUsage = NXT_TEXTURE_USAGE_BIT_PRESENT;
return impl;
}
nxtTextureFormat GetNativeSwapChainPreferredFormat(

View File

@@ -44,4 +44,7 @@ namespace backend { namespace d3d12 {
return new Texture(builder, nativeTexture);
}
void SwapChain::OnBeforePresent(TextureBase*) {
}
}} // namespace backend::d3d12

View File

@@ -26,6 +26,7 @@ namespace backend { namespace d3d12 {
protected:
TextureBase* GetNextTextureImpl(TextureBuilder* builder) override;
void OnBeforePresent(TextureBase* texture) override;
};
}} // namespace backend::d3d12

View File

@@ -28,6 +28,7 @@ namespace backend { namespace metal {
protected:
TextureBase* GetNextTextureImpl(TextureBuilder* builder) override;
void OnBeforePresent(TextureBase* texture) override;
};
}} // namespace backend::metal

View File

@@ -44,4 +44,7 @@ namespace backend { namespace metal {
return new Texture(builder, nativeTexture);
}
void SwapChain::OnBeforePresent(TextureBase*) {
}
}} // namespace backend::metal

View File

@@ -251,4 +251,8 @@ namespace backend { namespace null {
TextureBase* SwapChain::GetNextTextureImpl(TextureBuilder* builder) {
return GetDevice()->CreateTexture(builder);
}
void SwapChain::OnBeforePresent(TextureBase*) {
}
}} // namespace backend::null

View File

@@ -184,6 +184,7 @@ namespace backend { namespace null {
protected:
TextureBase* GetNextTextureImpl(TextureBuilder* builder) override;
void OnBeforePresent(TextureBase*) override;
};
}} // namespace backend::null

View File

@@ -41,4 +41,7 @@ namespace backend { namespace opengl {
return new Texture(builder, nativeTexture);
}
void SwapChain::OnBeforePresent(TextureBase*) {
}
}} // namespace backend::opengl

View File

@@ -30,6 +30,7 @@ namespace backend { namespace opengl {
protected:
TextureBase* GetNextTextureImpl(TextureBuilder* builder) override;
void OnBeforePresent(TextureBase* texture) override;
};
}} // namespace backend::opengl

View File

@@ -162,17 +162,29 @@ namespace backend { namespace vulkan {
return mHandle;
}
void Buffer::RecordBarrier(VkCommandBuffer commands,
nxt::BufferUsageBit currentUsage,
nxt::BufferUsageBit targetUsage) const {
VkPipelineStageFlags srcStages = VulkanPipelineStage(currentUsage);
VkPipelineStageFlags dstStages = VulkanPipelineStage(targetUsage);
void Buffer::TransitionUsageNow(VkCommandBuffer commands, nxt::BufferUsageBit usage) {
bool lastIncludesTarget = (mLastUsage & usage) == usage;
bool lastReadOnly = (mLastUsage & kReadOnlyBufferUsages) == mLastUsage;
// We can skip transitions to already current read-only usages.
if (lastIncludesTarget && lastReadOnly) {
return;
}
// Special-case for the initial transition: Vulkan doesn't allow access flags to be 0.
if (mLastUsage == nxt::BufferUsageBit::None) {
mLastUsage = usage;
return;
}
VkPipelineStageFlags srcStages = VulkanPipelineStage(mLastUsage);
VkPipelineStageFlags dstStages = VulkanPipelineStage(usage);
VkBufferMemoryBarrier barrier;
barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
barrier.pNext = nullptr;
barrier.srcAccessMask = VulkanAccessFlags(currentUsage);
barrier.dstAccessMask = VulkanAccessFlags(targetUsage);
barrier.srcAccessMask = VulkanAccessFlags(mLastUsage);
barrier.dstAccessMask = VulkanAccessFlags(usage);
barrier.srcQueueFamilyIndex = 0;
barrier.dstQueueFamilyIndex = 0;
barrier.buffer = mHandle;
@@ -182,26 +194,43 @@ namespace backend { namespace vulkan {
ToBackend(GetDevice())
->fn.CmdPipelineBarrier(commands, srcStages, dstStages, 0, 0, nullptr, 1, &barrier, 0,
nullptr);
mLastUsage = usage;
}
void Buffer::SetSubDataImpl(uint32_t start, uint32_t count, const uint8_t* data) {
BufferUploader* uploader = ToBackend(GetDevice())->GetBufferUploader();
Device* device = ToBackend(GetDevice());
VkCommandBuffer commands = device->GetPendingCommandBuffer();
TransitionUsageNow(commands, nxt::BufferUsageBit::TransferDst);
BufferUploader* uploader = device->GetBufferUploader();
uploader->BufferSubData(mHandle, start, count, data);
}
void Buffer::MapReadAsyncImpl(uint32_t serial, uint32_t start, uint32_t /*count*/) {
Device* device = ToBackend(GetDevice());
VkCommandBuffer commands = device->GetPendingCommandBuffer();
TransitionUsageNow(commands, nxt::BufferUsageBit::MapRead);
uint8_t* memory = mMemoryAllocation.GetMappedPointer();
ASSERT(memory != nullptr);
MapRequestTracker* tracker = ToBackend(GetDevice())->GetMapRequestTracker();
MapRequestTracker* tracker = device->GetMapRequestTracker();
tracker->Track(this, serial, memory + start, false);
}
void Buffer::MapWriteAsyncImpl(uint32_t serial, uint32_t start, uint32_t /*count*/) {
Device* device = ToBackend(GetDevice());
VkCommandBuffer commands = device->GetPendingCommandBuffer();
TransitionUsageNow(commands, nxt::BufferUsageBit::MapWrite);
uint8_t* memory = mMemoryAllocation.GetMappedPointer();
ASSERT(memory != nullptr);
MapRequestTracker* tracker = ToBackend(GetDevice())->GetMapRequestTracker();
MapRequestTracker* tracker = device->GetMapRequestTracker();
tracker->Track(this, serial, memory + start, true);
}
@@ -209,12 +238,11 @@ namespace backend { namespace vulkan {
// No need to do anything, we keep CPU-visible memory mapped at all time.
}
void Buffer::TransitionUsageImpl(nxt::BufferUsageBit currentUsage,
nxt::BufferUsageBit targetUsage) {
VkCommandBuffer commands = ToBackend(GetDevice())->GetPendingCommandBuffer();
RecordBarrier(commands, currentUsage, targetUsage);
void Buffer::TransitionUsageImpl(nxt::BufferUsageBit, nxt::BufferUsageBit) {
}
// MapRequestTracker
MapRequestTracker::MapRequestTracker(Device* device) : mDevice(device) {
}
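
The early-out at the top of Buffer::TransitionUsageNow can be exercised on its own. A small sketch of just that rule, reusing the assumed bit values from the earlier example:

// A barrier may be skipped only when the previous usage is read-only and
// already covers the requested usage; writable usages always get a barrier
// (e.g. Storage to Storage still needs one for the write-after-write hazard).
bool NeedsBarrier(uint32_t lastUsage, uint32_t usage, uint32_t readOnlyMask) {
    bool lastIncludesTarget = (lastUsage & usage) == usage;
    bool lastReadOnly = (lastUsage & readOnlyMask) == lastUsage;
    return !(lastIncludesTarget && lastReadOnly);
}
// Vertex to Vertex: skipped. Vertex to Uniform: barrier. Storage to Storage: barrier.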

View File

@@ -35,9 +35,10 @@ namespace backend { namespace vulkan {
VkBuffer GetHandle() const;
void RecordBarrier(VkCommandBuffer commands,
nxt::BufferUsageBit currentUsage,
nxt::BufferUsageBit targetUsage) const;
// Transitions the buffer to be used as `usage`, recording any necessary barrier in
// `commands`.
// TODO(cwallez@chromium.org): coalesce barriers and do them early when possible.
void TransitionUsageNow(VkCommandBuffer commands, nxt::BufferUsageBit usage);
private:
void SetSubDataImpl(uint32_t start, uint32_t count, const uint8_t* data) override;
@@ -49,6 +50,8 @@ namespace backend { namespace vulkan {
VkBuffer mHandle = VK_NULL_HANDLE;
DeviceMemoryAllocation mMemoryAllocation;
nxt::BufferUsageBit mLastUsage = nxt::BufferUsageBit::None;
};
using BufferView = BufferViewBase;

View File

@@ -111,7 +111,9 @@ namespace backend { namespace vulkan {
} // anonymous namespace
CommandBuffer::CommandBuffer(CommandBufferBuilder* builder)
: CommandBufferBase(builder), mCommands(builder->AcquireCommands()) {
: CommandBufferBase(builder),
mCommands(builder->AcquireCommands()),
mPassResourceUsages(builder->AcquirePassResourceUsage()) {
}
CommandBuffer::~CommandBuffer() {
@@ -121,6 +123,20 @@ namespace backend { namespace vulkan {
void CommandBuffer::RecordCommands(VkCommandBuffer commands) {
Device* device = ToBackend(GetDevice());
// Records the necessary barriers for the resource usage pre-computed by the frontend
auto TransitionForPass = [](VkCommandBuffer commands, const PassResourceUsage& usages) {
for (size_t i = 0; i < usages.buffers.size(); ++i) {
Buffer* buffer = ToBackend(usages.buffers[i]);
buffer->TransitionUsageNow(commands, usages.bufferUsages[i]);
}
for (size_t i = 0; i < usages.textures.size(); ++i) {
Texture* texture = ToBackend(usages.textures[i]);
texture->TransitionUsageNow(commands, usages.textureUsages[i]);
}
};
size_t nextPassNumber = 0;
Command type;
while (mCommands.NextCommandId(&type)) {
switch (type) {
@@ -129,6 +145,11 @@ namespace backend { namespace vulkan {
auto& src = copy->source;
auto& dst = copy->destination;
ToBackend(src.buffer)
->TransitionUsageNow(commands, nxt::BufferUsageBit::TransferSrc);
ToBackend(dst.buffer)
->TransitionUsageNow(commands, nxt::BufferUsageBit::TransferDst);
VkBufferCopy region;
region.srcOffset = src.offset;
region.dstOffset = dst.offset;
@@ -144,8 +165,14 @@ namespace backend { namespace vulkan {
auto& src = copy->source;
auto& dst = copy->destination;
ToBackend(src.buffer)
->TransitionUsageNow(commands, nxt::BufferUsageBit::TransferSrc);
ToBackend(dst.texture)
->TransitionUsageNow(commands, nxt::TextureUsageBit::TransferDst);
VkBuffer srcBuffer = ToBackend(src.buffer)->GetHandle();
VkImage dstImage = ToBackend(dst.texture)->GetHandle();
VkBufferImageCopy region =
ComputeBufferImageCopyRegion(copy->rowPitch, src, dst);
@@ -161,8 +188,14 @@ namespace backend { namespace vulkan {
auto& src = copy->source;
auto& dst = copy->destination;
ToBackend(src.texture)
->TransitionUsageNow(commands, nxt::TextureUsageBit::TransferSrc);
ToBackend(dst.buffer)
->TransitionUsageNow(commands, nxt::BufferUsageBit::TransferDst);
VkImage srcImage = ToBackend(src.texture)->GetHandle();
VkBuffer dstBuffer = ToBackend(dst.buffer)->GetHandle();
VkBufferImageCopy region =
ComputeBufferImageCopyRegion(copy->rowPitch, dst, src);
@@ -173,12 +206,20 @@ namespace backend { namespace vulkan {
case Command::BeginRenderPass: {
BeginRenderPassCmd* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
TransitionForPass(commands, mPassResourceUsages[nextPassNumber]);
RecordRenderPass(commands, ToBackend(cmd->info.Get()));
nextPassNumber++;
} break;
case Command::BeginComputePass: {
mCommands.NextCommand<BeginComputePassCmd>();
TransitionForPass(commands, mPassResourceUsages[nextPassNumber]);
RecordComputePass(commands);
nextPassNumber++;
} break;
case Command::TransitionBufferUsage: {
@@ -186,7 +227,6 @@ namespace backend { namespace vulkan {
mCommands.NextCommand<TransitionBufferUsageCmd>();
Buffer* buffer = ToBackend(cmd->buffer.Get());
buffer->RecordBarrier(commands, buffer->GetUsage(), cmd->usage);
buffer->UpdateUsageInternal(cmd->usage);
} break;
@@ -195,7 +235,6 @@ namespace backend { namespace vulkan {
mCommands.NextCommand<TransitionTextureUsageCmd>();
Texture* texture = ToBackend(cmd->texture.Get());
texture->RecordBarrier(commands, texture->GetUsage(), cmd->usage);
texture->UpdateUsageInternal(cmd->usage);
} break;
@@ -250,31 +289,6 @@ namespace backend { namespace vulkan {
RenderPassDescriptor* renderPass) {
Device* device = ToBackend(GetDevice());
// NXT has an implicit transition to color attachment on render passes.
// Transition the attachments now before we start the render pass.
{
for (uint32_t i : IterateBitSet(renderPass->GetColorAttachmentMask())) {
Texture* attachment =
ToBackend(renderPass->GetColorAttachment(i).view->GetTexture());
if (!(attachment->GetUsage() & nxt::TextureUsageBit::OutputAttachment)) {
attachment->RecordBarrier(commands, attachment->GetUsage(),
nxt::TextureUsageBit::OutputAttachment);
attachment->UpdateUsageInternal(nxt::TextureUsageBit::OutputAttachment);
}
}
if (renderPass->HasDepthStencilAttachment()) {
Texture* attachment =
ToBackend(renderPass->GetDepthStencilAttachment().view->GetTexture());
if (!(attachment->GetUsage() & nxt::TextureUsageBit::OutputAttachment)) {
attachment->RecordBarrier(commands, attachment->GetUsage(),
nxt::TextureUsageBit::OutputAttachment);
attachment->UpdateUsageInternal(nxt::TextureUsageBit::OutputAttachment);
}
}
}
renderPass->RecordBeginRenderPass(commands);
// Set the default value for the dynamic state

View File

@@ -35,6 +35,7 @@ namespace backend { namespace vulkan {
void RecordRenderPass(VkCommandBuffer commands, RenderPassDescriptor* renderPass);
CommandIterator mCommands;
std::vector<PassResourceUsage> mPassResourceUsages;
};
}} // namespace backend::vulkan

View File

@@ -69,8 +69,14 @@ namespace backend { namespace vulkan {
nxtSwapChainImplementation CreateNativeSwapChainImpl(nxtDevice device, VkSurfaceKHR surface) {
Device* backendDevice = reinterpret_cast<Device*>(device);
return CreateSwapChainImplementation(new NativeSwapChainImpl(backendDevice, surface));
nxtSwapChainImplementation impl;
impl = CreateSwapChainImplementation(new NativeSwapChainImpl(backendDevice, surface));
impl.textureUsage = NXT_TEXTURE_USAGE_BIT_PRESENT;
return impl;
}
nxtTextureFormat GetNativeSwapChainPreferredFormat(
const nxtSwapChainImplementation* swapChain) {
NativeSwapChainImpl* impl = reinterpret_cast<NativeSwapChainImpl*>(swapChain->userData);

View File

@@ -178,13 +178,12 @@ namespace backend { namespace vulkan {
}
nxtSwapChainError NativeSwapChainImpl::Present() {
// Since we're going to do queue operations we need to flush pending commands such as
// layout transitions of the swapchain images to the PRESENT layout.
mDevice->SubmitPendingCommands();
// This assumes that the image has already been transitioned to the PRESENT layout and
// writes were made available to the stage.
// Assuming that the present queue is the same as the graphics queue, the proper
// synchronization has already been done by the usage transition to present so we don't
// need to wait on any semaphores.
// synchronization has already been done on the queue so we don't need to wait on any
// semaphores.
VkPresentInfoKHR presentInfo;
presentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
presentInfo.pNext = nullptr;

View File

@@ -23,6 +23,9 @@ namespace backend { namespace vulkan {
const auto& im = GetImplementation();
nxtWSIContextVulkan wsiContext = {};
im.Init(im.userData, &wsiContext);
ASSERT(im.textureUsage != NXT_TEXTURE_USAGE_BIT_NONE);
mTextureUsage = static_cast<nxt::TextureUsageBit>(im.textureUsage);
}
SwapChain::~SwapChain() {
@@ -32,6 +35,7 @@ namespace backend { namespace vulkan {
const auto& im = GetImplementation();
nxtSwapChainNextTexture next = {};
nxtSwapChainError error = im.GetNextTexture(im.userData, &next);
if (error) {
GetDevice()->HandleError(error);
return nullptr;
@@ -41,4 +45,15 @@ namespace backend { namespace vulkan {
return new Texture(builder, nativeTexture);
}
void SwapChain::OnBeforePresent(TextureBase* texture) {
Device* device = ToBackend(GetDevice());
// Perform the necessary pipeline barriers for the texture to be used with the usage
// requested by the implementation.
VkCommandBuffer commands = device->GetPendingCommandBuffer();
ToBackend(texture)->TransitionUsageNow(commands, mTextureUsage);
device->SubmitPendingCommands();
}
}} // namespace backend::vulkan

View File

@@ -28,6 +28,10 @@ namespace backend { namespace vulkan {
protected:
TextureBase* GetNextTextureImpl(TextureBuilder* builder) override;
void OnBeforePresent(TextureBase* texture) override;
private:
nxt::TextureUsageBit mTextureUsage;
};
}} // namespace backend::vulkan

View File

@@ -276,12 +276,6 @@ namespace backend { namespace vulkan {
mMemoryAllocation.GetMemoryOffset()) != VK_SUCCESS) {
ASSERT(false);
}
// Vulkan requires images to be transitioned to their first usage. Do the transition if the
// texture has an initial usage.
if (GetUsage() != nxt::TextureUsageBit::None) {
TransitionUsageImpl(nxt::TextureUsageBit::None, GetUsage());
}
}
Texture::Texture(TextureBuilder* builder, VkImage nativeImage)
@@ -312,22 +306,25 @@ namespace backend { namespace vulkan {
return VulkanAspectMask(GetFormat());
}
// Helper function to add a texture barrier to a command buffer. This is inefficient because we
// should be coalescing barriers as much as possible.
void Texture::RecordBarrier(VkCommandBuffer commands,
nxt::TextureUsageBit currentUsage,
nxt::TextureUsageBit targetUsage) const {
void Texture::TransitionUsageNow(VkCommandBuffer commands, nxt::TextureUsageBit usage) {
// Avoid encoding barriers when they aren't needed.
bool lastReadOnly = (mLastUsage & kReadOnlyTextureUsages) == mLastUsage;
if (lastReadOnly && mLastUsage == usage) {
return;
}
nxt::TextureFormat format = GetFormat();
VkPipelineStageFlags srcStages = VulkanPipelineStage(currentUsage, format);
VkPipelineStageFlags dstStages = VulkanPipelineStage(targetUsage, format);
VkPipelineStageFlags srcStages = VulkanPipelineStage(mLastUsage, format);
VkPipelineStageFlags dstStages = VulkanPipelineStage(usage, format);
VkImageMemoryBarrier barrier;
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.pNext = nullptr;
barrier.srcAccessMask = VulkanAccessFlags(currentUsage, format);
barrier.dstAccessMask = VulkanAccessFlags(targetUsage, format);
barrier.oldLayout = VulkanImageLayout(currentUsage, format);
barrier.newLayout = VulkanImageLayout(targetUsage, format);
barrier.srcAccessMask = VulkanAccessFlags(mLastUsage, format);
barrier.dstAccessMask = VulkanAccessFlags(usage, format);
barrier.oldLayout = VulkanImageLayout(mLastUsage, format);
barrier.newLayout = VulkanImageLayout(usage, format);
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.image = mHandle;
@@ -342,12 +339,11 @@ namespace backend { namespace vulkan {
ToBackend(GetDevice())
->fn.CmdPipelineBarrier(commands, srcStages, dstStages, 0, 0, nullptr, 0, nullptr, 1,
&barrier);
mLastUsage = usage;
}
void Texture::TransitionUsageImpl(nxt::TextureUsageBit currentUsage,
nxt::TextureUsageBit targetUsage) {
VkCommandBuffer commands = ToBackend(GetDevice())->GetPendingCommandBuffer();
RecordBarrier(commands, currentUsage, targetUsage);
void Texture::TransitionUsageImpl(nxt::TextureUsageBit, nxt::TextureUsageBit) {
}
TextureView::TextureView(TextureViewBuilder* builder) : TextureViewBase(builder) {

View File

@@ -34,9 +34,10 @@ namespace backend { namespace vulkan {
VkImage GetHandle() const;
VkImageAspectFlags GetVkAspectMask() const;
void RecordBarrier(VkCommandBuffer commands,
nxt::TextureUsageBit currentUsage,
nxt::TextureUsageBit targetUsage) const;
// Transitions the texture to be used as `usage`, recording any necessary barrier in
// `commands`.
// TODO(cwallez@chromium.org): coalesce barriers and do them early when possible.
void TransitionUsageNow(VkCommandBuffer commands, nxt::TextureUsageBit usage);
private:
void TransitionUsageImpl(nxt::TextureUsageBit currentUsage,
@@ -44,6 +45,10 @@ namespace backend { namespace vulkan {
VkImage mHandle = VK_NULL_HANDLE;
DeviceMemoryAllocation mMemoryAllocation;
// A usage of none will make sure the texture is transitioned before its first use as
// required by the spec.
nxt::TextureUsageBit mLastUsage = nxt::TextureUsageBit::None;
};
class TextureView : public TextureViewBase {

View File

@@ -48,7 +48,10 @@ typedef struct {
nxtSwapChainError (*Present)(void* userData);
/// Each function is called with userData as its first argument.
void* userData = nullptr;
void* userData;
/// For use by the D3D12 and Vulkan backends: how the swapchain will use the texture.
nxtTextureUsageBit textureUsage;
} nxtSwapChainImplementation;
#if defined(NXT_ENABLE_BACKEND_D3D12) && defined(__cplusplus)
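
With the new field, a swapchain implementation declares up front which usage its texture must be in before Present is called, and the backend performs the matching transition in OnBeforePresent. A hedged sketch of filling the struct (the helper and state names are illustrative; only userData and textureUsage come from this header):

nxtSwapChainImplementation CreateMySwapChainImpl(void* myState) {
    nxtSwapChainImplementation impl = {};
    // ... assign the Init/Destroy/Configure/GetNextTexture/Present callbacks ...
    impl.userData = myState;
    // New in this commit: the backend transitions the texture to this usage
    // just before calling Present.
    impl.textureUsage = NXT_TEXTURE_USAGE_BIT_PRESENT;
    return impl;
}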