Vulkan: Combine all the barriers before dispatch() in one call

This patch combines all the resource barriers added before each
dispatch() into a single call, reducing the number of
vkCmdPipelineBarrier() calls recorded in the Vulkan command buffer.

BUG=dawn:522

Change-Id: I1b5943e62eb0a09db96de12196fcabb3448e9e4d
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/28283
Reviewed-by: Corentin Wallez <cwallez@chromium.org>
Reviewed-by: Austin Eng <enga@chromium.org>
Commit-Queue: Jiawei Shao <jiawei.shao@intel.com>
This commit is contained in:
Jiawei Shao 2020-09-10 00:26:07 +00:00 committed by Commit Bot service account
parent 39d1cc9e9c
commit 87f25134a8
5 changed files with 109 additions and 61 deletions

View File

@@ -186,23 +186,20 @@ namespace dawn_native { namespace vulkan {
void Buffer::TransitionUsageNow(CommandRecordingContext* recordingContext, void Buffer::TransitionUsageNow(CommandRecordingContext* recordingContext,
wgpu::BufferUsage usage) { wgpu::BufferUsage usage) {
std::vector<VkBufferMemoryBarrier> barriers; VkBufferMemoryBarrier barrier;
VkPipelineStageFlags srcStages = 0; VkPipelineStageFlags srcStages = 0;
VkPipelineStageFlags dstStages = 0; VkPipelineStageFlags dstStages = 0;
TransitionUsageNow(recordingContext, usage, &barriers, &srcStages, &dstStages); if (TransitionUsageAndGetResourceBarrier(usage, &barrier, &srcStages, &dstStages)) {
ASSERT(srcStages != 0 && dstStages != 0);
if (barriers.size() > 0) {
ASSERT(barriers.size() == 1);
ToBackend(GetDevice()) ToBackend(GetDevice())
->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages, 0, 0, ->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages, 0, 0,
nullptr, barriers.size(), barriers.data(), 0, nullptr); nullptr, 1u, &barrier, 0, nullptr);
} }
} }
void Buffer::TransitionUsageNow(CommandRecordingContext* recordingContext, bool Buffer::TransitionUsageAndGetResourceBarrier(wgpu::BufferUsage usage,
wgpu::BufferUsage usage, VkBufferMemoryBarrier* barrier,
std::vector<VkBufferMemoryBarrier>* bufferBarriers,
VkPipelineStageFlags* srcStages, VkPipelineStageFlags* srcStages,
VkPipelineStageFlags* dstStages) { VkPipelineStageFlags* dstStages) {
bool lastIncludesTarget = (mLastUsage & usage) == usage; bool lastIncludesTarget = (mLastUsage & usage) == usage;
@@ -210,32 +207,31 @@ namespace dawn_native { namespace vulkan {
// We can skip transitions to already current read-only usages. // We can skip transitions to already current read-only usages.
if (lastIncludesTarget && lastReadOnly) { if (lastIncludesTarget && lastReadOnly) {
return; return false;
} }
// Special-case for the initial transition: Vulkan doesn't allow access flags to be 0. // Special-case for the initial transition: Vulkan doesn't allow access flags to be 0.
if (mLastUsage == wgpu::BufferUsage::None) { if (mLastUsage == wgpu::BufferUsage::None) {
mLastUsage = usage; mLastUsage = usage;
return; return false;
} }
*srcStages |= VulkanPipelineStage(mLastUsage); *srcStages |= VulkanPipelineStage(mLastUsage);
*dstStages |= VulkanPipelineStage(usage); *dstStages |= VulkanPipelineStage(usage);
VkBufferMemoryBarrier barrier; barrier->sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER; barrier->pNext = nullptr;
barrier.pNext = nullptr; barrier->srcAccessMask = VulkanAccessFlags(mLastUsage);
barrier.srcAccessMask = VulkanAccessFlags(mLastUsage); barrier->dstAccessMask = VulkanAccessFlags(usage);
barrier.dstAccessMask = VulkanAccessFlags(usage); barrier->srcQueueFamilyIndex = 0;
barrier.srcQueueFamilyIndex = 0; barrier->dstQueueFamilyIndex = 0;
barrier.dstQueueFamilyIndex = 0; barrier->buffer = mHandle;
barrier.buffer = mHandle; barrier->offset = 0;
barrier.offset = 0; barrier->size = GetSize();
barrier.size = GetSize();
bufferBarriers->push_back(barrier);
mLastUsage = usage; mLastUsage = usage;
return true;
} }
bool Buffer::IsCPUWritableAtCreation() const { bool Buffer::IsCPUWritableAtCreation() const {

View File

@@ -37,9 +37,8 @@ namespace dawn_native { namespace vulkan {
// `commands`. // `commands`.
// TODO(cwallez@chromium.org): coalesce barriers and do them early when possible. // TODO(cwallez@chromium.org): coalesce barriers and do them early when possible.
void TransitionUsageNow(CommandRecordingContext* recordingContext, wgpu::BufferUsage usage); void TransitionUsageNow(CommandRecordingContext* recordingContext, wgpu::BufferUsage usage);
void TransitionUsageNow(CommandRecordingContext* recordingContext, bool TransitionUsageAndGetResourceBarrier(wgpu::BufferUsage usage,
wgpu::BufferUsage usage, VkBufferMemoryBarrier* barrier,
std::vector<VkBufferMemoryBarrier>* bufferBarriers,
VkPipelineStageFlags* srcStages, VkPipelineStageFlags* srcStages,
VkPipelineStageFlags* dstStages); VkPipelineStageFlags* dstStages);

View File

@@ -146,43 +146,59 @@ namespace dawn_native { namespace vulkan {
mDirtyBindGroupsObjectChangedOrIsDynamic, mBindGroups, mDirtyBindGroupsObjectChangedOrIsDynamic, mBindGroups,
mDynamicOffsetCounts, mDynamicOffsets); mDynamicOffsetCounts, mDynamicOffsets);
// TODO(jiawei.shao@intel.com): combine the following barriers in one std::vector<VkBufferMemoryBarrier> bufferBarriers;
// vkCmdPipelineBarrier() call. std::vector<VkImageMemoryBarrier> imageBarriers;
VkPipelineStageFlags srcStages = 0;
VkPipelineStageFlags dstStages = 0;
for (BindGroupIndex index : IterateBitSet(mBindGroupLayoutsMask)) { for (BindGroupIndex index : IterateBitSet(mBindGroupLayoutsMask)) {
BindGroupLayoutBase* layout = mBindGroups[index]->GetLayout(); BindGroupLayoutBase* layout = mBindGroups[index]->GetLayout();
for (BindingIndex binding{0}; binding < layout->GetBindingCount(); ++binding) { for (BindingIndex binding{0}; binding < layout->GetBindingCount(); ++binding) {
switch (layout->GetBindingInfo(binding).type) { switch (layout->GetBindingInfo(binding).type) {
case wgpu::BindingType::StorageBuffer: case wgpu::BindingType::StorageBuffer:
case wgpu::BindingType::ReadonlyStorageBuffer: case wgpu::BindingType::ReadonlyStorageBuffer: {
ToBackend( VkBufferMemoryBarrier bufferBarrier;
mBindGroups[index]->GetBindingAsBufferBinding(binding).buffer) if (ToBackend(mBindGroups[index]
->TransitionUsageNow(recordingContext, ->GetBindingAsBufferBinding(binding)
wgpu::BufferUsage::Storage); .buffer)
->TransitionUsageAndGetResourceBarrier(
wgpu::BufferUsage::Storage, &bufferBarrier, &srcStages,
&dstStages)) {
bufferBarriers.push_back(bufferBarrier);
}
break; break;
}
case wgpu::BindingType::ReadonlyStorageTexture: case wgpu::BindingType::ReadonlyStorageTexture:
case wgpu::BindingType::WriteonlyStorageTexture: { case wgpu::BindingType::WriteonlyStorageTexture: {
TextureViewBase* view = TextureViewBase* view =
mBindGroups[index]->GetBindingAsTextureView(binding); mBindGroups[index]->GetBindingAsTextureView(binding);
ToBackend(view->GetTexture()) ToBackend(view->GetTexture())
->TransitionUsageNow(recordingContext, ->TransitionUsageAndGetResourceBarrier(
wgpu::TextureUsage::Storage, wgpu::TextureUsage::Storage, view->GetSubresourceRange(),
view->GetSubresourceRange()); &imageBarriers, &srcStages, &dstStages);
break; break;
} }
case wgpu::BindingType::UniformBuffer: case wgpu::BindingType::UniformBuffer: {
ToBackend( VkBufferMemoryBarrier bufferBarrier;
mBindGroups[index]->GetBindingAsBufferBinding(binding).buffer) if (ToBackend(mBindGroups[index]
->TransitionUsageNow(recordingContext, ->GetBindingAsBufferBinding(binding)
wgpu::BufferUsage::Uniform); .buffer)
->TransitionUsageAndGetResourceBarrier(
wgpu::BufferUsage::Uniform, &bufferBarrier, &srcStages,
&dstStages)) {
bufferBarriers.push_back(bufferBarrier);
}
break; break;
}
case wgpu::BindingType::SampledTexture: { case wgpu::BindingType::SampledTexture: {
TextureViewBase* view = TextureViewBase* view =
mBindGroups[index]->GetBindingAsTextureView(binding); mBindGroups[index]->GetBindingAsTextureView(binding);
ToBackend(view->GetTexture()) ToBackend(view->GetTexture())
->TransitionUsageNow(recordingContext, ->TransitionUsageAndGetResourceBarrier(
wgpu::TextureUsage::Sampled, wgpu::TextureUsage::Sampled, view->GetSubresourceRange(),
view->GetSubresourceRange()); &imageBarriers, &srcStages, &dstStages);
break; break;
} }
@@ -200,6 +216,15 @@ namespace dawn_native { namespace vulkan {
} }
} }
} }
if (!bufferBarriers.empty() || !imageBarriers.empty()) {
ASSERT(srcStages != 0 && dstStages != 0);
device->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages,
dstStages, 0, 0, nullptr, bufferBarriers.size(),
bufferBarriers.data(), imageBarriers.size(),
imageBarriers.data());
}
DidApply(); DidApply();
} }
}; };
@@ -459,8 +484,12 @@ namespace dawn_native { namespace vulkan {
for (size_t i = 0; i < usages.buffers.size(); ++i) { for (size_t i = 0; i < usages.buffers.size(); ++i) {
Buffer* buffer = ToBackend(usages.buffers[i]); Buffer* buffer = ToBackend(usages.buffers[i]);
buffer->EnsureDataInitialized(recordingContext); buffer->EnsureDataInitialized(recordingContext);
buffer->TransitionUsageNow(recordingContext, usages.bufferUsages[i],
&bufferBarriers, &srcStages, &dstStages); VkBufferMemoryBarrier bufferBarrier;
if (buffer->TransitionUsageAndGetResourceBarrier(
usages.bufferUsages[i], &bufferBarrier, &srcStages, &dstStages)) {
bufferBarriers.push_back(bufferBarrier);
}
} }
for (size_t i = 0; i < usages.textures.size(); ++i) { for (size_t i = 0; i < usages.textures.size(); ++i) {

View File

@@ -818,6 +818,32 @@ namespace dawn_native { namespace vulkan {
wgpu::TextureUsage usage, wgpu::TextureUsage usage,
const SubresourceRange& range) { const SubresourceRange& range) {
std::vector<VkImageMemoryBarrier> barriers; std::vector<VkImageMemoryBarrier> barriers;
VkPipelineStageFlags srcStages = 0;
VkPipelineStageFlags dstStages = 0;
TransitionUsageAndGetResourceBarrier(usage, range, &barriers, &srcStages, &dstStages);
if (mExternalState != ExternalState::InternalOnly) {
TweakTransitionForExternalUsage(recordingContext, &barriers, 0);
}
if (!barriers.empty()) {
ASSERT(srcStages != 0 && dstStages != 0);
ToBackend(GetDevice())
->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages, 0, 0,
nullptr, 0, nullptr, barriers.size(), barriers.data());
}
}
void Texture::TransitionUsageAndGetResourceBarrier(
wgpu::TextureUsage usage,
const SubresourceRange& range,
std::vector<VkImageMemoryBarrier>* imageBarriers,
VkPipelineStageFlags* srcStages,
VkPipelineStageFlags* dstStages) {
ASSERT(imageBarriers != nullptr);
const Format& format = GetFormat(); const Format& format = GetFormat();
wgpu::TextureUsage allLastUsages = wgpu::TextureUsage::None; wgpu::TextureUsage allLastUsages = wgpu::TextureUsage::None;
@@ -837,7 +863,7 @@ namespace dawn_native { namespace vulkan {
if (CanReuseWithoutBarrier(mSubresourceLastUsages[0], usage)) { if (CanReuseWithoutBarrier(mSubresourceLastUsages[0], usage)) {
return; return;
} }
barriers.push_back( imageBarriers->push_back(
BuildMemoryBarrier(format, mHandle, mSubresourceLastUsages[0], usage, range)); BuildMemoryBarrier(format, mHandle, mSubresourceLastUsages[0], usage, range));
allLastUsages = mSubresourceLastUsages[0]; allLastUsages = mSubresourceLastUsages[0];
for (uint32_t i = 0; i < GetSubresourceCount(); ++i) { for (uint32_t i = 0; i < GetSubresourceCount(); ++i) {
@@ -868,22 +894,15 @@ namespace dawn_native { namespace vulkan {
mSubresourceLastUsages[index] = usage; mSubresourceLastUsages[index] = usage;
} }
barriers.push_back(BuildMemoryBarrier( imageBarriers->push_back(BuildMemoryBarrier(
format, mHandle, lastUsage, usage, format, mHandle, lastUsage, usage,
SubresourceRange::SingleMipAndLayer(level, layer, format.aspects))); SubresourceRange::SingleMipAndLayer(level, layer, format.aspects)));
} }
} }
} }
if (mExternalState != ExternalState::InternalOnly) { *srcStages |= VulkanPipelineStage(allLastUsages, format);
TweakTransitionForExternalUsage(recordingContext, &barriers, 0); *dstStages |= VulkanPipelineStage(usage, format);
}
VkPipelineStageFlags srcStages = VulkanPipelineStage(allLastUsages, format);
VkPipelineStageFlags dstStages = VulkanPipelineStage(usage, format);
ToBackend(GetDevice())
->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages, 0, 0,
nullptr, 0, nullptr, barriers.size(), barriers.data());
mSameLastUsagesAcrossSubresources = areAllSubresourcesCovered; mSameLastUsagesAcrossSubresources = areAllSubresourcesCovered;
} }

View File

@@ -70,6 +70,11 @@ namespace dawn_native { namespace vulkan {
void TransitionUsageNow(CommandRecordingContext* recordingContext, void TransitionUsageNow(CommandRecordingContext* recordingContext,
wgpu::TextureUsage usage, wgpu::TextureUsage usage,
const SubresourceRange& range); const SubresourceRange& range);
void TransitionUsageAndGetResourceBarrier(wgpu::TextureUsage usage,
const SubresourceRange& range,
std::vector<VkImageMemoryBarrier>* imageBarriers,
VkPipelineStageFlags* srcStages,
VkPipelineStageFlags* dstStages);
void TransitionUsageForPass(CommandRecordingContext* recordingContext, void TransitionUsageForPass(CommandRecordingContext* recordingContext,
const PassTextureUsage& textureUsages, const PassTextureUsage& textureUsages,
std::vector<VkImageMemoryBarrier>* imageBarriers, std::vector<VkImageMemoryBarrier>* imageBarriers,