Implementing Queue::WriteTexture in Vulkan
Added implementation of writeTexture in Vulkan.

Bug: dawn:483
Change-Id: Id74b6518c46caf59e07a9d16dd51d8c28340fd50
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/24660
Commit-Queue: Tomek Ponitka <tommek@google.com>
Reviewed-by: Austin Eng <enga@chromium.org>
Parent: 1b9b53a395
Commit: e28cc55cac
@@ -16,7 +16,6 @@
 #include "common/Platform.h"
 #include "dawn_native/BackendConnection.h"
 #include "dawn_native/Commands.h"
 #include "dawn_native/Error.h"
 #include "dawn_native/ErrorData.h"
 #include "dawn_native/VulkanBackend.h"
@@ -595,12 +594,9 @@ namespace dawn_native { namespace vulkan {
         ToBackend(destination)
             ->EnsureDataInitializedAsDestination(recordingContext, destinationOffset, size);

-        // Insert memory barrier to ensure host write operations are made visible before
-        // copying from the staging buffer. However, this barrier can be removed (see note below).
-        //
-        // Note: Depending on the spec understanding, an explicit barrier may not be required when
-        // used with HOST_COHERENT as vkQueueSubmit does an implicit barrier between host and
-        // device. See "Availability, Visibility, and Domain Operations" in Vulkan spec for details.
+        // There is no need of a barrier to make host writes available and visible to the copy
+        // operation for HOST_COHERENT memory. The Vulkan spec for vkQueueSubmit describes that it
+        // does an implicit availability, visibility and domain operation.

         // Insert pipeline barrier to ensure correct ordering with previous memory operations on the
         // buffer.
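As background for the comment change above, the explicit host-to-transfer barrier that the deleted comment referred to would be recorded roughly as in the following sketch. This is a hypothetical helper, not part of the change; Dawn omits it because vkQueueSubmit already performs the implicit availability/visibility operation for HOST_COHERENT memory.

    #include <vulkan/vulkan.h>

    // Records the explicit barrier making CPU writes visible to a subsequent transfer.
    void RecordHostWriteBarrier(VkCommandBuffer commandBuffer) {
        VkMemoryBarrier barrier = {};
        barrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
        barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;     // writes made by the host
        barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;  // reads done by the copy

        vkCmdPipelineBarrier(commandBuffer,
                             VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                             0,            // no dependency flags
                             1, &barrier,  // one global memory barrier
                             0, nullptr,   // no buffer memory barriers
                             0, nullptr);  // no image memory barriers
    }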
@@ -618,6 +614,42 @@ namespace dawn_native { namespace vulkan {
         return {};
     }

+    MaybeError Device::CopyFromStagingToTexture(StagingBufferBase* source,
+                                                const TextureDataLayout& src,
+                                                TextureCopy* dst,
+                                                const Extent3D copySize) {
+        // There is no need of a barrier to make host writes available and visible to the copy
+        // operation for HOST_COHERENT memory. The Vulkan spec for vkQueueSubmit describes that it
+        // does an implicit availability, visibility and domain operation.
+
+        CommandRecordingContext* recordingContext = GetPendingRecordingContext();
+
+        VkBufferImageCopy region = ComputeBufferImageCopyRegion(src, *dst, copySize);
+        VkImageSubresourceLayers subresource = region.imageSubresource;
+
+        ASSERT(dst->texture->GetDimension() == wgpu::TextureDimension::e2D);
+        SubresourceRange range = {subresource.mipLevel, 1, subresource.baseArrayLayer,
+                                  subresource.layerCount};
+        if (IsCompleteSubresourceCopiedTo(dst->texture.Get(), copySize, subresource.mipLevel)) {
+            // Since texture has been overwritten, it has been "initialized"
+            dst->texture->SetIsSubresourceContentInitialized(true, range);
+        } else {
+            ToBackend(dst->texture)->EnsureSubresourceContentInitialized(recordingContext, range);
+        }
+        // Insert pipeline barrier to ensure correct ordering with previous memory operations on the
+        // texture.
+        ToBackend(dst->texture)
+            ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst, range);
+        VkImage dstImage = ToBackend(dst->texture)->GetHandle();
+
+        // Dawn guarantees dstImage be in the TRANSFER_DST_OPTIMAL layout after the
+        // copy command.
+        this->fn.CmdCopyBufferToImage(recordingContext->commandBuffer,
+                                      ToBackend(source)->GetBufferHandle(), dstImage,
+                                      VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
+        return {};
+    }
+
     MaybeError Device::ImportExternalImage(const ExternalImageDescriptor* descriptor,
                                            ExternalMemoryHandle memoryHandle,
                                            VkImage image,
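TransitionUsageNow is implemented elsewhere in the Vulkan backend and is not shown in this diff. Conceptually it records an image memory barrier along the lines of the following simplified sketch (not Dawn's actual implementation; the real code derives the source access mask and pipeline stages from the texture's previous usage):

    #include <vulkan/vulkan.h>

    // Simplified sketch of the layout transition recorded before the copy: whatever the
    // previous layout was, the subresource range ends up in TRANSFER_DST_OPTIMAL so that
    // vkCmdCopyBufferToImage can write to it.
    void TransitionToTransferDst(VkCommandBuffer commandBuffer,
                                 VkImage image,
                                 VkImageLayout oldLayout,
                                 VkImageSubresourceRange range) {
        VkImageMemoryBarrier barrier = {};
        barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
        barrier.srcAccessMask = 0;  // a real implementation derives this from previous usage
        barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
        barrier.oldLayout = oldLayout;
        barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
        barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        barrier.image = image;
        barrier.subresourceRange = range;

        vkCmdPipelineBarrier(commandBuffer, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
                             VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr,
                             1, &barrier);
    }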
@@ -19,6 +19,7 @@
 #include "common/Serial.h"
 #include "common/SerialQueue.h"
 #include "dawn_native/Commands.h"
 #include "dawn_native/Device.h"
 #include "dawn_native/vulkan/CommandRecordingContext.h"
 #include "dawn_native/vulkan/Forward.h"
@@ -87,6 +88,10 @@ namespace dawn_native { namespace vulkan {
                                            BufferBase* destination,
                                            uint64_t destinationOffset,
                                            uint64_t size) override;
+        MaybeError CopyFromStagingToTexture(StagingBufferBase* source,
+                                            const TextureDataLayout& src,
+                                            TextureCopy* dst,
+                                            const Extent3D copySize);

         ResultOrError<ResourceMemoryAllocation> AllocateMemory(VkMemoryRequirements requirements,
                                                                bool mappable);
@@ -14,6 +14,11 @@
 #include "dawn_native/vulkan/QueueVk.h"

 #include "common/Math.h"
 #include "dawn_native/Buffer.h"
 #include "dawn_native/CommandValidation.h"
 #include "dawn_native/Commands.h"
 #include "dawn_native/DynamicUploader.h"
 #include "dawn_native/vulkan/CommandBufferVk.h"
 #include "dawn_native/vulkan/CommandRecordingContext.h"
 #include "dawn_native/vulkan/DeviceVk.h"
@@ -22,6 +27,52 @@
 namespace dawn_native { namespace vulkan {

+    namespace {
+        ResultOrError<UploadHandle> UploadTextureDataAligningBytesPerRow(
+            DeviceBase* device,
+            const void* data,
+            size_t dataSize,
+            uint32_t alignedBytesPerRow,
+            uint32_t alignedRowsPerImage,
+            const TextureDataLayout* dataLayout,
+            const Format& textureFormat,
+            const Extent3D* writeSize) {
+            uint32_t newDataSize = ComputeRequiredBytesInCopy(
+                textureFormat, *writeSize, alignedBytesPerRow, alignedRowsPerImage);
+
+            UploadHandle uploadHandle;
+            DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
+                                              newDataSize, device->GetPendingCommandSerial()));
+            ASSERT(uploadHandle.mappedBuffer != nullptr);
+
+            // TODO(tommek@google.com): Add an optimization to do a single memcpy if the data
+            // is already correctly packed.
+            uint8_t* dstPointer = static_cast<uint8_t*>(uploadHandle.mappedBuffer);
+            const uint8_t* srcPointer = static_cast<const uint8_t*>(data);
+            srcPointer += dataLayout->offset;
+
+            uint32_t alignedRowsPerImageInBlock = alignedRowsPerImage / textureFormat.blockHeight;
+            uint32_t dataRowsPerImageInBlock = dataLayout->rowsPerImage / textureFormat.blockHeight;
+            if (dataRowsPerImageInBlock == 0) {
+                dataRowsPerImageInBlock = writeSize->height / textureFormat.blockHeight;
+            }
+
+            ASSERT(dataRowsPerImageInBlock >= alignedRowsPerImageInBlock);
+            uint64_t imageAdditionalStride =
+                dataLayout->bytesPerRow * (dataRowsPerImageInBlock - alignedRowsPerImageInBlock);
+            for (uint32_t d = 0; d < writeSize->depth; ++d) {
+                for (uint32_t h = 0; h < alignedRowsPerImageInBlock; ++h) {
+                    memcpy(dstPointer, srcPointer, alignedBytesPerRow);
+                    dstPointer += alignedBytesPerRow;
+                    srcPointer += dataLayout->bytesPerRow;
+                }
+                srcPointer += imageAdditionalStride;
+            }
+
+            return uploadHandle;
+        }
+    }  // namespace
+
     // static
     Queue* Queue::Create(Device* device) {
         return new Queue(device);
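The helper above repacks the caller's rows, which may use a larger bytesPerRow stride and extra rows between image slices, into a tightly packed staging allocation. The following self-contained sketch reproduces the inner copy loop with made-up sizes to show what the repacking does:

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Illustrative repacking with hypothetical sizes: 2 rows of 8 meaningful bytes each,
    // laid out by the caller with a 12-byte row stride, copied into a tightly packed
    // 8-byte-per-row staging area (mirroring UploadTextureDataAligningBytesPerRow).
    int main() {
        const uint32_t srcBytesPerRow = 12;     // caller's dataLayout->bytesPerRow
        const uint32_t alignedBytesPerRow = 8;  // bytes actually needed per row
        const uint32_t rows = 2;

        std::vector<uint8_t> src(srcBytesPerRow * rows);
        for (size_t i = 0; i < src.size(); ++i) {
            src[i] = static_cast<uint8_t>(i);
        }

        std::vector<uint8_t> staging(alignedBytesPerRow * rows);
        const uint8_t* srcPointer = src.data();
        uint8_t* dstPointer = staging.data();
        for (uint32_t h = 0; h < rows; ++h) {
            memcpy(dstPointer, srcPointer, alignedBytesPerRow);  // copy only the meaningful bytes
            dstPointer += alignedBytesPerRow;                    // staging rows are tightly packed
            srcPointer += srcBytesPerRow;                        // source rows keep their padding
        }

        assert(staging[8] == 12);  // row 1 of the staging copy starts at source offset 12
        return 0;
    }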
@@ -48,4 +99,39 @@ namespace dawn_native { namespace vulkan {
         return {};
     }

+    MaybeError Queue::WriteTextureImpl(const TextureCopyView* destination,
+                                       const void* data,
+                                       size_t dataSize,
+                                       const TextureDataLayout* dataLayout,
+                                       const Extent3D* writeSize) {
+        uint32_t blockSize = destination->texture->GetFormat().blockByteSize;
+        uint32_t blockWidth = destination->texture->GetFormat().blockWidth;
+        // We are only copying the part of the data that will appear in the texture.
+        // Note that validating texture copy range ensures that writeSize->width and
+        // writeSize->height are multiples of blockWidth and blockHeight respectively.
+        // TODO(tommek@google.com): Add an optimization to align bytesPerRow to
+        // VkPhysicalDeviceLimits::optimalBufferCopyRowPitch.
+        uint32_t alignedBytesPerRow = (writeSize->width) / blockWidth * blockSize;
+        uint32_t alignedRowsPerImage = writeSize->height;
+
+        UploadHandle uploadHandle;
+        DAWN_TRY_ASSIGN(uploadHandle,
+                        UploadTextureDataAligningBytesPerRow(
+                            GetDevice(), data, dataSize, alignedBytesPerRow, alignedRowsPerImage,
+                            dataLayout, destination->texture->GetFormat(), writeSize));
+
+        TextureDataLayout passDataLayout = *dataLayout;
+        passDataLayout.offset = uploadHandle.startOffset;
+        passDataLayout.bytesPerRow = alignedBytesPerRow;
+        passDataLayout.rowsPerImage = alignedRowsPerImage;
+
+        TextureCopy textureCopy;
+        textureCopy.texture = destination->texture;
+        textureCopy.mipLevel = destination->mipLevel;
+        textureCopy.origin = destination->origin;
+
+        return ToBackend(GetDevice())
+            ->CopyFromStagingToTexture(uploadHandle.stagingBuffer, passDataLayout, &textureCopy,
+                                       *writeSize);
+    }
 }}  // namespace dawn_native::vulkan
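As a worked example of the alignedBytesPerRow computation above, using the 127-texel width that appears in the VaryingTextureSize test later in this change and assuming an RGBA8-style format (1x1 blocks of 4 bytes): only 127 * 4 = 508 bytes per row are staged, regardless of how much padding the caller's bytesPerRow carries.

    #include <cassert>
    #include <cstdint>

    int main() {
        // RGBA8-style uncompressed format: 1x1 "blocks" of 4 bytes (assumed for illustration).
        const uint32_t blockWidth = 1;
        const uint32_t blockByteSize = 4;
        const uint32_t writeWidth = 127;  // texels, as in the VaryingTextureSize test

        // Same arithmetic as WriteTextureImpl.
        uint32_t alignedBytesPerRow = writeWidth / blockWidth * blockByteSize;
        assert(alignedBytesPerRow == 508);  // only the bytes that will land in the texture
        return 0;
    }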
@@ -31,6 +31,11 @@ namespace dawn_native { namespace vulkan {
         using QueueBase::QueueBase;

         MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
+        MaybeError WriteTextureImpl(const TextureCopyView* destination,
+                                    const void* data,
+                                    size_t dataSize,
+                                    const TextureDataLayout* dataLayout,
+                                    const Extent3D* writeSize) override;
     };

 }}  // namespace dawn_native::vulkan
@@ -68,16 +68,26 @@ namespace dawn_native { namespace vulkan {
     VkBufferImageCopy ComputeBufferImageCopyRegion(const BufferCopy& bufferCopy,
                                                    const TextureCopy& textureCopy,
                                                    const Extent3D& copySize) {
+        TextureDataLayout passDataLayout;
+        passDataLayout.offset = bufferCopy.offset;
+        passDataLayout.rowsPerImage = bufferCopy.rowsPerImage;
+        passDataLayout.bytesPerRow = bufferCopy.bytesPerRow;
+        return ComputeBufferImageCopyRegion(passDataLayout, textureCopy, copySize);
+    }
+
+    VkBufferImageCopy ComputeBufferImageCopyRegion(const TextureDataLayout& dataLayout,
+                                                   const TextureCopy& textureCopy,
+                                                   const Extent3D& copySize) {
         const Texture* texture = ToBackend(textureCopy.texture.Get());

         VkBufferImageCopy region;

-        region.bufferOffset = bufferCopy.offset;
+        region.bufferOffset = dataLayout.offset;
         // In Vulkan the row length is in texels while it is in bytes for Dawn
         const Format& format = texture->GetFormat();
-        ASSERT(bufferCopy.bytesPerRow % format.blockByteSize == 0);
-        region.bufferRowLength = bufferCopy.bytesPerRow / format.blockByteSize * format.blockWidth;
-        region.bufferImageHeight = bufferCopy.rowsPerImage;
+        ASSERT(dataLayout.bytesPerRow % format.blockByteSize == 0);
+        region.bufferRowLength = dataLayout.bytesPerRow / format.blockByteSize * format.blockWidth;
+        region.bufferImageHeight = dataLayout.rowsPerImage;

         region.imageSubresource.aspectMask = texture->GetVkAspectMask();
         region.imageSubresource.mipLevel = textureCopy.mipLevel;
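The conversion above is why the ASSERT is needed: Dawn's bytesPerRow is a byte stride, while Vulkan's bufferRowLength counts texels. A short worked example with a hypothetical BC1-like format (blocks 4 texels wide, 8 bytes each):

    #include <cassert>
    #include <cstdint>

    int main() {
        // Hypothetical block-compressed format: blocks are 4 texels wide and 8 bytes each.
        const uint32_t blockWidth = 4;
        const uint32_t blockByteSize = 8;
        const uint64_t bytesPerRow = 256;  // Dawn-side byte stride between rows of blocks

        // Same conversion as ComputeBufferImageCopyRegion: bytes -> blocks -> texels.
        assert(bytesPerRow % blockByteSize == 0);
        uint32_t bufferRowLength =
            static_cast<uint32_t>(bytesPerRow / blockByteSize * blockWidth);
        assert(bufferRowLength == 128);  // 32 blocks per row * 4 texels per block
        return 0;
    }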
@@ -89,9 +89,13 @@ namespace dawn_native { namespace vulkan {
     VkCompareOp ToVulkanCompareOp(wgpu::CompareFunction op);

     Extent3D ComputeTextureCopyExtent(const TextureCopy& textureCopy, const Extent3D& copySize);

     VkBufferImageCopy ComputeBufferImageCopyRegion(const BufferCopy& bufferCopy,
                                                    const TextureCopy& textureCopy,
                                                    const Extent3D& copySize);
+    VkBufferImageCopy ComputeBufferImageCopyRegion(const TextureDataLayout& dataLayout,
+                                                   const TextureCopy& textureCopy,
+                                                   const Extent3D& copySize);

 }}  // namespace dawn_native::vulkan
@@ -1171,4 +1171,4 @@ TEST_P(CompressedTextureWriteTextureTest,
     }
 }

-DAWN_INSTANTIATE_TEST(CompressedTextureWriteTextureTest, MetalBackend());
+DAWN_INSTANTIATE_TEST(CompressedTextureWriteTextureTest, MetalBackend(), VulkanBackend());
@@ -294,6 +294,8 @@ class QueueWriteTextureTests : public DawnTest {
 // Test writing the whole texture for varying texture sizes.
 TEST_P(QueueWriteTextureTests, VaryingTextureSize) {
+    DAWN_SKIP_TEST_IF(IsSwiftshader());
+
     for (unsigned int w : {127, 128}) {
         for (unsigned int h : {63, 64}) {
             for (unsigned int d : {1, 3, 4}) {
@@ -490,4 +492,4 @@ TEST_P(QueueWriteTextureTests, VaryingArrayBytesPerRow) {
     }
 }

-DAWN_INSTANTIATE_TEST(QueueWriteTextureTests, MetalBackend());
+DAWN_INSTANTIATE_TEST(QueueWriteTextureTests, MetalBackend(), VulkanBackend());
@@ -1147,7 +1147,7 @@ TEST_P(TextureZeroInitTest, CopyTextureToBufferNonRenderableUnaligned) {
 // In this test WriteTexture fully overwrites a texture
 TEST_P(TextureZeroInitTest, WriteWholeTexture) {
     // TODO(dawn:483): Remove this condition after implementing WriteTexture in those backends.
-    DAWN_SKIP_TEST_IF(IsOpenGL() || IsVulkan() || IsD3D12());
+    DAWN_SKIP_TEST_IF(IsOpenGL() || IsD3D12());

     wgpu::TextureDescriptor descriptor = CreateTextureDescriptor(
         1, 1, wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc, kColorFormat);
@@ -1182,7 +1182,7 @@ TEST_P(TextureZeroInitTest, WriteWholeTexture) {
 // half.
 TEST_P(TextureZeroInitTest, WriteTextureHalf) {
     // TODO(dawn:483): Remove this condition after implementing WriteTexture in those backends.
-    DAWN_SKIP_TEST_IF(IsOpenGL() || IsVulkan() || IsD3D12());
+    DAWN_SKIP_TEST_IF(IsOpenGL() || IsD3D12());

     wgpu::TextureDescriptor descriptor = CreateTextureDescriptor(
         4, 1,
@@ -1222,7 +1222,7 @@ TEST_P(TextureZeroInitTest, WriteTextureHalf) {
 // is needed for neither the subresources involved in the write nor the other subresources.
 TEST_P(TextureZeroInitTest, WriteWholeTextureArray) {
     // TODO(dawn:483): Remove this condition after implementing WriteTexture in those backends.
-    DAWN_SKIP_TEST_IF(IsOpenGL() || IsVulkan() || IsD3D12());
+    DAWN_SKIP_TEST_IF(IsOpenGL() || IsD3D12());

     wgpu::TextureDescriptor descriptor = CreateTextureDescriptor(
         1, 6, wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc, kColorFormat);
@@ -1265,7 +1265,7 @@ TEST_P(TextureZeroInitTest, WriteWholeTextureArray) {
 // half.
 TEST_P(TextureZeroInitTest, WriteTextureArrayHalf) {
     // TODO(dawn:483): Remove this condition after implementing WriteTexture in those backends.
-    DAWN_SKIP_TEST_IF(IsOpenGL() || IsVulkan() || IsD3D12());
+    DAWN_SKIP_TEST_IF(IsOpenGL() || IsD3D12());

     wgpu::TextureDescriptor descriptor = CreateTextureDescriptor(
         4, 6,
@@ -1312,7 +1312,7 @@ TEST_P(TextureZeroInitTest, WriteTextureArrayHalf) {
 // In this test WriteTexture fully overwrites a texture at mip level.
 TEST_P(TextureZeroInitTest, WriteWholeTextureAtMipLevel) {
     // TODO(dawn:483): Remove this condition after implementing WriteTexture in those backends.
-    DAWN_SKIP_TEST_IF(IsOpenGL() || IsVulkan() || IsD3D12());
+    DAWN_SKIP_TEST_IF(IsOpenGL() || IsD3D12());

     wgpu::TextureDescriptor descriptor = CreateTextureDescriptor(
         4, 1, wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc, kColorFormat);
@@ -1351,7 +1351,7 @@ TEST_P(TextureZeroInitTest, WriteWholeTextureAtMipLevel) {
 // other half.
 TEST_P(TextureZeroInitTest, WriteTextureHalfAtMipLevel) {
     // TODO(dawn:483): Remove this condition after implementing WriteTexture in those backends.
-    DAWN_SKIP_TEST_IF(IsOpenGL() || IsVulkan() || IsD3D12());
+    DAWN_SKIP_TEST_IF(IsOpenGL() || IsD3D12());

     wgpu::TextureDescriptor descriptor = CreateTextureDescriptor(
         4, 1,