Implement Queue::WriteTexture in D3D12
Bug: dawn:483
Change-Id: I9e5f54abc6675acbb11a021a3d38aea7195017c5
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/26320
Commit-Queue: Natasha Lee <natlee@microsoft.com>
Reviewed-by: Austin Eng <enga@chromium.org>
Commit: cbec3179ef
Parent: 8fe202eccb
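Orientation before the diff: D3D12 cannot consume tightly packed rows in a buffer-to-texture copy, so the new Queue::WriteTextureImpl stages the user data into an upload allocation with each row padded out to D3D12_TEXTURE_DATA_PITCH_ALIGNMENT (256 bytes), then reuses the buffer-to-texture copy-split path (Device::CopyFromStagingToTexture and CopyBufferToTextureWithCopySplit) to record the CopyTextureRegion calls. Below is a minimal standalone sketch of just the row-padding idea; the helper names are illustrative and not Dawn code, only the Align computation and the 256-byte pitch constant mirror what the patch does.

// Standalone illustration (not Dawn code): pad tightly packed source rows out to the
// 256-byte row pitch that D3D12 requires for buffer-to-texture copies.
#include <cstdint>
#include <cstring>
#include <vector>

constexpr uint32_t kTexturePitchAlignment = 256;  // value of D3D12_TEXTURE_DATA_PITCH_ALIGNMENT

// Round "value" up to the next multiple of "alignment" (alignment must be a power of two).
uint64_t Align(uint64_t value, uint64_t alignment) {
    return (value + alignment - 1) & ~(alignment - 1);
}

// Copy "rowCount" rows of "bytesPerRow" bytes each from a tightly packed source into a
// staging layout whose rows are spaced "alignedBytesPerRow" bytes apart.
std::vector<uint8_t> PadRowsForD3D12(const uint8_t* src, uint32_t bytesPerRow, uint32_t rowCount) {
    const uint64_t alignedBytesPerRow = Align(bytesPerRow, kTexturePitchAlignment);
    std::vector<uint8_t> staging(alignedBytesPerRow * rowCount, 0);
    for (uint32_t row = 0; row < rowCount; ++row) {
        std::memcpy(staging.data() + row * alignedBytesPerRow, src + row * bytesPerRow, bytesPerRow);
    }
    return staging;
}

int main() {
    // A 100x4 RGBA8 image: 400 bytes per row, padded to 512 bytes per row for the copy.
    std::vector<uint8_t> pixels(400 * 4, 0xAB);
    std::vector<uint8_t> staging = PadRowsForD3D12(pixels.data(), 400, 4);
    return staging.size() == 512 * 4 ? 0 : 1;
}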
@@ -158,14 +158,13 @@ namespace dawn_native {
         return {};
     }
 
-        return WriteTextureImpl(destination, data, dataSize, dataLayout, writeSize);
+        return WriteTextureImpl(*destination, data, *dataLayout, *writeSize);
     }
 
-    MaybeError QueueBase::WriteTextureImpl(const TextureCopyView* destination,
+    MaybeError QueueBase::WriteTextureImpl(const TextureCopyView& destination,
                                            const void* data,
-                                           size_t dataSize,
-                                           const TextureDataLayout* dataLayout,
-                                           const Extent3D* writeSize) {
+                                           const TextureDataLayout& dataLayout,
+                                           const Extent3D& writeSize) {
         // TODO(tommek@google.com): This should be implemented.
         return {};
     }
 
@@ -58,11 +58,10 @@ namespace dawn_native {
                                uint64_t bufferOffset,
                                const void* data,
                                size_t size);
-        virtual MaybeError WriteTextureImpl(const TextureCopyView* destination,
+        virtual MaybeError WriteTextureImpl(const TextureCopyView& destination,
                                             const void* data,
-                                            size_t dataSize,
-                                            const TextureDataLayout* dataLayout,
-                                            const Extent3D* writeSize);
+                                            const TextureDataLayout& dataLayout,
+                                            const Extent3D& writeSize);
 
         MaybeError ValidateSubmit(uint32_t commandCount, CommandBufferBase* const* commands) const;
         MaybeError ValidateSignal(const Fence* fence, uint64_t signalValue) const;
@@ -95,39 +95,6 @@ namespace dawn_native { namespace d3d12 {
                    copySize.depth == srcSize.depth;
         }
 
-        void RecordCopyBufferToTextureFromTextureCopySplit(ID3D12GraphicsCommandList* commandList,
-                                                           const Texture2DCopySplit& baseCopySplit,
-                                                           Buffer* buffer,
-                                                           uint64_t baseOffset,
-                                                           uint64_t bufferBytesPerRow,
-                                                           Texture* texture,
-                                                           uint32_t textureMiplevel,
-                                                           uint32_t textureSlice,
-                                                           Aspect aspect) {
-            const D3D12_TEXTURE_COPY_LOCATION textureLocation =
-                ComputeTextureCopyLocationForTexture(texture, textureMiplevel, textureSlice,
-                                                     aspect);
-
-            const uint64_t offset = baseCopySplit.offset + baseOffset;
-
-            for (uint32_t i = 0; i < baseCopySplit.count; ++i) {
-                const Texture2DCopySplit::CopyInfo& info = baseCopySplit.copies[i];
-
-                // TODO(jiawei.shao@intel.com): pre-compute bufferLocation and sourceRegion as
-                // members in Texture2DCopySplit::CopyInfo.
-                const D3D12_TEXTURE_COPY_LOCATION bufferLocation =
-                    ComputeBufferLocationForCopyTextureRegion(texture, buffer->GetD3D12Resource(),
-                                                              info.bufferSize, offset,
-                                                              bufferBytesPerRow);
-                const D3D12_BOX sourceRegion =
-                    ComputeD3D12BoxFromOffsetAndSize(info.bufferOffset, info.copySize);
-
-                commandList->CopyTextureRegion(&textureLocation, info.textureOffset.x,
-                                               info.textureOffset.y, info.textureOffset.z,
-                                               &bufferLocation, &sourceRegion);
-            }
-        }
-
         void RecordCopyTextureToBufferFromTextureCopySplit(ID3D12GraphicsCommandList* commandList,
                                                            const Texture2DCopySplit& baseCopySplit,
                                                            Buffer* buffer,
@@ -713,41 +680,11 @@ namespace dawn_native { namespace d3d12 {
                 texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopyDst,
                                                     subresources);
 
-                // See comments in ComputeTextureCopySplits() for more details.
-                const TextureCopySplits copySplits = ComputeTextureCopySplits(
-                    copy->destination.origin, copy->copySize, texture->GetFormat(),
-                    copy->source.offset, copy->source.bytesPerRow, copy->source.rowsPerImage);
-
-                const uint64_t bytesPerSlice =
-                    copy->source.bytesPerRow *
-                    (copy->source.rowsPerImage / texture->GetFormat().blockHeight);
-
-                // copySplits.copies2D[1] is always calculated for the second copy slice with
-                // extra "bytesPerSlice" copy offset compared with the first copy slice. So
-                // here we use an array bufferOffsetsForNextSlice to record the extra offsets
-                // for each copy slice: bufferOffsetsForNextSlice[0] is the extra offset for
-                // the next copy slice that uses copySplits.copies2D[0], and
-                // bufferOffsetsForNextSlice[1] is the extra offset for the next copy slice
-                // that uses copySplits.copies2D[1].
-                std::array<uint64_t, TextureCopySplits::kMaxTextureCopySplits>
-                    bufferOffsetsForNextSlice = {{0u, 0u}};
-                for (uint32_t copySlice = 0; copySlice < copy->copySize.depth; ++copySlice) {
-                    const uint32_t splitIndex = copySlice % copySplits.copies2D.size();
-
-                    const Texture2DCopySplit& copySplitPerLayerBase =
-                        copySplits.copies2D[splitIndex];
-                    const uint64_t bufferOffsetForNextSlice =
-                        bufferOffsetsForNextSlice[splitIndex];
-                    const uint32_t copyTextureLayer = copySlice + copy->destination.origin.z;
-
-                    RecordCopyBufferToTextureFromTextureCopySplit(
-                        commandList, copySplitPerLayerBase, buffer, bufferOffsetForNextSlice,
-                        copy->source.bytesPerRow, texture, copy->destination.mipLevel,
-                        copyTextureLayer, subresources.aspects);
-
-                    bufferOffsetsForNextSlice[splitIndex] +=
-                        bytesPerSlice * copySplits.copies2D.size();
-                }
-
+                // compute the copySplits and record the CopyTextureRegion commands
+                CopyBufferToTextureWithCopySplit(
+                    commandContext, copy->destination, copy->copySize, texture,
+                    buffer->GetD3D12Resource(), copy->source.offset, copy->source.bytesPerRow,
+                    copy->source.rowsPerImage, subresources.aspects);
                 break;
             }
 
@@ -42,6 +42,7 @@
 #include "dawn_native/d3d12/StagingDescriptorAllocatorD3D12.h"
 #include "dawn_native/d3d12/SwapChainD3D12.h"
 #include "dawn_native/d3d12/TextureD3D12.h"
+#include "dawn_native/d3d12/UtilsD3D12.h"
 
 #include <sstream>
 
@@ -364,6 +365,33 @@ namespace dawn_native { namespace d3d12 {
                                sourceOffset, size);
     }
 
+    MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
+                                                const TextureDataLayout& src,
+                                                TextureCopy* dst,
+                                                const Extent3D& copySizePixels) {
+        CommandRecordingContext* commandContext;
+        DAWN_TRY_ASSIGN(commandContext, GetPendingCommandContext());
+        Texture* texture = ToBackend(dst->texture.Get());
+        ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
+
+        SubresourceRange range = GetSubresourcesAffectedByCopy(*dst, copySizePixels);
+
+        if (IsCompleteSubresourceCopiedTo(texture, copySizePixels, dst->mipLevel)) {
+            texture->SetIsSubresourceContentInitialized(true, range);
+        } else {
+            texture->EnsureSubresourceContentInitialized(commandContext, range);
+        }
+
+        texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopyDst, range);
+
+        // compute the copySplits and record the CopyTextureRegion commands
+        CopyBufferToTextureWithCopySplit(commandContext, *dst, copySizePixels, texture,
+                                         ToBackend(source)->GetResource(), src.offset,
+                                         src.bytesPerRow, src.rowsPerImage, range.aspects);
+
+        return {};
+    }
+
     void Device::DeallocateMemory(ResourceHeapAllocation& allocation) {
         mResourceAllocatorManager->DeallocateMemory(allocation);
     }
@@ -20,6 +20,7 @@
 #include "common/Constants.h"
 #include "common/SerialQueue.h"
 #include "dawn_native/BindingInfo.h"
+#include "dawn_native/Commands.h"
 #include "dawn_native/Device.h"
 #include "dawn_native/d3d12/CommandRecordingContext.h"
 #include "dawn_native/d3d12/D3D12Info.h"
@@ -99,6 +100,11 @@ namespace dawn_native { namespace d3d12 {
                                        uint64_t destinationOffset,
                                        uint64_t size);
 
+        MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
+                                            const TextureDataLayout& src,
+                                            TextureCopy* dst,
+                                            const Extent3D& copySizePixels);
+
         ResultOrError<ResourceHeapAllocation> AllocateMemory(
             D3D12_HEAP_TYPE heapType,
             const D3D12_RESOURCE_DESC& resourceDescriptor,
@@ -14,6 +14,11 @@
 
 #include "dawn_native/d3d12/QueueD3D12.h"
 
+#include "common/Math.h"
+#include "dawn_native/Buffer.h"
+#include "dawn_native/CommandValidation.h"
+#include "dawn_native/Commands.h"
+#include "dawn_native/DynamicUploader.h"
 #include "dawn_native/d3d12/CommandBufferD3D12.h"
 #include "dawn_native/d3d12/D3D12Error.h"
 #include "dawn_native/d3d12/DeviceD3D12.h"
@@ -22,6 +27,46 @@
 
 namespace dawn_native { namespace d3d12 {
 
+    namespace {
+        ResultOrError<UploadHandle> UploadTextureDataAligningBytesPerRow(
+            DeviceBase* device,
+            const void* data,
+            uint32_t alignedBytesPerRow,
+            uint32_t optimallyAlignedBytesPerRow,
+            uint32_t alignedRowsPerImage,
+            const TextureDataLayout& dataLayout,
+            const Format& textureFormat,
+            const Extent3D& writeSizePixel) {
+            uint32_t newDataSizeBytes = ComputeRequiredBytesInCopy(
+                textureFormat, writeSizePixel, optimallyAlignedBytesPerRow, alignedRowsPerImage);
+
+            UploadHandle uploadHandle;
+            DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
+                                              newDataSizeBytes, device->GetPendingCommandSerial()));
+            ASSERT(uploadHandle.mappedBuffer != nullptr);
+
+            uint8_t* dstPointer = static_cast<uint8_t*>(uploadHandle.mappedBuffer);
+            const uint8_t* srcPointer = static_cast<const uint8_t*>(data);
+            srcPointer += dataLayout.offset;
+
+            uint32_t alignedRowsPerImageInBlock = alignedRowsPerImage / textureFormat.blockHeight;
+            uint32_t dataRowsPerImageInBlock = dataLayout.rowsPerImage / textureFormat.blockHeight;
+            if (dataRowsPerImageInBlock == 0) {
+                dataRowsPerImageInBlock = writeSizePixel.height / textureFormat.blockHeight;
+            }
+
+            ASSERT(dataRowsPerImageInBlock >= alignedRowsPerImageInBlock);
+            uint64_t imageAdditionalStride =
+                dataLayout.bytesPerRow * (dataRowsPerImageInBlock - alignedRowsPerImageInBlock);
+
+            CopyTextureData(dstPointer, srcPointer, writeSizePixel.depth,
+                            alignedRowsPerImageInBlock, imageAdditionalStride, alignedBytesPerRow,
+                            optimallyAlignedBytesPerRow, dataLayout.bytesPerRow);
+
+            return uploadHandle;
+        }
+    }  // namespace
+
     Queue::Queue(Device* device) : QueueBase(device) {
     }
 
@@ -47,4 +92,43 @@ namespace dawn_native { namespace d3d12 {
         return {};
     }
 
+    MaybeError Queue::WriteTextureImpl(const TextureCopyView& destination,
+                                       const void* data,
+                                       const TextureDataLayout& dataLayout,
+                                       const Extent3D& writeSizePixel) {
+        const TexelBlockInfo& blockInfo =
+            destination.texture->GetFormat().GetTexelBlockInfo(destination.aspect);
+
+        // We are only copying the part of the data that will appear in the texture.
+        // Note that validating texture copy range ensures that writeSizePixel->width and
+        // writeSizePixel->height are multiples of blockWidth and blockHeight respectively.
+        uint32_t alignedBytesPerRow =
+            (writeSizePixel.width) / blockInfo.blockWidth * blockInfo.blockByteSize;
+        uint32_t alignedRowsPerImage = writeSizePixel.height;
+        uint32_t optimallyAlignedBytesPerRow =
+            Align(alignedBytesPerRow, D3D12_TEXTURE_DATA_PITCH_ALIGNMENT);
+
+        UploadHandle uploadHandle;
+        DAWN_TRY_ASSIGN(
+            uploadHandle,
+            UploadTextureDataAligningBytesPerRow(
+                GetDevice(), data, alignedBytesPerRow, optimallyAlignedBytesPerRow,
+                alignedRowsPerImage, dataLayout, destination.texture->GetFormat(), writeSizePixel));
+
+        TextureDataLayout passDataLayout = dataLayout;
+        passDataLayout.offset = uploadHandle.startOffset;
+        passDataLayout.bytesPerRow = optimallyAlignedBytesPerRow;
+        passDataLayout.rowsPerImage = alignedRowsPerImage;
+
+        TextureCopy textureCopy;
+        textureCopy.texture = destination.texture;
+        textureCopy.mipLevel = destination.mipLevel;
+        textureCopy.origin = destination.origin;
+        textureCopy.aspect = ConvertAspect(destination.texture->GetFormat(), destination.aspect);
+
+        return ToBackend(GetDevice())
+            ->CopyFromStagingToTexture(uploadHandle.stagingBuffer, passDataLayout, &textureCopy,
+                                       writeSizePixel);
+    }
+
 }}  // namespace dawn_native::d3d12
@@ -31,6 +31,10 @@ namespace dawn_native { namespace d3d12 {
 
       private:
         MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
+        MaybeError WriteTextureImpl(const TextureCopyView& destination,
+                                    const void* data,
+                                    const TextureDataLayout& dataLayout,
+                                    const Extent3D& writeSizePixel) override;
     };
 
 }}  // namespace dawn_native::d3d12
@@ -15,6 +15,9 @@
 #include "dawn_native/d3d12/UtilsD3D12.h"
 
 #include "common/Assert.h"
+#include "dawn_native/Format.h"
+#include "dawn_native/d3d12/BufferD3D12.h"
+#include "dawn_native/d3d12/CommandRecordingContext.h"
 
 #include <stringapiset.h>
 
@@ -65,7 +68,7 @@ namespace dawn_native { namespace d3d12 {
     D3D12_TEXTURE_COPY_LOCATION ComputeTextureCopyLocationForTexture(const Texture* texture,
                                                                      uint32_t level,
                                                                      uint32_t slice,
-                                                                     Aspect aspect) {
+                                                                     const Aspect& aspect) {
         D3D12_TEXTURE_COPY_LOCATION copyLocation;
         copyLocation.pResource = texture->GetD3D12Resource();
         copyLocation.Type = D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX;
@@ -136,4 +139,78 @@ namespace dawn_native { namespace d3d12 {
         }
     }
 
+    void RecordCopyBufferToTextureFromTextureCopySplit(ID3D12GraphicsCommandList* commandList,
+                                                       const Texture2DCopySplit& baseCopySplit,
+                                                       ID3D12Resource* bufferResource,
+                                                       uint64_t baseOffsetBytes,
+                                                       uint64_t bufferBytesPerRow,
+                                                       Texture* texture,
+                                                       uint32_t textureMiplevel,
+                                                       uint32_t textureSlice,
+                                                       const Aspect& aspect) {
+        const D3D12_TEXTURE_COPY_LOCATION textureLocation =
+            ComputeTextureCopyLocationForTexture(texture, textureMiplevel, textureSlice, aspect);
+
+        const uint64_t offsetBytes = baseCopySplit.offset + baseOffsetBytes;
+
+        for (uint32_t i = 0; i < baseCopySplit.count; ++i) {
+            const Texture2DCopySplit::CopyInfo& info = baseCopySplit.copies[i];
+
+            // TODO(jiawei.shao@intel.com): pre-compute bufferLocation and sourceRegion as
+            // members in Texture2DCopySplit::CopyInfo.
+            const D3D12_TEXTURE_COPY_LOCATION bufferLocation =
+                ComputeBufferLocationForCopyTextureRegion(texture, bufferResource, info.bufferSize,
+                                                          offsetBytes, bufferBytesPerRow);
+            const D3D12_BOX sourceRegion =
+                ComputeD3D12BoxFromOffsetAndSize(info.bufferOffset, info.copySize);
+
+            commandList->CopyTextureRegion(&textureLocation, info.textureOffset.x,
+                                           info.textureOffset.y, info.textureOffset.z,
+                                           &bufferLocation, &sourceRegion);
+        }
+    }
+
+    void CopyBufferToTextureWithCopySplit(CommandRecordingContext* commandContext,
+                                          const TextureCopy& textureCopy,
+                                          const Extent3D& copySize,
+                                          Texture* texture,
+                                          ID3D12Resource* bufferResource,
+                                          const uint64_t offsetBytes,
+                                          const uint32_t bytesPerRow,
+                                          const uint32_t rowsPerImage,
+                                          const Aspect& aspects) {
+        // See comments in ComputeTextureCopySplits() for more details.
+        const TextureCopySplits copySplits =
+            ComputeTextureCopySplits(textureCopy.origin, copySize, texture->GetFormat(),
+                                     offsetBytes, bytesPerRow, rowsPerImage);
+
+        const uint64_t bytesPerSlice =
+            bytesPerRow * (rowsPerImage / texture->GetFormat().blockHeight);
+
+        // copySplits.copies2D[1] is always calculated for the second copy slice with
+        // extra "bytesPerSlice" copy offset compared with the first copy slice. So
+        // here we use an array bufferOffsetsForNextSlice to record the extra offsets
+        // for each copy slice: bufferOffsetsForNextSlice[0] is the extra offset for
+        // the next copy slice that uses copySplits.copies2D[0], and
+        // bufferOffsetsForNextSlice[1] is the extra offset for the next copy slice
+        // that uses copySplits.copies2D[1].
+        std::array<uint64_t, TextureCopySplits::kMaxTextureCopySplits> bufferOffsetsForNextSlice = {
+            {0u, 0u}};
+
+        for (uint32_t copySlice = 0; copySlice < copySize.depth; ++copySlice) {
+            const uint32_t splitIndex = copySlice % copySplits.copies2D.size();
+
+            const Texture2DCopySplit& copySplitPerLayerBase = copySplits.copies2D[splitIndex];
+            const uint64_t bufferOffsetForNextSlice = bufferOffsetsForNextSlice[splitIndex];
+            const uint32_t copyTextureLayer = copySlice + textureCopy.origin.z;
+
+            RecordCopyBufferToTextureFromTextureCopySplit(
+                commandContext->GetCommandList(), copySplitPerLayerBase, bufferResource,
+                bufferOffsetForNextSlice, bytesPerRow, texture, textureCopy.mipLevel,
+                copyTextureLayer, aspects);
+
+            bufferOffsetsForNextSlice[splitIndex] += bytesPerSlice * copySplits.copies2D.size();
+        }
+    }
+
 }}  // namespace dawn_native::d3d12
@@ -15,6 +15,7 @@
 #ifndef DAWNNATIVE_D3D12_UTILSD3D12_H_
 #define DAWNNATIVE_D3D12_UTILSD3D12_H_
 
+#include "dawn_native/Commands.h"
 #include "dawn_native/d3d12/BufferD3D12.h"
 #include "dawn_native/d3d12/TextureCopySplitter.h"
 #include "dawn_native/d3d12/TextureD3D12.h"
@@ -30,7 +31,7 @@ namespace dawn_native { namespace d3d12 {
     D3D12_TEXTURE_COPY_LOCATION ComputeTextureCopyLocationForTexture(const Texture* texture,
                                                                      uint32_t level,
                                                                      uint32_t slice,
-                                                                     Aspect aspect);
+                                                                     const Aspect& aspect);
 
     D3D12_TEXTURE_COPY_LOCATION ComputeBufferLocationForCopyTextureRegion(
         const Texture* texture,
@@ -42,6 +43,26 @@ namespace dawn_native { namespace d3d12 {
 
     bool IsTypeless(DXGI_FORMAT format);
 
+    void RecordCopyBufferToTextureFromTextureCopySplit(ID3D12GraphicsCommandList* commandList,
+                                                       const Texture2DCopySplit& baseCopySplit,
+                                                       Buffer* buffer,
+                                                       uint64_t baseOffset,
+                                                       uint64_t bufferBytesPerRow,
+                                                       Texture* texture,
+                                                       uint32_t textureMiplevel,
+                                                       uint32_t textureSlice,
+                                                       const Aspect& aspect);
+
+    void CopyBufferToTextureWithCopySplit(CommandRecordingContext* commandContext,
+                                          const TextureCopy& textureCopy,
+                                          const Extent3D& copySize,
+                                          Texture* texture,
+                                          ID3D12Resource* bufferResource,
+                                          const uint64_t offset,
+                                          const uint32_t bytesPerRow,
+                                          const uint32_t rowsPerImage,
+                                          const Aspect& aspect);
+
 }}  // namespace dawn_native::d3d12
 
 #endif  // DAWNNATIVE_D3D12_UTILSD3D12_H_
@@ -67,7 +67,7 @@ namespace dawn_native { namespace metal {
         MaybeError CopyFromStagingToTexture(StagingBufferBase* source,
                                             const TextureDataLayout& dataLayout,
                                             TextureCopy* dst,
-                                            const Extent3D copySize);
+                                            const Extent3D& copySizePixels);
 
       private:
         Device(AdapterBase* adapter, id<MTLDevice> mtlDevice, const DeviceDescriptor* descriptor);
@@ -271,7 +271,7 @@ namespace dawn_native { namespace metal {
     MaybeError Device::CopyFromStagingToTexture(StagingBufferBase* source,
                                                 const TextureDataLayout& dataLayout,
                                                 TextureCopy* dst,
-                                                const Extent3D copySize) {
+                                                const Extent3D& copySizePixels) {
         Texture* texture = ToBackend(dst->texture.Get());
 
         // This function assumes data is perfectly aligned. Otherwise, it might be necessary
@@ -280,19 +280,19 @@ namespace dawn_native { namespace metal {
         uint32_t blockSize = blockInfo.blockByteSize;
         uint32_t blockWidth = blockInfo.blockWidth;
         uint32_t blockHeight = blockInfo.blockHeight;
-        ASSERT(dataLayout.rowsPerImage == (copySize.height));
-        ASSERT(dataLayout.bytesPerRow == (copySize.width) / blockWidth * blockSize);
+        ASSERT(dataLayout.rowsPerImage == (copySizePixels.height));
+        ASSERT(dataLayout.bytesPerRow == (copySizePixels.width) / blockWidth * blockSize);
 
-        EnsureDestinationTextureInitialized(texture, *dst, copySize);
+        EnsureDestinationTextureInitialized(texture, *dst, copySizePixels);
 
         // Metal validation layer requires that if the texture's pixel format is a compressed
         // format, the sourceSize must be a multiple of the pixel format's block size or be
         // clamped to the edge of the texture if the block extends outside the bounds of a
         // texture.
         const Extent3D clampedSize =
-            texture->ClampToMipLevelVirtualSize(dst->mipLevel, dst->origin, copySize);
+            texture->ClampToMipLevelVirtualSize(dst->mipLevel, dst->origin, copySizePixels);
         const uint32_t copyBaseLayer = dst->origin.z;
-        const uint32_t copyLayerCount = copySize.depth;
+        const uint32_t copyLayerCount = copySizePixels.depth;
         const uint64_t bytesPerImage =
             dataLayout.rowsPerImage * dataLayout.bytesPerRow / blockHeight;
 
@@ -28,11 +28,10 @@ namespace dawn_native { namespace metal {
 
      private:
       MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
-      MaybeError WriteTextureImpl(const TextureCopyView* destination,
+      MaybeError WriteTextureImpl(const TextureCopyView& destination,
                                   const void* data,
-                                  size_t dataSize,
-                                  const TextureDataLayout* dataLayout,
-                                  const Extent3D* writeSize) override;
+                                  const TextureDataLayout& dataLayout,
+                                  const Extent3D& writeSizePixel) override;
     };
 
 }}  // namespace dawn_native::metal
@@ -29,37 +29,36 @@ namespace dawn_native { namespace metal {
         ResultOrError<UploadHandle> UploadTextureDataAligningBytesPerRow(
             DeviceBase* device,
             const void* data,
-            size_t dataSize,
             uint32_t alignedBytesPerRow,
             uint32_t alignedRowsPerImage,
-            const TextureDataLayout* dataLayout,
+            const TextureDataLayout& dataLayout,
             const TexelBlockInfo& blockInfo,
-            const Extent3D* writeSize) {
-            uint32_t newDataSize = ComputeRequiredBytesInCopy(
-                blockInfo, *writeSize, alignedBytesPerRow, alignedRowsPerImage);
+            const Extent3D& writeSizePixel) {
+            uint32_t newDataSizeBytes = ComputeRequiredBytesInCopy(
+                blockInfo, writeSizePixel, alignedBytesPerRow, alignedRowsPerImage);
 
             UploadHandle uploadHandle;
             DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
-                                              newDataSize, device->GetPendingCommandSerial()));
+                                              newDataSizeBytes, device->GetPendingCommandSerial()));
             ASSERT(uploadHandle.mappedBuffer != nullptr);
 
             uint8_t* dstPointer = static_cast<uint8_t*>(uploadHandle.mappedBuffer);
             const uint8_t* srcPointer = static_cast<const uint8_t*>(data);
-            srcPointer += dataLayout->offset;
+            srcPointer += dataLayout.offset;
 
             uint32_t alignedRowsPerImageInBlock = alignedRowsPerImage / blockInfo.blockHeight;
-            uint32_t dataRowsPerImageInBlock = dataLayout->rowsPerImage / blockInfo.blockHeight;
+            uint32_t dataRowsPerImageInBlock = dataLayout.rowsPerImage / blockInfo.blockHeight;
             if (dataRowsPerImageInBlock == 0) {
-                dataRowsPerImageInBlock = writeSize->height / blockInfo.blockHeight;
+                dataRowsPerImageInBlock = writeSizePixel.height / blockInfo.blockHeight;
             }
 
             ASSERT(dataRowsPerImageInBlock >= alignedRowsPerImageInBlock);
             uint64_t imageAdditionalStride =
-                dataLayout->bytesPerRow * (dataRowsPerImageInBlock - alignedRowsPerImageInBlock);
+                dataLayout.bytesPerRow * (dataRowsPerImageInBlock - alignedRowsPerImageInBlock);
 
-            CopyTextureData(dstPointer, srcPointer, writeSize->depth, alignedRowsPerImageInBlock,
-                            imageAdditionalStride, alignedBytesPerRow, alignedBytesPerRow,
-                            dataLayout->bytesPerRow);
+            CopyTextureData(dstPointer, srcPointer, writeSizePixel.depth,
+                            alignedRowsPerImageInBlock, imageAdditionalStride, alignedBytesPerRow,
+                            alignedBytesPerRow, dataLayout.bytesPerRow);
 
             return uploadHandle;
         }
@@ -86,40 +85,40 @@ namespace dawn_native { namespace metal {
     // We don't write from the CPU to the texture directly which can be done in Metal using the
     // replaceRegion function, because the function requires a non-private storage mode and Dawn
    // sets the private storage mode by default for all textures except IOSurfaces on macOS.
-    MaybeError Queue::WriteTextureImpl(const TextureCopyView* destination,
+    MaybeError Queue::WriteTextureImpl(const TextureCopyView& destination,
                                        const void* data,
-                                       size_t dataSize,
-                                       const TextureDataLayout* dataLayout,
-                                       const Extent3D* writeSize) {
+                                       const TextureDataLayout& dataLayout,
+                                       const Extent3D& writeSizePixel) {
         const TexelBlockInfo& blockInfo =
-            destination->texture->GetFormat().GetTexelBlockInfo(destination->aspect);
+            destination.texture->GetFormat().GetTexelBlockInfo(destination.aspect);
 
         // We are only copying the part of the data that will appear in the texture.
-        // Note that validating texture copy range ensures that writeSize->width and
-        // writeSize->height are multiples of blockWidth and blockHeight respectively.
+        // Note that validating texture copy range ensures that writeSizePixel->width and
+        // writeSizePixel->height are multiples of blockWidth and blockHeight respectively.
         uint32_t alignedBytesPerRow =
-            (writeSize->width) / blockInfo.blockWidth * blockInfo.blockByteSize;
-        uint32_t alignedRowsPerImage = writeSize->height;
+            (writeSizePixel.width) / blockInfo.blockWidth * blockInfo.blockByteSize;
+        uint32_t alignedRowsPerImage = writeSizePixel.height;
 
         UploadHandle uploadHandle;
-        DAWN_TRY_ASSIGN(uploadHandle, UploadTextureDataAligningBytesPerRow(
-                                          GetDevice(), data, dataSize, alignedBytesPerRow,
-                                          alignedRowsPerImage, dataLayout, blockInfo, writeSize));
+        DAWN_TRY_ASSIGN(uploadHandle,
+                        UploadTextureDataAligningBytesPerRow(GetDevice(), data, alignedBytesPerRow,
+                                                             alignedRowsPerImage, dataLayout,
+                                                             blockInfo, writeSizePixel));
 
-        TextureDataLayout passDataLayout = *dataLayout;
+        TextureDataLayout passDataLayout = dataLayout;
         passDataLayout.offset = uploadHandle.startOffset;
         passDataLayout.bytesPerRow = alignedBytesPerRow;
         passDataLayout.rowsPerImage = alignedRowsPerImage;
 
         TextureCopy textureCopy;
-        textureCopy.texture = destination->texture;
-        textureCopy.mipLevel = destination->mipLevel;
-        textureCopy.origin = destination->origin;
-        textureCopy.aspect = ConvertAspect(destination->texture->GetFormat(), destination->aspect);
+        textureCopy.texture = destination.texture;
+        textureCopy.mipLevel = destination.mipLevel;
+        textureCopy.origin = destination.origin;
+        textureCopy.aspect = ConvertAspect(destination.texture->GetFormat(), destination.aspect);
 
         return ToBackend(GetDevice())
             ->CopyFromStagingToTexture(uploadHandle.stagingBuffer, passDataLayout, &textureCopy,
-                                       *writeSize);
+                                       writeSizePixel);
     }
 
 }}  // namespace dawn_native::metal
@@ -617,20 +617,21 @@ namespace dawn_native { namespace vulkan {
     MaybeError Device::CopyFromStagingToTexture(StagingBufferBase* source,
                                                 const TextureDataLayout& src,
                                                 TextureCopy* dst,
-                                                const Extent3D copySize) {
+                                                const Extent3D& copySizePixels) {
         // There is no need of a barrier to make host writes available and visible to the copy
         // operation for HOST_COHERENT memory. The Vulkan spec for vkQueueSubmit describes that it
         // does an implicit availability, visibility and domain operation.
 
         CommandRecordingContext* recordingContext = GetPendingRecordingContext();
 
-        VkBufferImageCopy region = ComputeBufferImageCopyRegion(src, *dst, copySize);
+        VkBufferImageCopy region = ComputeBufferImageCopyRegion(src, *dst, copySizePixels);
         VkImageSubresourceLayers subresource = region.imageSubresource;
 
         ASSERT(dst->texture->GetDimension() == wgpu::TextureDimension::e2D);
-        SubresourceRange range = GetSubresourcesAffectedByCopy(*dst, copySize);
+        SubresourceRange range = GetSubresourcesAffectedByCopy(*dst, copySizePixels);
 
-        if (IsCompleteSubresourceCopiedTo(dst->texture.Get(), copySize, subresource.mipLevel)) {
+        if (IsCompleteSubresourceCopiedTo(dst->texture.Get(), copySizePixels,
+                                          subresource.mipLevel)) {
             // Since texture has been overwritten, it has been "initialized"
             dst->texture->SetIsSubresourceContentInitialized(true, range);
         } else {
@@ -91,7 +91,7 @@ namespace dawn_native { namespace vulkan {
         MaybeError CopyFromStagingToTexture(StagingBufferBase* source,
                                             const TextureDataLayout& src,
                                             TextureCopy* dst,
-                                            const Extent3D copySize);
+                                            const Extent3D& copySizePixels);
 
         ResultOrError<ResourceMemoryAllocation> AllocateMemory(VkMemoryRequirements requirements,
                                                                bool mappable);
@@ -31,15 +31,14 @@ namespace dawn_native { namespace vulkan {
         ResultOrError<UploadHandle> UploadTextureDataAligningBytesPerRow(
             DeviceBase* device,
             const void* data,
-            size_t dataSize,
             uint32_t alignedBytesPerRow,
             uint32_t optimallyAlignedBytesPerRow,
             uint32_t alignedRowsPerImage,
-            const TextureDataLayout* dataLayout,
+            const TextureDataLayout& dataLayout,
             const TexelBlockInfo& blockInfo,
-            const Extent3D* writeSize) {
-            uint32_t newDataSize = ComputeRequiredBytesInCopy(
-                blockInfo, *writeSize, optimallyAlignedBytesPerRow, alignedRowsPerImage);
+            const Extent3D& writeSizePixel) {
+            uint32_t newDataSizeBytes = ComputeRequiredBytesInCopy(
+                blockInfo, writeSizePixel, optimallyAlignedBytesPerRow, alignedRowsPerImage);
 
             uint64_t optimalOffsetAlignment =
                 ToBackend(device)
@@ -48,18 +47,18 @@ namespace dawn_native { namespace vulkan {
 
             UploadHandle uploadHandle;
             DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
-                                              newDataSize + optimalOffsetAlignment - 1,
+                                              newDataSizeBytes + optimalOffsetAlignment - 1,
                                               device->GetPendingCommandSerial()));
             ASSERT(uploadHandle.mappedBuffer != nullptr);
 
             uint8_t* dstPointer = static_cast<uint8_t*>(uploadHandle.mappedBuffer);
             const uint8_t* srcPointer = static_cast<const uint8_t*>(data);
-            srcPointer += dataLayout->offset;
+            srcPointer += dataLayout.offset;
 
             uint32_t alignedRowsPerImageInBlock = alignedRowsPerImage / blockInfo.blockHeight;
-            uint32_t dataRowsPerImageInBlock = dataLayout->rowsPerImage / blockInfo.blockHeight;
+            uint32_t dataRowsPerImageInBlock = dataLayout.rowsPerImage / blockInfo.blockHeight;
             if (dataRowsPerImageInBlock == 0) {
-                dataRowsPerImageInBlock = writeSize->height / blockInfo.blockHeight;
+                dataRowsPerImageInBlock = writeSizePixel.height / blockInfo.blockHeight;
             }
 
             uint64_t additionalOffset =
@@ -69,11 +68,11 @@ namespace dawn_native { namespace vulkan {
 
             ASSERT(dataRowsPerImageInBlock >= alignedRowsPerImageInBlock);
             uint64_t imageAdditionalStride =
-                dataLayout->bytesPerRow * (dataRowsPerImageInBlock - alignedRowsPerImageInBlock);
+                dataLayout.bytesPerRow * (dataRowsPerImageInBlock - alignedRowsPerImageInBlock);
 
-            CopyTextureData(dstPointer, srcPointer, writeSize->depth, alignedRowsPerImageInBlock,
-                            imageAdditionalStride, alignedBytesPerRow, optimallyAlignedBytesPerRow,
-                            dataLayout->bytesPerRow);
+            CopyTextureData(dstPointer, srcPointer, writeSizePixel.depth,
+                            alignedRowsPerImageInBlock, imageAdditionalStride, alignedBytesPerRow,
+                            optimallyAlignedBytesPerRow, dataLayout.bytesPerRow);
 
             return uploadHandle;
         }
@@ -105,20 +104,19 @@ namespace dawn_native { namespace vulkan {
         return {};
     }
 
-    MaybeError Queue::WriteTextureImpl(const TextureCopyView* destination,
+    MaybeError Queue::WriteTextureImpl(const TextureCopyView& destination,
                                        const void* data,
-                                       size_t dataSize,
-                                       const TextureDataLayout* dataLayout,
-                                       const Extent3D* writeSize) {
+                                       const TextureDataLayout& dataLayout,
+                                       const Extent3D& writeSizePixel) {
         const TexelBlockInfo& blockInfo =
-            destination->texture->GetFormat().GetTexelBlockInfo(destination->aspect);
+            destination.texture->GetFormat().GetTexelBlockInfo(destination.aspect);
 
         // We are only copying the part of the data that will appear in the texture.
-        // Note that validating texture copy range ensures that writeSize->width and
-        // writeSize->height are multiples of blockWidth and blockHeight respectively.
+        // Note that validating texture copy range ensures that writeSizePixel->width and
+        // writeSizePixel->height are multiples of blockWidth and blockHeight respectively.
         uint32_t alignedBytesPerRow =
-            (writeSize->width) / blockInfo.blockWidth * blockInfo.blockByteSize;
-        uint32_t alignedRowsPerImage = writeSize->height;
+            (writeSizePixel.width) / blockInfo.blockWidth * blockInfo.blockByteSize;
+        uint32_t alignedRowsPerImage = writeSizePixel.height;
 
         uint32_t optimalBytesPerRowAlignment =
             ToBackend(GetDevice())
@@ -128,24 +126,24 @@ namespace dawn_native { namespace vulkan {
             Align(alignedBytesPerRow, optimalBytesPerRowAlignment);
 
         UploadHandle uploadHandle;
-        DAWN_TRY_ASSIGN(uploadHandle, UploadTextureDataAligningBytesPerRow(
-                                          GetDevice(), data, dataSize, alignedBytesPerRow,
-                                          optimallyAlignedBytesPerRow, alignedRowsPerImage,
-                                          dataLayout, blockInfo, writeSize));
+        DAWN_TRY_ASSIGN(uploadHandle,
+                        UploadTextureDataAligningBytesPerRow(
+                            GetDevice(), data, alignedBytesPerRow, optimallyAlignedBytesPerRow,
+                            alignedRowsPerImage, dataLayout, blockInfo, writeSizePixel));
 
-        TextureDataLayout passDataLayout = *dataLayout;
+        TextureDataLayout passDataLayout = dataLayout;
         passDataLayout.offset = uploadHandle.startOffset;
         passDataLayout.bytesPerRow = optimallyAlignedBytesPerRow;
         passDataLayout.rowsPerImage = alignedRowsPerImage;
 
         TextureCopy textureCopy;
-        textureCopy.texture = destination->texture;
-        textureCopy.mipLevel = destination->mipLevel;
-        textureCopy.origin = destination->origin;
-        textureCopy.aspect = ConvertAspect(destination->texture->GetFormat(), destination->aspect);
+        textureCopy.texture = destination.texture;
+        textureCopy.mipLevel = destination.mipLevel;
+        textureCopy.origin = destination.origin;
+        textureCopy.aspect = ConvertAspect(destination.texture->GetFormat(), destination.aspect);
 
         return ToBackend(GetDevice())
             ->CopyFromStagingToTexture(uploadHandle.stagingBuffer, passDataLayout, &textureCopy,
-                                       *writeSize);
+                                       writeSizePixel);
     }
 }}  // namespace dawn_native::vulkan
@@ -31,11 +31,10 @@ namespace dawn_native { namespace vulkan {
         using QueueBase::QueueBase;
 
         MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
-        MaybeError WriteTextureImpl(const TextureCopyView* destination,
+        MaybeError WriteTextureImpl(const TextureCopyView& destination,
                                     const void* data,
-                                    size_t dataSize,
-                                    const TextureDataLayout* dataLayout,
-                                    const Extent3D* writeSize) override;
+                                    const TextureDataLayout& dataLayout,
+                                    const Extent3D& writeSizePixel) override;
     };
 
 }}  // namespace dawn_native::vulkan
@@ -1179,4 +1179,7 @@ TEST_P(CompressedTextureWriteTextureTest,
     }
 }
 
-DAWN_INSTANTIATE_TEST(CompressedTextureWriteTextureTest, MetalBackend(), VulkanBackend());
+DAWN_INSTANTIATE_TEST(CompressedTextureWriteTextureTest,
+                      MetalBackend(),
+                      VulkanBackend(),
+                      D3D12Backend());
@@ -522,4 +522,4 @@ TEST_P(QueueWriteTextureTests, VaryingArrayBytesPerRow) {
     }
 }
 
-DAWN_INSTANTIATE_TEST(QueueWriteTextureTests, MetalBackend(), VulkanBackend());
+DAWN_INSTANTIATE_TEST(QueueWriteTextureTests, MetalBackend(), VulkanBackend(), D3D12Backend());
@@ -1382,7 +1382,7 @@ TEST_P(TextureZeroInitTest, CopyTextureToBufferNonRenderableUnaligned) {
 // In this test WriteTexture fully overwrites a texture
 TEST_P(TextureZeroInitTest, WriteWholeTexture) {
     // TODO(dawn:483): Remove this condition after implementing WriteTexture in those backends.
-    DAWN_SKIP_TEST_IF(IsOpenGL() || IsD3D12());
+    DAWN_SKIP_TEST_IF(IsOpenGL());
 
     wgpu::TextureDescriptor descriptor = CreateTextureDescriptor(
         1, 1, wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc, kColorFormat);
@@ -1417,7 +1417,7 @@ TEST_P(TextureZeroInitTest, WriteWholeTexture) {
 // half.
 TEST_P(TextureZeroInitTest, WriteTextureHalf) {
     // TODO(dawn:483): Remove this condition after implementing WriteTexture in those backends.
-    DAWN_SKIP_TEST_IF(IsOpenGL() || IsD3D12());
+    DAWN_SKIP_TEST_IF(IsOpenGL());
 
     wgpu::TextureDescriptor descriptor = CreateTextureDescriptor(
         4, 1,
@@ -1457,7 +1457,7 @@ TEST_P(TextureZeroInitTest, WriteTextureHalf) {
 // is needed for neither the subresources involved in the write nor the other subresources.
 TEST_P(TextureZeroInitTest, WriteWholeTextureArray) {
     // TODO(dawn:483): Remove this condition after implementing WriteTexture in those backends.
-    DAWN_SKIP_TEST_IF(IsOpenGL() || IsD3D12());
+    DAWN_SKIP_TEST_IF(IsOpenGL());
 
     wgpu::TextureDescriptor descriptor = CreateTextureDescriptor(
         1, 6, wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc, kColorFormat);
@@ -1500,7 +1500,7 @@ TEST_P(TextureZeroInitTest, WriteWholeTextureArray) {
 // half.
 TEST_P(TextureZeroInitTest, WriteTextureArrayHalf) {
     // TODO(dawn:483): Remove this condition after implementing WriteTexture in those backends.
-    DAWN_SKIP_TEST_IF(IsOpenGL() || IsD3D12());
+    DAWN_SKIP_TEST_IF(IsOpenGL());
 
     wgpu::TextureDescriptor descriptor = CreateTextureDescriptor(
         4, 6,
@@ -1547,7 +1547,7 @@ TEST_P(TextureZeroInitTest, WriteTextureArrayHalf) {
 // In this test WriteTexture fully overwrites a texture at mip level.
 TEST_P(TextureZeroInitTest, WriteWholeTextureAtMipLevel) {
     // TODO(dawn:483): Remove this condition after implementing WriteTexture in those backends.
-    DAWN_SKIP_TEST_IF(IsOpenGL() || IsD3D12());
+    DAWN_SKIP_TEST_IF(IsOpenGL());
 
     wgpu::TextureDescriptor descriptor = CreateTextureDescriptor(
         4, 1, wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc, kColorFormat);
@@ -1586,7 +1586,7 @@ TEST_P(TextureZeroInitTest, WriteWholeTextureAtMipLevel) {
 // other half.
 TEST_P(TextureZeroInitTest, WriteTextureHalfAtMipLevel) {
     // TODO(dawn:483): Remove this condition after implementing WriteTexture in those backends.
-    DAWN_SKIP_TEST_IF(IsOpenGL() || IsD3D12());
+    DAWN_SKIP_TEST_IF(IsOpenGL());
 
     wgpu::TextureDescriptor descriptor = CreateTextureDescriptor(
         4, 1,