Refactoring Queue::WriteTexture implementation

More code is now shared across backends.

Bug: dawn:483
Change-Id: I7ca1b8cbc2f12e408c94fbe5bca9fd29e47e0004
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/27021
Commit-Queue: Tomek Ponitka <tommek@google.com>
Reviewed-by: Austin Eng <enga@chromium.org>

Commit: d720785616 (parent: 7f265d1d40)
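As a rough orientation aid for the diff below, here is an editorial toy sketch (not Dawn code) of the structure this change moves to: the shared frontend queue path computes the staging layout itself and only asks the backend device for its copy-alignment preferences and for the final staging-to-texture copy. The three hook names follow the diff; every other type and name in the sketch is a simplified stand-in.

    // Toy model of the refactored WriteTexture flow (editorial sketch, assumptions noted above).
    #include <cstdint>
    #include <cstdio>

    struct TextureDataLayout { uint64_t offset; uint32_t bytesPerRow; uint32_t rowsPerImage; };
    struct Extent3D { uint32_t width, height, depth; };

    class DeviceBase {
      public:
        virtual ~DeviceBase() = default;
        // Per-backend knobs the shared WriteTexture path needs.
        virtual uint32_t GetOptimalBytesPerRowAlignment() const = 0;
        virtual uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const = 0;
        // The only backend-specific step left: issue the copy from staging memory.
        virtual void CopyFromStagingToTexture(const void* staging, const TextureDataLayout& layout,
                                              const Extent3D& copySizePixels) = 0;
    };

    // Shared frontend flow, corresponding to QueueBase::WriteTextureImpl in the diff.
    void WriteTexture(DeviceBase* device, const void* data, uint32_t bytesPerRow,
                      const Extent3D& writeSizePixel) {
        uint32_t align = device->GetOptimalBytesPerRowAlignment();
        uint32_t alignedBytesPerRow = (bytesPerRow + align - 1) / align * align;
        TextureDataLayout staged{0, alignedBytesPerRow, writeSizePixel.height};
        // (The real code repacks `data` into a DynamicUploader staging allocation here.)
        device->CopyFromStagingToTexture(data, staged, writeSizePixel);
    }

    class FakeBackend : public DeviceBase {
      public:
        uint32_t GetOptimalBytesPerRowAlignment() const override { return 256; }
        uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override { return 1; }
        void CopyFromStagingToTexture(const void*, const TextureDataLayout& layout,
                                      const Extent3D& size) override {
            printf("copy %ux%ux%u, staged bytesPerRow = %u\n", size.width, size.height, size.depth,
                   layout.bytesPerRow);
        }
    };

    int main() {
        FakeBackend backend;
        uint8_t pixels[4 * 10 * 10] = {};
        WriteTexture(&backend, pixels, 40, {10, 10, 1});  // prints a staged bytesPerRow of 256
        return 0;
    }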
@@ -16,6 +16,7 @@
 #define DAWNNATIVE_DEVICE_H_
 
 #include "common/Serial.h"
+#include "dawn_native/Commands.h"
 #include "dawn_native/Error.h"
 #include "dawn_native/Extensions.h"
 #include "dawn_native/Format.h"

@@ -187,6 +188,10 @@ namespace dawn_native {
                                                    BufferBase* destination,
                                                    uint64_t destinationOffset,
                                                    uint64_t size) = 0;
+        virtual MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
+                                                    const TextureDataLayout& src,
+                                                    TextureCopy* dst,
+                                                    const Extent3D& copySizePixels) = 0;
 
         DynamicUploader* GetDynamicUploader() const;
 

@@ -224,6 +229,9 @@
        void LoseForTesting();
        void AddFutureCallbackSerial(Serial serial);
 
+       virtual uint32_t GetOptimalBytesPerRowAlignment() const = 0;
+       virtual uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const = 0;
+
      protected:
        void SetToggle(Toggle toggle, bool isEnabled);
        void ForceSetToggle(Toggle toggle, bool isEnabled);
@@ -18,6 +18,7 @@
 #include "dawn_native/Buffer.h"
 #include "dawn_native/CommandBuffer.h"
 #include "dawn_native/CommandValidation.h"
+#include "dawn_native/Commands.h"
 #include "dawn_native/Device.h"
 #include "dawn_native/DynamicUploader.h"
 #include "dawn_native/ErrorScope.h"

@@ -32,7 +33,93 @@
 #include <cstring>
 
 namespace dawn_native {
+    namespace {
+        void CopyTextureData(uint8_t* dstPointer,
+                             const uint8_t* srcPointer,
+                             uint32_t depth,
+                             uint32_t rowsPerImageInBlock,
+                             uint64_t imageAdditionalStride,
+                             uint32_t actualBytesPerRow,
+                             uint32_t dstBytesPerRow,
+                             uint32_t srcBytesPerRow) {
+            bool copyWholeLayer =
+                actualBytesPerRow == dstBytesPerRow && dstBytesPerRow == srcBytesPerRow;
+            bool copyWholeData = copyWholeLayer && imageAdditionalStride == 0;
+
+            if (!copyWholeLayer) { // copy row by row
+                for (uint32_t d = 0; d < depth; ++d) {
+                    for (uint32_t h = 0; h < rowsPerImageInBlock; ++h) {
+                        memcpy(dstPointer, srcPointer, actualBytesPerRow);
+                        dstPointer += dstBytesPerRow;
+                        srcPointer += srcBytesPerRow;
+                    }
+                    srcPointer += imageAdditionalStride;
+                }
+            } else {
+                uint64_t layerSize = uint64_t(rowsPerImageInBlock) * actualBytesPerRow;
+                if (!copyWholeData) { // copy layer by layer
+                    for (uint32_t d = 0; d < depth; ++d) {
+                        memcpy(dstPointer, srcPointer, layerSize);
+                        dstPointer += layerSize;
+                        srcPointer += layerSize + imageAdditionalStride;
+                    }
+                } else { // do a single copy
+                    memcpy(dstPointer, srcPointer, layerSize * depth);
+                }
+            }
+        }
+
+        ResultOrError<UploadHandle> UploadTextureDataAligningBytesPerRowAndOffset(
+            DeviceBase* device,
+            const void* data,
+            uint32_t alignedBytesPerRow,
+            uint32_t optimallyAlignedBytesPerRow,
+            uint32_t alignedRowsPerImage,
+            const TextureDataLayout& dataLayout,
+            const TexelBlockInfo& blockInfo,
+            const Extent3D& writeSizePixel) {
+            uint64_t newDataSizeBytes;
+            DAWN_TRY_ASSIGN(
+                newDataSizeBytes,
+                ComputeRequiredBytesInCopy(blockInfo, writeSizePixel, optimallyAlignedBytesPerRow,
+                                           alignedRowsPerImage));
+
+            uint64_t optimalOffsetAlignment =
+                device->GetOptimalBufferToTextureCopyOffsetAlignment();
+            ASSERT(IsPowerOfTwo(optimalOffsetAlignment));
+            ASSERT(IsPowerOfTwo(blockInfo.blockByteSize));
+            // We need the offset to be aligned to both optimalOffsetAlignment and blockByteSize,
+            // since both of them are powers of two, we only need to align to the max value.
+            uint64_t offsetAlignment =
+                std::max(optimalOffsetAlignment, uint64_t(blockInfo.blockByteSize));
+
+            UploadHandle uploadHandle;
+            DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
+                                              newDataSizeBytes, device->GetPendingCommandSerial(),
+                                              offsetAlignment));
+            ASSERT(uploadHandle.mappedBuffer != nullptr);
+
+            uint8_t* dstPointer = static_cast<uint8_t*>(uploadHandle.mappedBuffer);
+            const uint8_t* srcPointer = static_cast<const uint8_t*>(data);
+            srcPointer += dataLayout.offset;
+
+            uint32_t alignedRowsPerImageInBlock = alignedRowsPerImage / blockInfo.blockHeight;
+            uint32_t dataRowsPerImageInBlock = dataLayout.rowsPerImage / blockInfo.blockHeight;
+            if (dataRowsPerImageInBlock == 0) {
+                dataRowsPerImageInBlock = writeSizePixel.height / blockInfo.blockHeight;
+            }
+
+            ASSERT(dataRowsPerImageInBlock >= alignedRowsPerImageInBlock);
+            uint64_t imageAdditionalStride =
+                dataLayout.bytesPerRow * (dataRowsPerImageInBlock - alignedRowsPerImageInBlock);
+
+            CopyTextureData(dstPointer, srcPointer, writeSizePixel.depth,
+                            alignedRowsPerImageInBlock, imageAdditionalStride, alignedBytesPerRow,
+                            optimallyAlignedBytesPerRow, dataLayout.bytesPerRow);
+
+            return uploadHandle;
+        }
+    } // namespace
 
     // QueueBase
 
     QueueBase::QueueBase(DeviceBase* device) : ObjectBase(device) {
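For readers skimming the helper above: CopyTextureData picks one of three strategies, a single memcpy when source and destination are fully contiguous, a memcpy per layer when rows are tight but the source has extra rows between layers, and a memcpy per row otherwise. The standalone sketch below is editorial (not Dawn code); it restates that branch logic with concrete numbers, and its parameter names mirror the helper.

    // Editorial sketch of the CopyTextureData strategy selection.
    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <vector>

    static void CopyTextureDataSketch(uint8_t* dst, const uint8_t* src, uint32_t depth,
                                      uint32_t rowsPerImageInBlock, uint64_t imageAdditionalStride,
                                      uint32_t actualBytesPerRow, uint32_t dstBytesPerRow,
                                      uint32_t srcBytesPerRow) {
        bool copyWholeLayer =
            actualBytesPerRow == dstBytesPerRow && dstBytesPerRow == srcBytesPerRow;
        bool copyWholeData = copyWholeLayer && imageAdditionalStride == 0;
        if (!copyWholeLayer) {
            // Row pitches differ: copy each row, advancing by the respective pitches.
            for (uint32_t d = 0; d < depth; ++d) {
                for (uint32_t h = 0; h < rowsPerImageInBlock; ++h) {
                    memcpy(dst, src, actualBytesPerRow);
                    dst += dstBytesPerRow;
                    src += srcBytesPerRow;
                }
                src += imageAdditionalStride;
            }
        } else if (!copyWholeData) {
            // Rows are tight but the source has extra rows between layers.
            uint64_t layerSize = uint64_t(rowsPerImageInBlock) * actualBytesPerRow;
            for (uint32_t d = 0; d < depth; ++d) {
                memcpy(dst, src, layerSize);
                dst += layerSize;
                src += layerSize + imageAdditionalStride;
            }
        } else {
            // Everything is contiguous on both sides: one memcpy.
            memcpy(dst, src, uint64_t(rowsPerImageInBlock) * actualBytesPerRow * depth);
        }
    }

    int main() {
        // A 4x4x2 RGBA8 region: 16 bytes per tight row.
        const uint32_t depth = 2, rows = 4, actualBytesPerRow = 16;
        // Source rows are padded to 24 bytes and destination rows to 32 bytes,
        // so the row-by-row branch is taken.
        const uint32_t srcBytesPerRow = 24, dstBytesPerRow = 32;
        std::vector<uint8_t> src(srcBytesPerRow * rows * depth, 0xAB);
        std::vector<uint8_t> dst(dstBytesPerRow * rows * depth, 0);
        CopyTextureDataSketch(dst.data(), src.data(), depth, rows, /*imageAdditionalStride=*/0,
                              actualBytesPerRow, dstBytesPerRow, srcBytesPerRow);
        // The destination row padding is never written, so this prints 0.
        printf("first padded byte in dst row 0: %u\n", (unsigned)dst[actualBytesPerRow]);
        return 0;
    }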
@@ -147,11 +234,41 @@ namespace dawn_native {
     MaybeError QueueBase::WriteTextureImpl(const TextureCopyView& destination,
                                            const void* data,
                                            const TextureDataLayout& dataLayout,
-                                           const Extent3D& writeSize) {
-        // TODO(tommek@google.com): This should be implemented.
-        return {};
-    }
+                                           const Extent3D& writeSizePixel) {
+        const TexelBlockInfo& blockInfo =
+            destination.texture->GetFormat().GetTexelBlockInfo(destination.aspect);
 
+        // We are only copying the part of the data that will appear in the texture.
+        // Note that validating texture copy range ensures that writeSizePixel->width and
+        // writeSizePixel->height are multiples of blockWidth and blockHeight respectively.
+        uint32_t alignedBytesPerRow =
+            (writeSizePixel.width) / blockInfo.blockWidth * blockInfo.blockByteSize;
+        uint32_t alignedRowsPerImage = writeSizePixel.height;
+
+        uint32_t optimalBytesPerRowAlignment = GetDevice()->GetOptimalBytesPerRowAlignment();
+        uint32_t optimallyAlignedBytesPerRow =
+            Align(alignedBytesPerRow, optimalBytesPerRowAlignment);
+
+        UploadHandle uploadHandle;
+        DAWN_TRY_ASSIGN(uploadHandle,
+                        UploadTextureDataAligningBytesPerRowAndOffset(
+                            GetDevice(), data, alignedBytesPerRow, optimallyAlignedBytesPerRow,
+                            alignedRowsPerImage, dataLayout, blockInfo, writeSizePixel));
+
+        TextureDataLayout passDataLayout = dataLayout;
+        passDataLayout.offset = uploadHandle.startOffset;
+        passDataLayout.bytesPerRow = optimallyAlignedBytesPerRow;
+        passDataLayout.rowsPerImage = alignedRowsPerImage;
+
+        TextureCopy textureCopy;
+        textureCopy.texture = destination.texture;
+        textureCopy.mipLevel = destination.mipLevel;
+        textureCopy.origin = destination.origin;
+        textureCopy.aspect = ConvertAspect(destination.texture->GetFormat(), destination.aspect);
+
+        return GetDevice()->CopyFromStagingToTexture(uploadHandle.stagingBuffer, passDataLayout,
+                                                     &textureCopy, writeSizePixel);
+    }
     MaybeError QueueBase::ValidateSubmit(uint32_t commandCount,
                                          CommandBufferBase* const* commands) const {
         TRACE_EVENT0(GetDevice()->GetPlatform(), Validation, "Queue::ValidateSubmit");
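The two alignment quantities above are the heart of the shared path: the staged row pitch is rounded up to whatever the backend reports, and the staging offset only needs to honour the larger of two power-of-two alignments. The editorial example below (not Dawn code) runs that math on concrete numbers for a block-compressed format; the 256-byte row pitch is the D3D12 value, and the 4-byte offset alignment is an assumed sample value since other backends report 1 or a Vulkan device limit.

    // Editorial sketch of the staging-layout math in WriteTextureImpl.
    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    static uint64_t Align(uint64_t value, uint64_t alignment) {
        // Valid because the alignment is a power of two, as asserted in the helper above.
        return (value + alignment - 1) & ~(alignment - 1);
    }

    int main() {
        // Writing a 64x64 region of a BC1-style format: 4x4 blocks, 8 bytes per block.
        const uint32_t blockWidth = 4, blockHeight = 4, blockByteSize = 8;
        const uint32_t width = 64, height = 64;

        // Tight row size of the written region (one row of blocks).
        uint32_t alignedBytesPerRow = width / blockWidth * blockByteSize;  // 128
        uint32_t alignedRowsPerImage = height;                             // 64 texel rows

        // Round the staged row pitch up to the backend's preferred alignment.
        uint32_t optimalBytesPerRowAlignment = 256;  // e.g. D3D12_TEXTURE_DATA_PITCH_ALIGNMENT
        uint32_t optimallyAlignedBytesPerRow =
            uint32_t(Align(alignedBytesPerRow, optimalBytesPerRowAlignment));  // 256

        // The staging offset must satisfy both the backend offset alignment and the block
        // size; both are powers of two, so aligning to the larger one satisfies both.
        uint64_t optimalOffsetAlignment = 4;  // assumed sample value
        uint64_t offsetAlignment = std::max(optimalOffsetAlignment, uint64_t(blockByteSize));  // 8

        printf("row %u -> pitch %u, block rows %u, offset alignment %llu\n", alignedBytesPerRow,
               optimallyAlignedBytesPerRow, alignedRowsPerImage / blockHeight,
               (unsigned long long)offsetAlignment);
        return 0;
    }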
@@ -77,16 +77,6 @@ namespace dawn_native {
         void SubmitInternal(uint32_t commandCount, CommandBufferBase* const* commands);
     };
 
-    // A helper function used in Queue::WriteTexture. The destination data layout must not
-    // contain any additional rows per image.
-    void CopyTextureData(uint8_t* dstPointer,
-                         const uint8_t* srcPointer,
-                         uint32_t depth,
-                         uint32_t rowsPerImageInBlock,
-                         uint64_t imageAdditionalStride,
-                         uint32_t actualBytesPerRow,
-                         uint32_t dstBytesPerRow,
-                         uint32_t srcBytesPerRow);
 } // namespace dawn_native
 
 #endif // DAWNNATIVE_QUEUE_H_
@@ -616,4 +616,16 @@ namespace dawn_native { namespace d3d12 {
         return mSamplerHeapCache.get();
     }
 
+    uint32_t Device::GetOptimalBytesPerRowAlignment() const {
+        return D3D12_TEXTURE_DATA_PITCH_ALIGNMENT;
+    }
+
+    // TODO(dawn:512): Once we optimize DynamicUploader allocation with offsets we
+    // should make this return D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT = 512.
+    // Current implementations would try to allocate additional 511 bytes,
+    // so we return 1 and let ComputeTextureCopySplits take care of the alignment.
+    uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
+        return 1;
+    }
+
 }} // namespace dawn_native::d3d12
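For context on the constants referenced above (an editorial note, not part of the change): d3d12.h fixes D3D12_TEXTURE_DATA_PITCH_ALIGNMENT at 256 bytes and D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT at 512 bytes, so staged row pitches must be multiples of 256 while the TODO about returning 512 refers to the placement (offset) requirement. A trivial sketch of what that means in practice:

    // Editorial sketch: the fixed D3D12 staging alignments the getters above refer to.
    #include <cstdio>

    int main() {
        const unsigned kPitchAlignment = 256;      // D3D12_TEXTURE_DATA_PITCH_ALIGNMENT
        const unsigned kPlacementAlignment = 512;  // D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT
        // A 100-texel-wide RGBA8 row (400 bytes) is staged with a 512-byte pitch on D3D12.
        unsigned bytesPerRow = 400;
        unsigned pitch = (bytesPerRow + kPitchAlignment - 1) / kPitchAlignment * kPitchAlignment;
        printf("staged pitch: %u, placement offsets need %u-byte alignment\n", pitch,
               kPlacementAlignment);
        return 0;
    }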
@@ -103,7 +103,7 @@ namespace dawn_native { namespace d3d12 {
        MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
                                            const TextureDataLayout& src,
                                            TextureCopy* dst,
-                                           const Extent3D& copySizePixels);
+                                           const Extent3D& copySizePixels) override;
 
        ResultOrError<ResourceHeapAllocation> AllocateMemory(
            D3D12_HEAP_TYPE heapType,

@@ -138,6 +138,9 @@ namespace dawn_native { namespace d3d12 {
 
        void InitTogglesFromDriver();
 
+       uint32_t GetOptimalBytesPerRowAlignment() const override;
+       uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
+
      private:
        using DeviceBase::DeviceBase;
 
@@ -26,50 +26,6 @@
 
 namespace dawn_native { namespace d3d12 {
 
-    namespace {
-        ResultOrError<UploadHandle> UploadTextureDataAligningBytesPerRow(
-            DeviceBase* device,
-            const void* data,
-            uint32_t alignedBytesPerRow,
-            uint32_t optimallyAlignedBytesPerRow,
-            uint32_t alignedRowsPerImage,
-            const TextureDataLayout& dataLayout,
-            const Format& textureFormat,
-            const Extent3D& writeSizePixel) {
-            uint64_t newDataSizeBytes;
-            DAWN_TRY_ASSIGN(
-                newDataSizeBytes,
-                ComputeRequiredBytesInCopy(textureFormat, writeSizePixel,
-                                           optimallyAlignedBytesPerRow, alignedRowsPerImage));
-
-            UploadHandle uploadHandle;
-            DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
-                                              newDataSizeBytes, device->GetPendingCommandSerial(),
-                                              textureFormat.blockByteSize));
-            ASSERT(uploadHandle.mappedBuffer != nullptr);
-
-            uint8_t* dstPointer = static_cast<uint8_t*>(uploadHandle.mappedBuffer);
-            const uint8_t* srcPointer = static_cast<const uint8_t*>(data);
-            srcPointer += dataLayout.offset;
-
-            uint32_t alignedRowsPerImageInBlock = alignedRowsPerImage / textureFormat.blockHeight;
-            uint32_t dataRowsPerImageInBlock = dataLayout.rowsPerImage / textureFormat.blockHeight;
-            if (dataRowsPerImageInBlock == 0) {
-                dataRowsPerImageInBlock = writeSizePixel.height / textureFormat.blockHeight;
-            }
-
-            ASSERT(dataRowsPerImageInBlock >= alignedRowsPerImageInBlock);
-            uint64_t imageAdditionalStride =
-                dataLayout.bytesPerRow * (dataRowsPerImageInBlock - alignedRowsPerImageInBlock);
-
-            CopyTextureData(dstPointer, srcPointer, writeSizePixel.depth,
-                            alignedRowsPerImageInBlock, imageAdditionalStride, alignedBytesPerRow,
-                            optimallyAlignedBytesPerRow, dataLayout.bytesPerRow);
-
-            return uploadHandle;
-        }
-    } // namespace
-
     Queue::Queue(Device* device) : QueueBase(device) {
     }
 
@@ -95,43 +51,4 @@ namespace dawn_native { namespace d3d12 {
         return {};
     }
 
-    MaybeError Queue::WriteTextureImpl(const TextureCopyView& destination,
-                                       const void* data,
-                                       const TextureDataLayout& dataLayout,
-                                       const Extent3D& writeSizePixel) {
-        const TexelBlockInfo& blockInfo =
-            destination.texture->GetFormat().GetTexelBlockInfo(destination.aspect);
-
-        // We are only copying the part of the data that will appear in the texture.
-        // Note that validating texture copy range ensures that writeSizePixel->width and
-        // writeSizePixel->height are multiples of blockWidth and blockHeight respectively.
-        uint32_t alignedBytesPerRow =
-            (writeSizePixel.width) / blockInfo.blockWidth * blockInfo.blockByteSize;
-        uint32_t alignedRowsPerImage = writeSizePixel.height;
-        uint32_t optimallyAlignedBytesPerRow =
-            Align(alignedBytesPerRow, D3D12_TEXTURE_DATA_PITCH_ALIGNMENT);
-
-        UploadHandle uploadHandle;
-        DAWN_TRY_ASSIGN(
-            uploadHandle,
-            UploadTextureDataAligningBytesPerRow(
-                GetDevice(), data, alignedBytesPerRow, optimallyAlignedBytesPerRow,
-                alignedRowsPerImage, dataLayout, destination.texture->GetFormat(), writeSizePixel));
-
-        TextureDataLayout passDataLayout = dataLayout;
-        passDataLayout.offset = uploadHandle.startOffset;
-        passDataLayout.bytesPerRow = optimallyAlignedBytesPerRow;
-        passDataLayout.rowsPerImage = alignedRowsPerImage;
-
-        TextureCopy textureCopy;
-        textureCopy.texture = destination.texture;
-        textureCopy.mipLevel = destination.mipLevel;
-        textureCopy.origin = destination.origin;
-        textureCopy.aspect = ConvertAspect(destination.texture->GetFormat(), destination.aspect);
-
-        return ToBackend(GetDevice())
-            ->CopyFromStagingToTexture(uploadHandle.stagingBuffer, passDataLayout, &textureCopy,
-                                       writeSizePixel);
-    }
-
 }} // namespace dawn_native::d3d12
@@ -31,10 +31,6 @@ namespace dawn_native { namespace d3d12 {
 
      private:
        MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
-       MaybeError WriteTextureImpl(const TextureCopyView& destination,
-                                   const void* data,
-                                   const TextureDataLayout& dataLayout,
-                                   const Extent3D& writeSizePixel) override;
    };
 
 }} // namespace dawn_native::d3d12
@@ -64,10 +64,13 @@ namespace dawn_native { namespace metal {
                                            BufferBase* destination,
                                            uint64_t destinationOffset,
                                            uint64_t size) override;
-       MaybeError CopyFromStagingToTexture(StagingBufferBase* source,
+       MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
                                            const TextureDataLayout& dataLayout,
                                            TextureCopy* dst,
-                                           const Extent3D& copySizePixels);
+                                           const Extent3D& copySizePixels) override;
 
+       uint32_t GetOptimalBytesPerRowAlignment() const override;
+       uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
+
      private:
        Device(AdapterBase* adapter, id<MTLDevice> mtlDevice, const DeviceDescriptor* descriptor);
@@ -276,7 +276,10 @@ namespace dawn_native { namespace metal {
         return {};
     }
 
-    MaybeError Device::CopyFromStagingToTexture(StagingBufferBase* source,
+    // In Metal we don't write from the CPU to the texture directly which can be done using the
+    // replaceRegion function, because the function requires a non-private storage mode and Dawn
+    // sets the private storage mode by default for all textures except IOSurfaces on macOS.
+    MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
                                                 const TextureDataLayout& dataLayout,
                                                 TextureCopy* dst,
                                                 const Extent3D& copySizePixels) {
@@ -374,4 +377,12 @@ namespace dawn_native { namespace metal {
         mMtlDevice = nil;
     }
 
+    uint32_t Device::GetOptimalBytesPerRowAlignment() const {
+        return 1;
+    }
+
+    uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
+        return 1;
+    }
+
 }} // namespace dawn_native::metal
@@ -28,10 +28,6 @@ namespace dawn_native { namespace metal {
 
      private:
        MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
-       MaybeError WriteTextureImpl(const TextureCopyView& destination,
-                                   const void* data,
-                                   const TextureDataLayout& dataLayout,
-                                   const Extent3D& writeSizePixel) override;
    };
 
 }} // namespace dawn_native::metal
@@ -25,47 +25,6 @@
 #include "dawn_platform/tracing/TraceEvent.h"
 
 namespace dawn_native { namespace metal {
-    namespace {
-        ResultOrError<UploadHandle> UploadTextureDataAligningBytesPerRow(
-            DeviceBase* device,
-            const void* data,
-            uint32_t alignedBytesPerRow,
-            uint32_t alignedRowsPerImage,
-            const TextureDataLayout& dataLayout,
-            const TexelBlockInfo& blockInfo,
-            const Extent3D& writeSizePixel) {
-            uint64_t newDataSizeBytes;
-            DAWN_TRY_ASSIGN(newDataSizeBytes,
-                            ComputeRequiredBytesInCopy(blockInfo, writeSizePixel,
-                                                       alignedBytesPerRow, alignedRowsPerImage));
-
-            UploadHandle uploadHandle;
-            DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
-                                              newDataSizeBytes, device->GetPendingCommandSerial(),
-                                              blockInfo.blockByteSize));
-            ASSERT(uploadHandle.mappedBuffer != nullptr);
-
-            uint8_t* dstPointer = static_cast<uint8_t*>(uploadHandle.mappedBuffer);
-            const uint8_t* srcPointer = static_cast<const uint8_t*>(data);
-            srcPointer += dataLayout.offset;
-
-            uint32_t alignedRowsPerImageInBlock = alignedRowsPerImage / blockInfo.blockHeight;
-            uint32_t dataRowsPerImageInBlock = dataLayout.rowsPerImage / blockInfo.blockHeight;
-            if (dataRowsPerImageInBlock == 0) {
-                dataRowsPerImageInBlock = writeSizePixel.height / blockInfo.blockHeight;
-            }
-
-            ASSERT(dataRowsPerImageInBlock >= alignedRowsPerImageInBlock);
-            uint64_t imageAdditionalStride =
-                dataLayout.bytesPerRow * (dataRowsPerImageInBlock - alignedRowsPerImageInBlock);
-
-            CopyTextureData(dstPointer, srcPointer, writeSizePixel.depth,
-                            alignedRowsPerImageInBlock, imageAdditionalStride, alignedBytesPerRow,
-                            alignedBytesPerRow, dataLayout.bytesPerRow);
-
-            return uploadHandle;
-        }
-    }
 
     Queue::Queue(Device* device) : QueueBase(device) {
     }
@@ -85,43 +44,4 @@ namespace dawn_native { namespace metal {
         return {};
     }
 
-    // We don't write from the CPU to the texture directly which can be done in Metal using the
-    // replaceRegion function, because the function requires a non-private storage mode and Dawn
-    // sets the private storage mode by default for all textures except IOSurfaces on macOS.
-    MaybeError Queue::WriteTextureImpl(const TextureCopyView& destination,
-                                       const void* data,
-                                       const TextureDataLayout& dataLayout,
-                                       const Extent3D& writeSizePixel) {
-        const TexelBlockInfo& blockInfo =
-            destination.texture->GetFormat().GetTexelBlockInfo(destination.aspect);
-
-        // We are only copying the part of the data that will appear in the texture.
-        // Note that validating texture copy range ensures that writeSizePixel->width and
-        // writeSizePixel->height are multiples of blockWidth and blockHeight respectively.
-        uint32_t alignedBytesPerRow =
-            (writeSizePixel.width) / blockInfo.blockWidth * blockInfo.blockByteSize;
-        uint32_t alignedRowsPerImage = writeSizePixel.height;
-
-        UploadHandle uploadHandle;
-        DAWN_TRY_ASSIGN(uploadHandle,
-                        UploadTextureDataAligningBytesPerRow(GetDevice(), data, alignedBytesPerRow,
-                                                             alignedRowsPerImage, dataLayout,
-                                                             blockInfo, writeSizePixel));
-
-        TextureDataLayout passDataLayout = dataLayout;
-        passDataLayout.offset = uploadHandle.startOffset;
-        passDataLayout.bytesPerRow = alignedBytesPerRow;
-        passDataLayout.rowsPerImage = alignedRowsPerImage;
-
-        TextureCopy textureCopy;
-        textureCopy.texture = destination.texture;
-        textureCopy.mipLevel = destination.mipLevel;
-        textureCopy.origin = destination.origin;
-        textureCopy.aspect = ConvertAspect(destination.texture->GetFormat(), destination.aspect);
-
-        return ToBackend(GetDevice())
-            ->CopyFromStagingToTexture(uploadHandle.stagingBuffer, passDataLayout, &textureCopy,
-                                       writeSizePixel);
-    }
-
 }} // namespace dawn_native::metal
@@ -213,6 +213,13 @@ namespace dawn_native { namespace null {
         return {};
     }
 
+    MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
+                                                const TextureDataLayout& src,
+                                                TextureCopy* dst,
+                                                const Extent3D& copySizePixels) {
+        return {};
+    }
+
     MaybeError Device::IncrementMemoryUsage(uint64_t bytes) {
         static_assert(kMaxMemoryUsage <= std::numeric_limits<size_t>::max(), "");
         if (bytes > kMaxMemoryUsage || mMemoryUsage + bytes > kMaxMemoryUsage) {
@@ -468,4 +475,12 @@ namespace dawn_native { namespace null {
         return {};
     }
 
+    uint32_t Device::GetOptimalBytesPerRowAlignment() const {
+        return 1;
+    }
+
+    uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
+        return 1;
+    }
+
 }} // namespace dawn_native::null
@@ -105,10 +105,17 @@ namespace dawn_native { namespace null {
                                            BufferBase* destination,
                                            uint64_t destinationOffset,
                                            uint64_t size) override;
+       MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
+                                           const TextureDataLayout& src,
+                                           TextureCopy* dst,
+                                           const Extent3D& copySizePixels) override;
 
        MaybeError IncrementMemoryUsage(uint64_t bytes);
        void DecrementMemoryUsage(uint64_t bytes);
 
+       uint32_t GetOptimalBytesPerRowAlignment() const override;
+       uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
+
      private:
        using DeviceBase::DeviceBase;
 
@@ -196,6 +196,13 @@ namespace dawn_native { namespace opengl {
         return DAWN_UNIMPLEMENTED_ERROR("Device unable to copy from staging buffer.");
     }
 
+    MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
+                                                const TextureDataLayout& src,
+                                                TextureCopy* dst,
+                                                const Extent3D& copySizePixels) {
+        return DAWN_UNIMPLEMENTED_ERROR("Device unable to copy from staging buffer to texture.");
+    }
+
     void Device::ShutDownImpl() {
         ASSERT(GetState() == State::Disconnected);
     }
@@ -208,4 +215,12 @@ namespace dawn_native { namespace opengl {
         return {};
     }
 
+    uint32_t Device::GetOptimalBytesPerRowAlignment() const {
+        return 1;
+    }
+
+    uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
+        return 1;
+    }
+
 }} // namespace dawn_native::opengl
@@ -62,6 +62,14 @@ namespace dawn_native { namespace opengl {
                                           uint64_t destinationOffset,
                                           uint64_t size) override;
 
+       MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
+                                           const TextureDataLayout& src,
+                                           TextureCopy* dst,
+                                           const Extent3D& copySizePixels) override;
+
+       uint32_t GetOptimalBytesPerRowAlignment() const override;
+       uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
+
      private:
        Device(AdapterBase* adapter,
               const DeviceDescriptor* descriptor,
@@ -51,4 +51,11 @@ namespace dawn_native { namespace opengl {
         return {};
     }
 
+    MaybeError Queue::WriteTextureImpl(const TextureCopyView& destination,
+                                       const void* data,
+                                       const TextureDataLayout& dataLayout,
+                                       const Extent3D& writeSizePixel) {
+        return DAWN_UNIMPLEMENTED_ERROR("Unable to write to texture\n");
+    }
+
 }} // namespace dawn_native::opengl
@@ -32,6 +32,10 @@ namespace dawn_native { namespace opengl {
                                    uint64_t bufferOffset,
                                    const void* data,
                                    size_t size) override;
+       MaybeError WriteTextureImpl(const TextureCopyView& destination,
+                                   const void* data,
+                                   const TextureDataLayout& dataLayout,
+                                   const Extent3D& writeSizePixel) override;
    };
 
 }} // namespace dawn_native::opengl
@@ -621,7 +621,7 @@ namespace dawn_native { namespace vulkan {
         return {};
     }
 
-    MaybeError Device::CopyFromStagingToTexture(StagingBufferBase* source,
+    MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
                                                 const TextureDataLayout& src,
                                                 TextureCopy* dst,
                                                 const Extent3D& copySizePixels) {
@@ -917,4 +917,12 @@ namespace dawn_native { namespace vulkan {
         mVkDevice = VK_NULL_HANDLE;
     }
 
+    uint32_t Device::GetOptimalBytesPerRowAlignment() const {
+        return mDeviceInfo.properties.limits.optimalBufferCopyRowPitchAlignment;
+    }
+
+    uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
+        return mDeviceInfo.properties.limits.optimalBufferCopyOffsetAlignment;
+    }
+
 }} // namespace dawn_native::vulkan
@@ -88,10 +88,10 @@ namespace dawn_native { namespace vulkan {
                                            BufferBase* destination,
                                            uint64_t destinationOffset,
                                            uint64_t size) override;
-       MaybeError CopyFromStagingToTexture(StagingBufferBase* source,
+       MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
                                            const TextureDataLayout& src,
                                            TextureCopy* dst,
-                                           const Extent3D& copySizePixels);
+                                           const Extent3D& copySizePixels) override;
 
        ResultOrError<ResourceMemoryAllocation> AllocateMemory(VkMemoryRequirements requirements,
                                                               bool mappable);
@@ -105,6 +105,9 @@ namespace dawn_native { namespace vulkan {
        // needs to be set.
        uint32_t GetComputeSubgroupSize() const;
 
+       uint32_t GetOptimalBytesPerRowAlignment() const override;
+       uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
+
      private:
        Device(Adapter* adapter, const DeviceDescriptor* descriptor);
 
@@ -27,61 +27,6 @@
 
 namespace dawn_native { namespace vulkan {
 
-    namespace {
-        ResultOrError<UploadHandle> UploadTextureDataAligningBytesPerRow(
-            DeviceBase* device,
-            const void* data,
-            uint32_t alignedBytesPerRow,
-            uint32_t optimallyAlignedBytesPerRow,
-            uint32_t alignedRowsPerImage,
-            const TextureDataLayout& dataLayout,
-            const TexelBlockInfo& blockInfo,
-            const Extent3D& writeSizePixel) {
-            uint64_t newDataSizeBytes;
-            DAWN_TRY_ASSIGN(
-                newDataSizeBytes,
-                ComputeRequiredBytesInCopy(blockInfo, writeSizePixel, optimallyAlignedBytesPerRow,
-                                           alignedRowsPerImage));
-
-            uint64_t optimalOffsetAlignment =
-                ToBackend(device)
-                    ->GetDeviceInfo()
-                    .properties.limits.optimalBufferCopyOffsetAlignment;
-            ASSERT(IsPowerOfTwo(optimalOffsetAlignment));
-            ASSERT(IsPowerOfTwo(blockInfo.blockByteSize));
-            // We need the offset to be aligned to both optimalOffsetAlignment and blockByteSize,
-            // since both of them are powers of two, we only need to align to the max value.
-            uint64_t offsetAlignment =
-                std::max(optimalOffsetAlignment, uint64_t(blockInfo.blockByteSize));
-
-            UploadHandle uploadHandle;
-            DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
-                                              newDataSizeBytes, device->GetPendingCommandSerial(),
-                                              offsetAlignment));
-            ASSERT(uploadHandle.mappedBuffer != nullptr);
-
-            uint8_t* dstPointer = static_cast<uint8_t*>(uploadHandle.mappedBuffer);
-            const uint8_t* srcPointer = static_cast<const uint8_t*>(data);
-            srcPointer += dataLayout.offset;
-
-            uint32_t alignedRowsPerImageInBlock = alignedRowsPerImage / blockInfo.blockHeight;
-            uint32_t dataRowsPerImageInBlock = dataLayout.rowsPerImage / blockInfo.blockHeight;
-            if (dataRowsPerImageInBlock == 0) {
-                dataRowsPerImageInBlock = writeSizePixel.height / blockInfo.blockHeight;
-            }
-
-            ASSERT(dataRowsPerImageInBlock >= alignedRowsPerImageInBlock);
-            uint64_t imageAdditionalStride =
-                dataLayout.bytesPerRow * (dataRowsPerImageInBlock - alignedRowsPerImageInBlock);
-
-            CopyTextureData(dstPointer, srcPointer, writeSizePixel.depth,
-                            alignedRowsPerImageInBlock, imageAdditionalStride, alignedBytesPerRow,
-                            optimallyAlignedBytesPerRow, dataLayout.bytesPerRow);
-
-            return uploadHandle;
-        }
-    } // namespace
-
     // static
     Queue* Queue::Create(Device* device) {
         return new Queue(device);
|
||||||
return {};
|
return {};
|
||||||
}
|
}
|
||||||
|
|
||||||
MaybeError Queue::WriteTextureImpl(const TextureCopyView& destination,
|
|
||||||
const void* data,
|
|
||||||
const TextureDataLayout& dataLayout,
|
|
||||||
const Extent3D& writeSizePixel) {
|
|
||||||
const TexelBlockInfo& blockInfo =
|
|
||||||
destination.texture->GetFormat().GetTexelBlockInfo(destination.aspect);
|
|
||||||
|
|
||||||
// We are only copying the part of the data that will appear in the texture.
|
|
||||||
// Note that validating texture copy range ensures that writeSizePixel->width and
|
|
||||||
// writeSizePixel->height are multiples of blockWidth and blockHeight respectively.
|
|
||||||
uint32_t alignedBytesPerRow =
|
|
||||||
(writeSizePixel.width) / blockInfo.blockWidth * blockInfo.blockByteSize;
|
|
||||||
uint32_t alignedRowsPerImage = writeSizePixel.height;
|
|
||||||
|
|
||||||
uint32_t optimalBytesPerRowAlignment =
|
|
||||||
ToBackend(GetDevice())
|
|
||||||
->GetDeviceInfo()
|
|
||||||
.properties.limits.optimalBufferCopyRowPitchAlignment;
|
|
||||||
uint32_t optimallyAlignedBytesPerRow =
|
|
||||||
Align(alignedBytesPerRow, optimalBytesPerRowAlignment);
|
|
||||||
|
|
||||||
UploadHandle uploadHandle;
|
|
||||||
DAWN_TRY_ASSIGN(uploadHandle,
|
|
||||||
UploadTextureDataAligningBytesPerRow(
|
|
||||||
GetDevice(), data, alignedBytesPerRow, optimallyAlignedBytesPerRow,
|
|
||||||
alignedRowsPerImage, dataLayout, blockInfo, writeSizePixel));
|
|
||||||
|
|
||||||
TextureDataLayout passDataLayout = dataLayout;
|
|
||||||
passDataLayout.offset = uploadHandle.startOffset;
|
|
||||||
passDataLayout.bytesPerRow = optimallyAlignedBytesPerRow;
|
|
||||||
passDataLayout.rowsPerImage = alignedRowsPerImage;
|
|
||||||
|
|
||||||
TextureCopy textureCopy;
|
|
||||||
textureCopy.texture = destination.texture;
|
|
||||||
textureCopy.mipLevel = destination.mipLevel;
|
|
||||||
textureCopy.origin = destination.origin;
|
|
||||||
textureCopy.aspect = ConvertAspect(destination.texture->GetFormat(), destination.aspect);
|
|
||||||
|
|
||||||
return ToBackend(GetDevice())
|
|
||||||
->CopyFromStagingToTexture(uploadHandle.stagingBuffer, passDataLayout, &textureCopy,
|
|
||||||
writeSizePixel);
|
|
||||||
}
|
|
||||||
}} // namespace dawn_native::vulkan
|
}} // namespace dawn_native::vulkan
|
||||||
|
|
|
@@ -31,10 +31,6 @@ namespace dawn_native { namespace vulkan {
        using QueueBase::QueueBase;
 
        MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
-       MaybeError WriteTextureImpl(const TextureCopyView& destination,
-                                   const void* data,
-                                   const TextureDataLayout& dataLayout,
-                                   const Extent3D& writeSizePixel) override;
    };
 
 }} // namespace dawn_native::vulkan
@@ -174,6 +174,12 @@ TEST_P(QueueWriteBufferTests, SuperLargeWriteBuffer) {
 // Test a special code path: writing when dynamic uploader already contatins some unaligned
 // data, it might be necessary to use a ring buffer with properly aligned offset.
 TEST_P(QueueWriteBufferTests, UnalignedDynamicUploader) {
+    // TODO(dawn:483): Skipping test because WriteTexture inside UnalignDynamicUploader
+    // is not implemented. Moreover when using UnalignDynamicUploader we are assuming
+    // that WriteTexture implementation uses a DynamicUploader which might be false in the
+    // case of a future OpenGL implementation.
+    DAWN_SKIP_TEST_IF(IsOpenGL());
+
     utils::UnalignDynamicUploader(device);
 
     wgpu::BufferDescriptor descriptor;