Implement buffer lazy initialization before CopyBufferToBuffer
This patch implements buffer lazy initialization before CopyBufferToBuffer() behind the toggle LazyClearBufferOnFirstUse.

- If the source buffer is not initialized, it is cleared to 0 before CopyBufferToBuffer().
- If the destination buffer is not initialized and the copy doesn't overwrite the whole buffer, it is cleared to 0 before CopyBufferToBuffer(); otherwise the clear is skipped because the copy overwrites the entire buffer anyway.

BUG=dawn:414
TEST=dawn_end2end_tests

Change-Id: I3d0512c6376a1ed8928e86f8e56fefebc16910fa
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/24360
Commit-Queue: Jiawei Shao <jiawei.shao@intel.com>
Reviewed-by: Corentin Wallez <cwallez@chromium.org>
Reviewed-by: Austin Eng <enga@chromium.org>
Parent: d3bf2188aa
Commit: dab10eae8a
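All four backends touched by this patch (D3D12, Metal, OpenGL, Vulkan) share the same decision logic and differ only in error propagation and in how the clear is recorded. A minimal backend-agnostic sketch of that policy, distilled from the hunks below (names follow the patch; ClearToZero() is a placeholder for each backend's ClearBuffer() call, not a real Dawn function):

    // Clears the whole buffer lazily on its first use as a copy source.
    void Buffer::EnsureDataInitialized(CommandRecordingContext* ctx) {
        if (IsDataInitialized() ||
            !GetDevice()->IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse)) {
            return;  // Already initialized, or lazy clearing is disabled.
        }
        InitializeToZero(ctx);
    }

    // Clears the buffer lazily on its first use as a copy destination,
    // unless the copy is about to overwrite every byte anyway.
    void Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* ctx,
                                                    uint64_t offset,
                                                    uint64_t size) {
        if (IsDataInitialized() ||
            !GetDevice()->IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse)) {
            return;
        }
        if (IsFullBufferRange(offset, size)) {
            SetIsDataInitialized();  // Copy fully overwrites the buffer: skip the clear.
        } else {
            InitializeToZero(ctx);
        }
    }

    void Buffer::InitializeToZero(CommandRecordingContext* ctx) {
        ClearToZero(ctx);  // Backend-specific, e.g. DAWN_TRY(ClearBuffer(ctx, uint8_t(0u))) on D3D12.
        SetIsDataInitialized();
        GetDevice()->IncrementLazyClearCountForTesting();
    }

Call sites then bracket every CopyBufferToBuffer() with EnsureDataInitialized() on the source and EnsureDataInitializedAsDestination() on the destination, as the per-backend hunks below show.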
@@ -313,10 +313,44 @@ namespace dawn_native { namespace d3d12 {
         return mResourceAllocation.GetInfo().mMethod == allocationMethod;
     }
 
-    MaybeError Buffer::ClearBufferContentsToZero(CommandRecordingContext* commandContext) {
+    MaybeError Buffer::EnsureDataInitialized(CommandRecordingContext* commandContext) {
+        // TODO(jiawei.shao@intel.com): check Toggle::LazyClearResourceOnFirstUse
+        // instead when buffer lazy initialization is completely supported.
+        if (IsDataInitialized() ||
+            !GetDevice()->IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse)) {
+            return {};
+        }
+
+        DAWN_TRY(InitializeToZero(commandContext));
+
+        return {};
+    }
+
+    MaybeError Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+                                                          uint64_t offset,
+                                                          uint64_t size) {
+        // TODO(jiawei.shao@intel.com): check Toggle::LazyClearResourceOnFirstUse
+        // instead when buffer lazy initialization is completely supported.
+        if (IsDataInitialized() ||
+            !GetDevice()->IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse)) {
+            return {};
+        }
+
+        if (IsFullBufferRange(offset, size)) {
+            SetIsDataInitialized();
+        } else {
+            DAWN_TRY(InitializeToZero(commandContext));
+        }
+
+        return {};
+    }
+
+    MaybeError Buffer::InitializeToZero(CommandRecordingContext* commandContext) {
         ASSERT(GetDevice()->IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse));
         ASSERT(!IsDataInitialized());
 
         // TODO(jiawei.shao@intel.com): skip initializing the buffer when it is created on a heap
         // that has already been zero initialized.
         DAWN_TRY(ClearBuffer(commandContext, uint8_t(0u)));
         SetIsDataInitialized();
         GetDevice()->IncrementLazyClearCountForTesting();
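IsFullBufferRange() is a frontend BufferBase helper that is not part of this diff. Presumably it only checks that the destination range spans the whole buffer; a sketch under that assumption (hypothetical body, not from this patch):

    bool BufferBase::IsFullBufferRange(uint64_t offset, uint64_t size) const {
        // Hypothetical: the range covers the whole buffer iff it starts at
        // offset 0 and its size equals the buffer's size.
        return offset == 0 && size == GetSize();
    }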
@@ -44,7 +44,10 @@ namespace dawn_native { namespace d3d12 {
        bool CheckAllocationMethodForTesting(AllocationMethod allocationMethod) const;
        bool CheckIsResidentForTesting() const;
 
-       MaybeError ClearBufferContentsToZero(CommandRecordingContext* commandContext);
+       MaybeError EnsureDataInitialized(CommandRecordingContext* commandContext);
+       MaybeError EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+                                                     uint64_t offset,
+                                                     uint64_t size);
 
      private:
        ~Buffer() override;
@@ -63,6 +66,7 @@ namespace dawn_native { namespace d3d12 {
                               D3D12_RESOURCE_BARRIER* barrier,
                               wgpu::BufferUsage newUsage);
 
+       MaybeError InitializeToZero(CommandRecordingContext* commandContext);
        MaybeError ClearBuffer(CommandRecordingContext* commandContext, uint8_t clearValue);
 
        ResourceHeapAllocation mResourceAllocation;
@@ -582,6 +582,10 @@ namespace dawn_native { namespace d3d12 {
                 Buffer* srcBuffer = ToBackend(copy->source.Get());
                 Buffer* dstBuffer = ToBackend(copy->destination.Get());
 
+                DAWN_TRY(srcBuffer->EnsureDataInitialized(commandContext));
+                DAWN_TRY(dstBuffer->EnsureDataInitializedAsDestination(
+                    commandContext, copy->destinationOffset, copy->size));
+
                 srcBuffer->TrackUsageAndTransitionNow(commandContext,
                                                       wgpu::BufferUsage::CopySrc);
                 dstBuffer->TrackUsageAndTransitionNow(commandContext,
@@ -337,15 +337,8 @@ namespace dawn_native { namespace d3d12 {
 
        Buffer* dstBuffer = ToBackend(destination);
 
-       // TODO(jiawei.shao@intel.com): use Toggle::LazyClearResourceOnFirstUse when the support of
-       // buffer lazy initialization is completed.
-       if (IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse) && !dstBuffer->IsDataInitialized()) {
-           if (dstBuffer->IsFullBufferRange(destinationOffset, size)) {
-               dstBuffer->SetIsDataInitialized();
-           } else {
-               DAWN_TRY(dstBuffer->ClearBufferContentsToZero(commandRecordingContext));
-           }
-       }
+       DAWN_TRY(dstBuffer->EnsureDataInitializedAsDestination(commandRecordingContext,
+                                                              destinationOffset, size));
 
        CopyFromStagingToBufferImpl(commandRecordingContext, source, sourceOffset, destination,
                                    destinationOffset, size);
@@ -31,7 +31,10 @@ namespace dawn_native { namespace metal {
                                                     const BufferDescriptor* descriptor);
        id<MTLBuffer> GetMTLBuffer() const;
 
-       void ClearBufferContentsToZero(CommandRecordingContext* commandContext);
+       void EnsureDataInitialized(CommandRecordingContext* commandContext);
+       void EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+                                               uint64_t offset,
+                                               uint64_t size);
 
      private:
        using BufferBase::BufferBase;
@@ -47,6 +50,7 @@ namespace dawn_native { namespace metal {
        bool IsMapWritable() const override;
        MaybeError MapAtCreationImpl() override;
 
+       void InitializeToZero(CommandRecordingContext* commandContext);
        void ClearBuffer(CommandRecordingContext* commandContext, uint8_t clearValue);
 
        id<MTLBuffer> mMtlBuffer = nil;
@@ -134,7 +134,35 @@ namespace dawn_native { namespace metal {
        mMtlBuffer = nil;
    }
 
-   void Buffer::ClearBufferContentsToZero(CommandRecordingContext* commandContext) {
+   void Buffer::EnsureDataInitialized(CommandRecordingContext* commandContext) {
+       // TODO(jiawei.shao@intel.com): check Toggle::LazyClearResourceOnFirstUse
+       // instead when buffer lazy initialization is completely supported.
+       if (IsDataInitialized() ||
+           !GetDevice()->IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse)) {
+           return;
+       }
+
+       InitializeToZero(commandContext);
+   }
+
+   void Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+                                                   uint64_t offset,
+                                                   uint64_t size) {
+       // TODO(jiawei.shao@intel.com): check Toggle::LazyClearResourceOnFirstUse
+       // instead when buffer lazy initialization is completely supported.
+       if (IsDataInitialized() ||
+           !GetDevice()->IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse)) {
+           return;
+       }
+
+       if (IsFullBufferRange(offset, size)) {
+           SetIsDataInitialized();
+       } else {
+           InitializeToZero(commandContext);
+       }
+   }
+
+   void Buffer::InitializeToZero(CommandRecordingContext* commandContext) {
        ASSERT(GetDevice()->IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse));
        ASSERT(!IsDataInitialized());
 
@@ -721,6 +721,11 @@ namespace dawn_native { namespace metal {
            case Command::CopyBufferToBuffer: {
                CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
 
+               ToBackend(copy->source)->EnsureDataInitialized(commandContext);
+               ToBackend(copy->destination)
+                   ->EnsureDataInitializedAsDestination(commandContext,
+                                                        copy->destinationOffset, copy->size);
+
                [commandContext->EnsureBlit()
                    copyFromBuffer:ToBackend(copy->source)->GetMTLBuffer()
                      sourceOffset:copy->sourceOffset
@@ -254,16 +254,9 @@ namespace dawn_native { namespace metal {
        // this function.
        ASSERT(size != 0);
 
-       // TODO(jiawei.shao@intel.com): use Toggle::LazyClearResourceOnFirstUse when the support of
-       // buffer lazy initialization is completed.
-       if (IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse) &&
-           !destination->IsDataInitialized()) {
-           if (destination->IsFullBufferRange(destinationOffset, size)) {
-               destination->SetIsDataInitialized();
-           } else {
-               ToBackend(destination)->ClearBufferContentsToZero(GetPendingCommandContext());
-           }
-       }
+       ToBackend(destination)
+           ->EnsureDataInitializedAsDestination(GetPendingCommandContext(), destinationOffset,
+                                                size);
 
        id<MTLBuffer> uploadBuffer = ToBackend(source)->GetBufferHandle();
        id<MTLBuffer> buffer = ToBackend(destination)->GetMTLBuffer();
@@ -51,7 +51,33 @@ namespace dawn_native { namespace opengl {
        return std::max(GetSize(), uint64_t(4u));
    }
 
-   void Buffer::ClearBufferContentsToZero() {
+   void Buffer::EnsureDataInitialized() {
+       // TODO(jiawei.shao@intel.com): check Toggle::LazyClearResourceOnFirstUse
+       // instead when buffer lazy initialization is completely supported.
+       if (IsDataInitialized() ||
+           !GetDevice()->IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse)) {
+           return;
+       }
+
+       InitializeToZero();
+   }
+
+   void Buffer::EnsureDataInitializedAsDestination(uint64_t offset, uint64_t size) {
+       // TODO(jiawei.shao@intel.com): check Toggle::LazyClearResourceOnFirstUse
+       // instead when buffer lazy initialization is completely supported.
+       if (IsDataInitialized() ||
+           !GetDevice()->IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse)) {
+           return;
+       }
+
+       if (IsFullBufferRange(offset, size)) {
+           SetIsDataInitialized();
+       } else {
+           InitializeToZero();
+       }
+   }
+
+   void Buffer::InitializeToZero() {
        ASSERT(GetDevice()->IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse));
        ASSERT(!IsDataInitialized());
 
@@ -61,9 +87,9 @@ namespace dawn_native { namespace opengl {
        const std::vector<uint8_t> clearValues(size, 0u);
        device->gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
        device->gl.BufferSubData(GL_ARRAY_BUFFER, 0, size, clearValues.data());
-       device->IncrementLazyClearCountForTesting();
 
+       SetIsDataInitialized();
+       device->IncrementLazyClearCountForTesting();
    }
 
    bool Buffer::IsMapWritable() const {
@@ -29,7 +29,8 @@ namespace dawn_native { namespace opengl {
 
        GLuint GetHandle() const;
 
-       void ClearBufferContentsToZero();
+       void EnsureDataInitialized();
+       void EnsureDataInitializedAsDestination(uint64_t offset, uint64_t size);
 
      private:
        ~Buffer() override;
@@ -44,6 +45,8 @@ namespace dawn_native { namespace opengl {
        void* GetMappedPointerImpl() override;
        uint64_t GetAppliedSize() const;
 
+       void InitializeToZero();
+
        GLuint mBuffer = 0;
        void* mMappedData = nullptr;
    };
@@ -493,6 +493,10 @@ namespace dawn_native { namespace opengl {
            case Command::CopyBufferToBuffer: {
                CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
 
+               ToBackend(copy->source)->EnsureDataInitialized();
+               ToBackend(copy->destination)
+                   ->EnsureDataInitializedAsDestination(copy->destinationOffset, copy->size);
+
                gl.BindBuffer(GL_PIXEL_PACK_BUFFER, ToBackend(copy->source)->GetHandle());
                gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER,
                              ToBackend(copy->destination)->GetHandle());
@@ -44,16 +44,7 @@ namespace dawn_native { namespace opengl {
                                      size_t size) {
        const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
 
-       // TODO(jiawei.shao@intel.com): use Toggle::LazyClearResourceOnFirstUse when the support of
-       // buffer lazy initialization is completed.
-       if (GetDevice()->IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse) &&
-           !buffer->IsDataInitialized()) {
-           if (buffer->IsFullBufferRange(bufferOffset, size)) {
-               buffer->SetIsDataInitialized();
-           } else {
-               ToBackend(buffer)->ClearBufferContentsToZero();
-           }
-       }
+       ToBackend(buffer)->EnsureDataInitializedAsDestination(bufferOffset, size);
 
        gl.BindBuffer(GL_ARRAY_BUFFER, ToBackend(buffer)->GetHandle());
        gl.BufferSubData(GL_ARRAY_BUFFER, bufferOffset, size, data);
@@ -278,14 +278,41 @@ namespace dawn_native { namespace vulkan {
        }
    }
 
-   void Buffer::ClearBufferContentsToZero(CommandRecordingContext* recordingContext) {
+   void Buffer::EnsureDataInitialized(CommandRecordingContext* recordingContext) {
+       // TODO(jiawei.shao@intel.com): check Toggle::LazyClearResourceOnFirstUse
+       // instead when buffer lazy initialization is completely supported.
+       if (IsDataInitialized() ||
+           !GetDevice()->IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse)) {
+           return;
+       }
+
+       InitializeToZero(recordingContext);
+   }
+
+   void Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
+                                                   uint64_t offset,
+                                                   uint64_t size) {
+       // TODO(jiawei.shao@intel.com): check Toggle::LazyClearResourceOnFirstUse
+       // instead when buffer lazy initialization is completely supported.
+       if (IsDataInitialized() ||
+           !GetDevice()->IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse)) {
+           return;
+       }
+
+       if (IsFullBufferRange(offset, size)) {
+           SetIsDataInitialized();
+       } else {
+           InitializeToZero(recordingContext);
+       }
+   }
+
+   void Buffer::InitializeToZero(CommandRecordingContext* recordingContext) {
        ASSERT(GetDevice()->IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse));
        ASSERT(!IsDataInitialized());
 
        ClearBuffer(recordingContext, 0u);
 
-       SetIsDataInitialized();
        GetDevice()->IncrementLazyClearCountForTesting();
+       SetIsDataInitialized();
    }
 
    void Buffer::ClearBuffer(CommandRecordingContext* recordingContext, uint32_t clearValue) {
@@ -43,12 +43,16 @@ namespace dawn_native { namespace vulkan {
                              VkPipelineStageFlags* srcStages,
                              VkPipelineStageFlags* dstStages);
 
-       void ClearBufferContentsToZero(CommandRecordingContext* recordingContext);
+       void EnsureDataInitialized(CommandRecordingContext* recordingContext);
+       void EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
+                                               uint64_t offset,
+                                               uint64_t size);
 
      private:
        ~Buffer() override;
        using BufferBase::BufferBase;
        MaybeError Initialize();
+       void InitializeToZero(CommandRecordingContext* recordingContext);
        void ClearBuffer(CommandRecordingContext* recordingContext, uint32_t clearValue);
 
        // Dawn API
@@ -424,6 +424,10 @@ namespace dawn_native { namespace vulkan {
                Buffer* srcBuffer = ToBackend(copy->source.Get());
                Buffer* dstBuffer = ToBackend(copy->destination.Get());
 
+               srcBuffer->EnsureDataInitialized(recordingContext);
+               dstBuffer->EnsureDataInitializedAsDestination(
+                   recordingContext, copy->destinationOffset, copy->size);
+
                srcBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopySrc);
                dstBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
 
@@ -592,16 +592,8 @@ namespace dawn_native { namespace vulkan {
 
        CommandRecordingContext* recordingContext = GetPendingRecordingContext();
 
-       // TODO(jiawei.shao@intel.com): use Toggle::LazyClearResourceOnFirstUse when the support of
-       // buffer lazy initialization is completed.
-       if (IsToggleEnabled(Toggle::LazyClearBufferOnFirstUse) &&
-           !destination->IsDataInitialized()) {
-           if (destination->IsFullBufferRange(destinationOffset, size)) {
-               destination->SetIsDataInitialized();
-           } else {
-               ToBackend(destination)->ClearBufferContentsToZero(recordingContext);
-           }
-       }
+       ToBackend(destination)
+           ->EnsureDataInitializedAsDestination(recordingContext, destinationOffset, size);
 
        // Insert memory barrier to ensure host write operations are made visible before
        // copying from the staging buffer. However, this barrier can be removed (see note below).
@@ -87,6 +87,179 @@ TEST_P(BufferZeroInitTest, WriteBufferToSubBuffer) {
     }
 }
 
+// Test that the code path of CopyBufferToBuffer clears the source buffer correctly when it is the
+// first use of the source buffer.
+TEST_P(BufferZeroInitTest, CopyBufferToBufferSource) {
+    constexpr uint64_t kBufferSize = 16u;
+    constexpr wgpu::BufferUsage kBufferUsage =
+        wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    wgpu::BufferDescriptor bufferDescriptor;
+    bufferDescriptor.size = kBufferSize;
+    bufferDescriptor.usage = kBufferUsage;
+
+    constexpr std::array<uint8_t, kBufferSize> kInitialData = {
+        {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}};
+
+    wgpu::Buffer dstBuffer =
+        utils::CreateBufferFromData(device, kInitialData.data(), kBufferSize, kBufferUsage);
+
+    constexpr std::array<uint32_t, kBufferSize / sizeof(uint32_t)> kExpectedData = {{0, 0, 0, 0}};
+
+    // Full copy from the source buffer
+    {
+        wgpu::Buffer srcBuffer = device.CreateBuffer(&bufferDescriptor);
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(srcBuffer, 0, dstBuffer, 0, kBufferSize);
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+
+        EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &commandBuffer));
+        EXPECT_BUFFER_U32_RANGE_EQ(kExpectedData.data(), srcBuffer, 0,
+                                   kBufferSize / sizeof(uint32_t));
+    }
+
+    // Partial copy from the source buffer
+    // srcOffset == 0
+    {
+        constexpr uint64_t kSrcOffset = 0;
+        constexpr uint64_t kCopySize = kBufferSize / 2;
+
+        wgpu::Buffer srcBuffer = device.CreateBuffer(&bufferDescriptor);
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(srcBuffer, kSrcOffset, dstBuffer, 0, kCopySize);
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+
+        EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &commandBuffer));
+        EXPECT_BUFFER_U32_RANGE_EQ(kExpectedData.data(), srcBuffer, 0,
+                                   kBufferSize / sizeof(uint32_t));
+    }
+
+    // srcOffset > 0 and srcOffset + copySize == srcBufferSize
+    {
+        constexpr uint64_t kSrcOffset = kBufferSize / 2;
+        constexpr uint64_t kCopySize = kBufferSize - kSrcOffset;
+
+        wgpu::Buffer srcBuffer = device.CreateBuffer(&bufferDescriptor);
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(srcBuffer, kSrcOffset, dstBuffer, 0, kCopySize);
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+
+        EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &commandBuffer));
+        EXPECT_BUFFER_U32_RANGE_EQ(kExpectedData.data(), srcBuffer, 0,
+                                   kBufferSize / sizeof(uint32_t));
+    }
+
+    // srcOffset > 0 and srcOffset + copySize < srcBufferSize
+    {
+        constexpr uint64_t kSrcOffset = kBufferSize / 4;
+        constexpr uint64_t kCopySize = kBufferSize / 2;
+
+        wgpu::Buffer srcBuffer = device.CreateBuffer(&bufferDescriptor);
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(srcBuffer, kSrcOffset, dstBuffer, 0, kCopySize);
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+
+        EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &commandBuffer));
+        EXPECT_BUFFER_U32_RANGE_EQ(kExpectedData.data(), srcBuffer, 0,
+                                   kBufferSize / sizeof(uint32_t));
+    }
+}
+
+// Test that the code path of CopyBufferToBuffer clears the destination buffer correctly when it is
+// the first use of the destination buffer.
+TEST_P(BufferZeroInitTest, CopyBufferToBufferDestination) {
+    constexpr uint64_t kBufferSize = 16u;
+    constexpr wgpu::BufferUsage kBufferUsage =
+        wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    wgpu::BufferDescriptor bufferDescriptor;
+    bufferDescriptor.size = kBufferSize;
+    bufferDescriptor.usage = kBufferUsage;
+
+    const std::array<uint8_t, kBufferSize> kInitialData = {
+        {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}};
+    wgpu::Buffer srcBuffer =
+        utils::CreateBufferFromData(device, kInitialData.data(), kBufferSize, kBufferUsage);
+
+    // Full copy from the source buffer doesn't need lazy initialization at all.
+    {
+        wgpu::Buffer dstBuffer = device.CreateBuffer(&bufferDescriptor);
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(srcBuffer, 0, dstBuffer, 0, kBufferSize);
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+
+        EXPECT_LAZY_CLEAR(0u, queue.Submit(1, &commandBuffer));
+
+        EXPECT_BUFFER_U32_RANGE_EQ(reinterpret_cast<const uint32_t*>(kInitialData.data()),
+                                   dstBuffer, 0, kBufferSize / sizeof(uint32_t));
+    }
+
+    // Partial copy from the source buffer needs lazy initialization.
+    // dstOffset == 0
+    {
+        constexpr uint32_t kDstOffset = 0;
+        constexpr uint32_t kCopySize = kBufferSize / 2;
+
+        wgpu::Buffer dstBuffer = device.CreateBuffer(&bufferDescriptor);
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(srcBuffer, 0, dstBuffer, kDstOffset, kCopySize);
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+
+        EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &commandBuffer));
+
+        std::array<uint8_t, kBufferSize> expectedData;
+        expectedData.fill(0);
+        for (uint32_t index = kDstOffset; index < kDstOffset + kCopySize; ++index) {
+            expectedData[index] = kInitialData[index - kDstOffset];
+        }
+
+        EXPECT_BUFFER_U32_RANGE_EQ(reinterpret_cast<uint32_t*>(expectedData.data()), dstBuffer, 0,
+                                   kBufferSize / sizeof(uint32_t));
+    }
+
+    // dstOffset > 0 and dstOffset + copySize == kBufferSize
+    {
+        constexpr uint32_t kDstOffset = kBufferSize / 2;
+        constexpr uint32_t kCopySize = kBufferSize - kDstOffset;
+
+        wgpu::Buffer dstBuffer = device.CreateBuffer(&bufferDescriptor);
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(srcBuffer, 0, dstBuffer, kDstOffset, kCopySize);
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+
+        EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &commandBuffer));
+
+        std::array<uint8_t, kBufferSize> expectedData;
+        expectedData.fill(0);
+        for (uint32_t index = kDstOffset; index < kDstOffset + kCopySize; ++index) {
+            expectedData[index] = kInitialData[index - kDstOffset];
+        }
+
+        EXPECT_BUFFER_U32_RANGE_EQ(reinterpret_cast<uint32_t*>(expectedData.data()), dstBuffer, 0,
+                                   kBufferSize / sizeof(uint32_t));
+    }
+
+    // dstOffset > 0 and dstOffset + copySize < kBufferSize
+    {
+        constexpr uint32_t kDstOffset = kBufferSize / 4;
+        constexpr uint32_t kCopySize = kBufferSize / 2;
+
+        wgpu::Buffer dstBuffer = device.CreateBuffer(&bufferDescriptor);
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(srcBuffer, 0, dstBuffer, kDstOffset, kCopySize);
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+
+        EXPECT_LAZY_CLEAR(1u, queue.Submit(1, &commandBuffer));
+
+        std::array<uint8_t, kBufferSize> expectedData;
+        expectedData.fill(0);
+        for (uint32_t index = kDstOffset; index < kDstOffset + kCopySize; ++index) {
+            expectedData[index] = kInitialData[index - kDstOffset];
+        }
+
+        EXPECT_BUFFER_U32_RANGE_EQ(reinterpret_cast<uint32_t*>(expectedData.data()), dstBuffer, 0,
+                                   kBufferSize / sizeof(uint32_t));
+    }
+}
+
 DAWN_INSTANTIATE_TEST(BufferZeroInitTest,
                       D3D12Backend({"nonzero_clear_resources_on_creation_for_testing",
                                     "lazy_clear_buffer_on_first_use"}),