Deprecate Buffer::SetSubData in favor of Queue::WriteBuffer

Bug: dawn:22
Change-Id: I00b3cd65ac4eb494b05918251f4b3b2bcaf24f71
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/22200
Commit-Queue: Corentin Wallez <cwallez@chromium.org>
Reviewed-by: Kai Ninomiya <kainino@chromium.org>
Corentin Wallez 2020-06-02 09:24:39 +00:00 committed by Commit Bot service account
parent 3b7d0858bf
commit 47a3341e07
40 changed files with 528 additions and 390 deletions
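For code that consumes Dawn, the migration is mechanical: the call moves from the buffer to the queue, and the data pointer and size arguments swap places. A minimal before/after sketch, assuming `device` is an existing wgpu::Device and `queue` is its default queue (variable names are illustrative only):

    wgpu::BufferDescriptor descriptor;
    descriptor.size = 4;
    descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);

    uint32_t value = 0x01020304;
    // Deprecated: buffer.SetSubData(0, sizeof(value), &value);
    queue.WriteBuffer(buffer, 0, &value, sizeof(value));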

View File

@@ -902,6 +902,15 @@
            "args": [
                {"name": "descriptor", "type": "fence descriptor", "annotation": "const*", "optional": true}
            ]
},
{
"name": "write buffer",
"args": [
{"name": "buffer", "type": "buffer"},
{"name": "buffer offset", "type": "uint64_t"},
{"name": "data", "type": "void", "annotation": "const*", "length": "size"},
{"name": "size", "type": "size_t"}
]
        }
    ]
},
@@ -1636,6 +1645,9 @@
    "int32_t": {
        "category": "native"
    },
+   "size_t": {
+       "category": "native"
+   },
    "uint64_t": {
        "category": "native"
    },

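The dawn.json entries above are consumed by Dawn's code generators. As a rough sketch (not generated output), the declarations they should produce look like the following; the exact const-qualification of the C++ wrapper is an assumption:

    // C API
    void wgpuQueueWriteBuffer(WGPUQueue queue, WGPUBuffer buffer, uint64_t bufferOffset,
                              const void* data, size_t size);

    // C++ wrapper used throughout the tests below
    void wgpu::Queue::WriteBuffer(const Buffer& buffer, uint64_t bufferOffset,
                                  const void* data, size_t size) const;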
View File

@@ -47,6 +47,13 @@
    "destroy object": [
        { "name": "object type", "type": "ObjectType" },
        { "name": "object id", "type": "ObjectId" }
+   ],
+   "queue write buffer internal": [
+       {"name": "queue id", "type": "ObjectId" },
+       {"name": "buffer id", "type": "ObjectId" },
+       {"name": "buffer offset", "type": "uint64_t"},
+       {"name": "data", "type": "uint8_t", "annotation": "const*", "length": "size"},
+       {"name": "size", "type": "size_t"}
    ]
},
"return commands": {
@@ -94,7 +101,8 @@
    "DeviceSetDeviceLostCallback",
    "DeviceSetUncapturedErrorCallback",
    "FenceGetCompletedValue",
-   "FenceOnCompletion"
+   "FenceOnCompletion",
+   "QueueWriteBuffer"
],
"client_handwritten_commands": [
    "BufferDestroy",

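For reference, the "queue write buffer internal" record above becomes a serializable command struct that the client fills in by hand (see the dawn_wire client change later in this diff). A condensed sketch of that type, with field names taken from the JSON; the exact generated shape is an assumption:

    struct QueueWriteBufferInternalCmd {
        ObjectId queueId;
        ObjectId bufferId;
        uint64_t bufferOffset;
        const uint8_t* data;
        size_t size;

        size_t GetRequiredSize() const;
        void Serialize(char* serializeBuffer) const;
    };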
View File

@@ -52,7 +52,7 @@ scripts/perf_test_runner.py DrawCallPerf.Run/Vulkan__e_skip_validation
**BufferUploadPerf**
-Tests repetitively uploading data to the GPU using either `SetSubData` or `CreateBufferMapped`.
+Tests repetitively uploading data to the GPU using either `WriteBuffer` or `CreateBufferMapped`.
**DrawCallPerf**

View File

@@ -150,7 +150,7 @@ void frame() {
    for (auto& data : shaderData) {
        data.time = f / 60.0f;
    }
-   ubo.SetSubData(0, kNumTriangles * sizeof(ShaderData), shaderData.data());
+   queue.WriteBuffer(ubo, 0, shaderData.data(), kNumTriangles * sizeof(ShaderData));
    utils::ComboRenderPassDescriptor renderPass({backbufferView});
    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();

View File

@@ -89,9 +89,9 @@ void initBuffers() {
            wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Storage;
        particleBuffers[i] = device.CreateBuffer(&descriptor);
-       particleBuffers[i].SetSubData(0,
-                                     sizeof(Particle) * kNumParticles,
-                                     reinterpret_cast<uint8_t*>(initialParticles.data()));
+       queue.WriteBuffer(particleBuffers[i], 0,
+                         reinterpret_cast<uint8_t*>(initialParticles.data()),
+                         sizeof(Particle) * kNumParticles);
    }
}

View File

@@ -262,7 +262,7 @@ void frame() {
        glm::vec3(0.0f, 1.0f, 0.0f)
    );
-   cameraBuffer.SetSubData(0, sizeof(CameraData), &cameraData);
+   queue.WriteBuffer(cameraBuffer, 0, &cameraData, sizeof(CameraData));
    wgpu::TextureView backbufferView = swapchain.GetCurrentTextureView();
    utils::ComboRenderPassDescriptor renderPass({backbufferView}, depthStencilView);

View File

@ -19,6 +19,7 @@
#include "dawn_native/DynamicUploader.h" #include "dawn_native/DynamicUploader.h"
#include "dawn_native/ErrorData.h" #include "dawn_native/ErrorData.h"
#include "dawn_native/MapRequestTracker.h" #include "dawn_native/MapRequestTracker.h"
#include "dawn_native/Queue.h"
#include "dawn_native/ValidationUtils_autogen.h" #include "dawn_native/ValidationUtils_autogen.h"
#include <cstdio> #include <cstdio>
@@ -62,10 +63,6 @@ namespace dawn_native {
            return {};
        }
-       MaybeError SetSubDataImpl(uint32_t start, uint32_t count, const void* data) override {
-           UNREACHABLE();
-           return {};
-       }
        MaybeError MapReadAsyncImpl(uint32_t serial) override {
            UNREACHABLE();
            return {};
@@ -186,7 +183,7 @@ namespace dawn_native {
        return {};
    }
-   MaybeError BufferBase::ValidateCanUseInSubmitNow() const {
+   MaybeError BufferBase::ValidateCanUseOnQueueNow() const {
        ASSERT(!IsError());
        switch (mState) {
@@ -244,14 +241,10 @@ namespace dawn_native {
    }
    void BufferBase::SetSubData(uint32_t start, uint32_t count, const void* data) {
-       if (GetDevice()->ConsumedError(ValidateSetSubData(start, count))) {
-           return;
-       }
-       ASSERT(!IsError());
-       if (GetDevice()->ConsumedError(SetSubDataImpl(start, count, data))) {
-           return;
-       }
+       Ref<QueueBase> queue = AcquireRef(GetDevice()->GetDefaultQueue());
+       GetDevice()->EmitDeprecationWarning(
+           "Buffer::SetSubData is deprecated, use Queue::WriteBuffer instead");
+       queue->WriteBuffer(this, start, data, count);
    }
    void BufferBase::MapReadAsync(WGPUBufferMapReadCallback callback, void* userdata) {
@@ -279,22 +272,6 @@ namespace dawn_native {
        tracker->Track(this, mMapSerial, false);
    }
MaybeError BufferBase::SetSubDataImpl(uint32_t start, uint32_t count, const void* data) {
DynamicUploader* uploader = GetDevice()->GetDynamicUploader();
UploadHandle uploadHandle;
DAWN_TRY_ASSIGN(uploadHandle,
uploader->Allocate(count, GetDevice()->GetPendingCommandSerial()));
ASSERT(uploadHandle.mappedBuffer != nullptr);
memcpy(uploadHandle.mappedBuffer, data, count);
DAWN_TRY(GetDevice()->CopyFromStagingToBuffer(
uploadHandle.stagingBuffer, uploadHandle.startOffset, this, start, count));
return {};
}
    void BufferBase::MapWriteAsync(WGPUBufferMapWriteCallback callback, void* userdata) {
        WGPUBufferMapAsyncStatus status;
        if (GetDevice()->ConsumedError(ValidateMap(wgpu::BufferUsage::MapWrite, &status))) {
@@ -378,45 +355,6 @@ namespace dawn_native {
        mMapUserdata = 0;
    }
MaybeError BufferBase::ValidateSetSubData(uint32_t start, uint32_t count) const {
DAWN_TRY(GetDevice()->ValidateIsAlive());
DAWN_TRY(GetDevice()->ValidateObject(this));
switch (mState) {
case BufferState::Mapped:
return DAWN_VALIDATION_ERROR("Buffer is mapped");
case BufferState::Destroyed:
return DAWN_VALIDATION_ERROR("Buffer is destroyed");
case BufferState::Unmapped:
break;
}
if (count > GetSize()) {
return DAWN_VALIDATION_ERROR("Buffer subdata with too much data");
}
// Metal requests buffer to buffer copy size must be a multiple of 4 bytes on macOS
if (count % 4 != 0) {
return DAWN_VALIDATION_ERROR("Buffer subdata size must be a multiple of 4 bytes");
}
// Metal requests offset of buffer to buffer copy must be a multiple of 4 bytes on macOS
if (start % 4 != 0) {
return DAWN_VALIDATION_ERROR("Start position must be a multiple of 4 bytes");
}
// Note that no overflow can happen because we already checked for GetSize() >= count
if (start > GetSize() - count) {
return DAWN_VALIDATION_ERROR("Buffer subdata out of range");
}
if (!(mUsage & wgpu::BufferUsage::CopyDst)) {
return DAWN_VALIDATION_ERROR("Buffer needs the CopyDst usage bit");
}
return {};
}
    MaybeError BufferBase::ValidateMap(wgpu::BufferUsage requiredUsage,
                                       WGPUBufferMapAsyncStatus* status) const {
        *status = WGPUBufferMapAsyncStatus_DeviceLost;

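With the change above, Buffer::SetSubData is reduced to a shim: it fetches the device's default queue, emits a deprecation warning, and forwards to Queue::WriteBuffer with the arguments reordered. Existing callers keep working, roughly like this (a sketch based on the code above; `buffer` and `queue` are assumed to already exist):

    uint32_t value = 0x01020304;

    // Still works, but logs "Buffer::SetSubData is deprecated, use Queue::WriteBuffer instead".
    buffer.SetSubData(0, sizeof(value), &value);

    // Preferred form going forward.
    queue.WriteBuffer(buffer, 0, &value, sizeof(value));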
View File

@@ -52,7 +52,7 @@ namespace dawn_native {
        MaybeError MapAtCreation(uint8_t** mappedPointer);
        void OnMapCommandSerialFinished(uint32_t mapSerial, bool isWrite);
-       MaybeError ValidateCanUseInSubmitNow() const;
+       MaybeError ValidateCanUseOnQueueNow() const;
        // Dawn API
        void SetSubData(uint32_t start, uint32_t count, const void* data);
@@ -80,7 +80,6 @@ namespace dawn_native {
      private:
        virtual MaybeError MapAtCreationImpl(uint8_t** mappedPointer) = 0;
-       virtual MaybeError SetSubDataImpl(uint32_t start, uint32_t count, const void* data);
        virtual MaybeError MapReadAsyncImpl(uint32_t serial) = 0;
        virtual MaybeError MapWriteAsyncImpl(uint32_t serial) = 0;
        virtual void UnmapImpl() = 0;
@@ -90,7 +89,6 @@ namespace dawn_native {
        virtual bool IsMapWritable() const = 0;
        MaybeError CopyFromStagingBuffer();
-       MaybeError ValidateSetSubData(uint32_t start, uint32_t count) const;
        MaybeError ValidateMap(wgpu::BufferUsage requiredUsage,
                               WGPUBufferMapAsyncStatus* status) const;
        MaybeError ValidateUnmap() const;

View File

@ -17,6 +17,7 @@
#include "dawn_native/Buffer.h" #include "dawn_native/Buffer.h"
#include "dawn_native/CommandBuffer.h" #include "dawn_native/CommandBuffer.h"
#include "dawn_native/Device.h" #include "dawn_native/Device.h"
#include "dawn_native/DynamicUploader.h"
#include "dawn_native/ErrorScope.h" #include "dawn_native/ErrorScope.h"
#include "dawn_native/ErrorScopeTracker.h" #include "dawn_native/ErrorScopeTracker.h"
#include "dawn_native/Fence.h" #include "dawn_native/Fence.h"
@@ -91,8 +92,42 @@ namespace dawn_native {
        return new Fence(this, descriptor);
    }
void QueueBase::WriteBuffer(BufferBase* buffer,
uint64_t bufferOffset,
const void* data,
size_t size) {
GetDevice()->ConsumedError(WriteBufferInternal(buffer, bufferOffset, data, size));
}
MaybeError QueueBase::WriteBufferInternal(BufferBase* buffer,
uint64_t bufferOffset,
const void* data,
size_t size) {
DAWN_TRY(ValidateWriteBuffer(buffer, bufferOffset, size));
return WriteBufferImpl(buffer, bufferOffset, data, size);
}
MaybeError QueueBase::WriteBufferImpl(BufferBase* buffer,
uint64_t bufferOffset,
const void* data,
size_t size) {
DeviceBase* device = GetDevice();
UploadHandle uploadHandle;
DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
size, device->GetPendingCommandSerial()));
ASSERT(uploadHandle.mappedBuffer != nullptr);
memcpy(uploadHandle.mappedBuffer, data, size);
DAWN_TRY(device->CopyFromStagingToBuffer(
uploadHandle.stagingBuffer, uploadHandle.startOffset, buffer, bufferOffset, size));
return {};
}
    MaybeError QueueBase::ValidateSubmit(uint32_t commandCount,
-                                        CommandBufferBase* const* commands) {
+                                        CommandBufferBase* const* commands) const {
        TRACE_EVENT0(GetDevice()->GetPlatform(), Validation, "Queue::ValidateSubmit");
        DAWN_TRY(GetDevice()->ValidateObject(this));
@@ -103,7 +138,7 @@ namespace dawn_native {
        for (const PassResourceUsage& passUsages : usages.perPass) {
            for (const BufferBase* buffer : passUsages.buffers) {
-               DAWN_TRY(buffer->ValidateCanUseInSubmitNow());
+               DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
            }
            for (const TextureBase* texture : passUsages.textures) {
                DAWN_TRY(texture->ValidateCanUseInSubmitNow());
@ -111,7 +146,7 @@ namespace dawn_native {
} }
for (const BufferBase* buffer : usages.topLevelBuffers) { for (const BufferBase* buffer : usages.topLevelBuffers) {
DAWN_TRY(buffer->ValidateCanUseInSubmitNow()); DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
} }
for (const TextureBase* texture : usages.topLevelTextures) { for (const TextureBase* texture : usages.topLevelTextures) {
DAWN_TRY(texture->ValidateCanUseInSubmitNow()); DAWN_TRY(texture->ValidateCanUseInSubmitNow());
@@ -121,7 +156,7 @@ namespace dawn_native {
        return {};
    }
-   MaybeError QueueBase::ValidateSignal(const Fence* fence, uint64_t signalValue) {
+   MaybeError QueueBase::ValidateSignal(const Fence* fence, uint64_t signalValue) const {
        DAWN_TRY(GetDevice()->ValidateIsAlive());
        DAWN_TRY(GetDevice()->ValidateObject(this));
        DAWN_TRY(GetDevice()->ValidateObject(fence));
@@ -136,7 +171,7 @@ namespace dawn_native {
        return {};
    }
-   MaybeError QueueBase::ValidateCreateFence(const FenceDescriptor* descriptor) {
+   MaybeError QueueBase::ValidateCreateFence(const FenceDescriptor* descriptor) const {
        DAWN_TRY(GetDevice()->ValidateIsAlive());
        DAWN_TRY(GetDevice()->ValidateObject(this));
        if (descriptor != nullptr) {
@@ -146,4 +181,30 @@ namespace dawn_native {
        return {};
    }
MaybeError QueueBase::ValidateWriteBuffer(const BufferBase* buffer,
uint64_t bufferOffset,
size_t size) const {
DAWN_TRY(GetDevice()->ValidateIsAlive());
DAWN_TRY(GetDevice()->ValidateObject(this));
DAWN_TRY(GetDevice()->ValidateObject(buffer));
if (bufferOffset % 4 != 0) {
return DAWN_VALIDATION_ERROR("Queue::WriteBuffer bufferOffset must be a multiple of 4");
}
if (size % 4 != 0) {
return DAWN_VALIDATION_ERROR("Queue::WriteBuffer size must be a multiple of 4");
}
uint64_t bufferSize = buffer->GetSize();
if (bufferOffset > bufferSize || size > (bufferSize - bufferOffset)) {
return DAWN_VALIDATION_ERROR("Queue::WriteBuffer out of range");
}
if (!(buffer->GetUsage() & wgpu::BufferUsage::CopyDst)) {
return DAWN_VALIDATION_ERROR("Buffer needs the CopyDst usage bit");
}
return buffer->ValidateCanUseOnQueueNow();
}
} // namespace dawn_native

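ValidateWriteBuffer above carries over the rules that ValidateSetSubData used to enforce on the buffer: offset and size must each be a multiple of 4, the range must stay inside the buffer, and the buffer needs CopyDst usage and must currently be usable on the queue. A quick illustration with hypothetical values (mirroring the validation tests later in this change):

    wgpu::BufferDescriptor desc;
    desc.size = 8;
    desc.usage = wgpu::BufferUsage::CopyDst;
    wgpu::Buffer buffer = device.CreateBuffer(&desc);

    uint32_t value = 42;
    queue.WriteBuffer(buffer, 0, &value, sizeof(value));  // OK: aligned, in range, CopyDst usage
    queue.WriteBuffer(buffer, 2, &value, sizeof(value));  // error: offset not a multiple of 4
    queue.WriteBuffer(buffer, 0, &value, 2);              // error: size not a multiple of 4
    queue.WriteBuffer(buffer, 8, &value, sizeof(value));  // error: range ends past the buffer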
View File

@@ -33,15 +33,28 @@ namespace dawn_native {
        void Submit(uint32_t commandCount, CommandBufferBase* const* commands);
        void Signal(Fence* fence, uint64_t signalValue);
        Fence* CreateFence(const FenceDescriptor* descriptor);
+       void WriteBuffer(BufferBase* buffer, uint64_t bufferOffset, const void* data, size_t size);
      private:
        QueueBase(DeviceBase* device, ObjectBase::ErrorTag tag);
-       virtual MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands);
-       MaybeError ValidateSubmit(uint32_t commandCount, CommandBufferBase* const* commands);
-       MaybeError ValidateSignal(const Fence* fence, uint64_t signalValue);
-       MaybeError ValidateCreateFence(const FenceDescriptor* descriptor);
+       MaybeError WriteBufferInternal(BufferBase* buffer,
+                                      uint64_t bufferOffset,
+                                      const void* data,
+                                      size_t size);
+       virtual MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands);
+       virtual MaybeError WriteBufferImpl(BufferBase* buffer,
+                                          uint64_t bufferOffset,
+                                          const void* data,
+                                          size_t size);
+       MaybeError ValidateSubmit(uint32_t commandCount, CommandBufferBase* const* commands) const;
+       MaybeError ValidateSignal(const Fence* fence, uint64_t signalValue) const;
+       MaybeError ValidateCreateFence(const FenceDescriptor* descriptor) const;
+       MaybeError ValidateWriteBuffer(const BufferBase* buffer,
+                                      uint64_t bufferOffset,
+                                      size_t size) const;
    };
} // namespace dawn_native

View File

@@ -66,7 +66,7 @@ namespace dawn_native { namespace d3d12 {
        Serial mLastUsage = 0;
        // mLastSubmission denotes the last time this pageable was submitted to the GPU. Note that
        // although this variable often contains the same value as mLastUsage, it can differ in some
-       // situations. When some asynchronous APIs (like SetSubData) are called, mLastUsage is
+       // situations. When some asynchronous APIs (like WriteBuffer) are called, mLastUsage is
        // updated upon the call, but the backend operation is deferred until the next submission
        // to the GPU. This makes mLastSubmission unique from mLastUsage, and allows us to
        // accurately identify when a pageable can be evicted.

View File

@@ -47,7 +47,7 @@ namespace dawn_native { namespace metal {
            return nil;
        }
-       // A blit encoder can be left open from SetSubData, make sure we close it.
+       // A blit encoder can be left open from WriteBuffer, make sure we close it.
        EndBlit();
        ASSERT(!mInEncoder);

View File

@@ -304,11 +304,10 @@ namespace dawn_native { namespace null {
        memcpy(mBackingData.get() + destinationOffset, ptr + sourceOffset, size);
    }
-   MaybeError Buffer::SetSubDataImpl(uint32_t start, uint32_t count, const void* data) {
-       ASSERT(start + count <= GetSize());
-       ASSERT(mBackingData);
-       memcpy(mBackingData.get() + start, data, count);
-       return {};
+   void Buffer::DoWriteBuffer(uint64_t bufferOffset, const void* data, size_t size) {
+       ASSERT(bufferOffset + size <= GetSize());
+       ASSERT(mBackingData);
+       memcpy(mBackingData.get() + bufferOffset, data, size);
    }
    MaybeError Buffer::MapReadAsyncImpl(uint32_t serial) {
@@ -366,6 +365,14 @@ namespace dawn_native { namespace null {
        return {};
    }
MaybeError Queue::WriteBufferImpl(BufferBase* buffer,
uint64_t bufferOffset,
const void* data,
size_t size) {
ToBackend(buffer)->DoWriteBuffer(bufferOffset, data, size);
return {};
}
    // SwapChain
    SwapChain::SwapChain(Device* device,

View File

@@ -187,11 +187,12 @@ namespace dawn_native { namespace null {
                                  uint64_t destinationOffset,
                                  uint64_t size);
+       void DoWriteBuffer(uint64_t bufferOffset, const void* data, size_t size);
      private:
        ~Buffer() override;
        // Dawn API
-       MaybeError SetSubDataImpl(uint32_t start, uint32_t count, const void* data) override;
        MaybeError MapReadAsyncImpl(uint32_t serial) override;
        MaybeError MapWriteAsyncImpl(uint32_t serial) override;
        void UnmapImpl() override;
@@ -222,6 +223,10 @@ namespace dawn_native { namespace null {
      private:
        ~Queue() override;
        MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
+       MaybeError WriteBufferImpl(BufferBase* buffer,
+                                  uint64_t bufferOffset,
+                                  const void* data,
+                                  size_t size) override;
    };
    class SwapChain final : public NewSwapChainBase {

View File

@@ -50,14 +50,6 @@ namespace dawn_native { namespace opengl {
        return {};
    }
-   MaybeError Buffer::SetSubDataImpl(uint32_t start, uint32_t count, const void* data) {
-       const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
-       gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
-       gl.BufferSubData(GL_ARRAY_BUFFER, start, count, data);
-       return {};
-   }
    MaybeError Buffer::MapReadAsyncImpl(uint32_t serial) {
        const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;

View File

@@ -32,7 +32,6 @@ namespace dawn_native { namespace opengl {
      private:
        ~Buffer() override;
        // Dawn API
-       MaybeError SetSubDataImpl(uint32_t start, uint32_t count, const void* data) override;
        MaybeError MapReadAsyncImpl(uint32_t serial) override;
        MaybeError MapWriteAsyncImpl(uint32_t serial) override;
        void UnmapImpl() override;

View File

@@ -14,6 +14,7 @@
#include "dawn_native/opengl/QueueGL.h"
+#include "dawn_native/opengl/BufferGL.h"
#include "dawn_native/opengl/CommandBufferGL.h"
#include "dawn_native/opengl/DeviceGL.h"
#include "dawn_platform/DawnPlatform.h"
@@ -37,4 +38,15 @@ namespace dawn_native { namespace opengl {
        return {};
    }
MaybeError Queue::WriteBufferImpl(BufferBase* buffer,
uint64_t bufferOffset,
const void* data,
size_t size) {
const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
gl.BindBuffer(GL_ARRAY_BUFFER, ToBackend(buffer)->GetHandle());
gl.BufferSubData(GL_ARRAY_BUFFER, bufferOffset, size, data);
return {};
}
}} // namespace dawn_native::opengl

View File

@@ -28,6 +28,10 @@ namespace dawn_native { namespace opengl {
      private:
        MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
+       MaybeError WriteBufferImpl(BufferBase* buffer,
+                                  uint64_t bufferOffset,
+                                  const void* data,
+                                  size_t size) override;
    };
}} // namespace dawn_native::opengl

View File

@@ -366,6 +366,27 @@ namespace dawn_wire { namespace client {
        cmd.Serialize(allocatedBuffer, *fence->device->GetClient());
    }
void ClientHandwrittenQueueWriteBuffer(WGPUQueue cQueue,
WGPUBuffer cBuffer,
uint64_t bufferOffset,
const void* data,
size_t size) {
Queue* queue = reinterpret_cast<Queue*>(cQueue);
Buffer* buffer = reinterpret_cast<Buffer*>(cBuffer);
QueueWriteBufferInternalCmd cmd;
cmd.queueId = queue->id;
cmd.bufferId = buffer->id;
cmd.bufferOffset = bufferOffset;
cmd.data = static_cast<const uint8_t*>(data);
cmd.size = size;
Client* wireClient = buffer->device->GetClient();
size_t requiredSize = cmd.GetRequiredSize();
char* allocatedBuffer = static_cast<char*>(wireClient->GetCmdSpace(requiredSize));
cmd.Serialize(allocatedBuffer);
}
    void ClientDeviceReference(WGPUDevice) {
    }

View File

@@ -38,4 +38,21 @@ namespace dawn_wire { namespace server {
        return true;
    }
bool Server::DoQueueWriteBufferInternal(ObjectId queueId,
ObjectId bufferId,
uint64_t bufferOffset,
const uint8_t* data,
size_t size) {
// The null object isn't valid as `self` or `buffer` so we can combine the check with the
// check that the ID is valid.
auto* queue = QueueObjects().Get(queueId);
auto* buffer = BufferObjects().Get(bufferId);
if (queue == nullptr || buffer == nullptr) {
return false;
}
mProcs.queueWriteBuffer(queue->handle, buffer->handle, bufferOffset, data, size);
return true;
}
}} // namespace dawn_wire::server

View File

@@ -26,30 +26,30 @@ TEST_P(BasicTests, VendorIdFilter) {
    ASSERT_EQ(GetAdapterProperties().vendorID, GetVendorIdFilter());
}
-// Test Buffer::SetSubData changes the content of the buffer, but really this is the most
+// Test Queue::WriteBuffer changes the content of the buffer, but really this is the most
// basic test possible, and tests the test harness
-TEST_P(BasicTests, BufferSetSubData) {
+TEST_P(BasicTests, QueueWriteBuffer) {
    wgpu::BufferDescriptor descriptor;
    descriptor.size = 4;
    descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
    uint32_t value = 0x01020304;
-   buffer.SetSubData(0, sizeof(value), &value);
+   queue.WriteBuffer(buffer, 0, &value, sizeof(value));
    EXPECT_BUFFER_U32_EQ(value, buffer, 0);
}
-// Test a validation error for buffer setSubData, but really this is the most basic test possible
+// Test a validation error for Queue::WriteBuffer but really this is the most basic test possible
// for ASSERT_DEVICE_ERROR
-TEST_P(BasicTests, BufferSetSubDataError) {
+TEST_P(BasicTests, QueueWriteBufferError) {
    wgpu::BufferDescriptor descriptor;
    descriptor.size = 4;
    descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
    uint8_t value = 187;
-   ASSERT_DEVICE_ERROR(buffer.SetSubData(1000, sizeof(value), &value));
+   ASSERT_DEVICE_ERROR(queue.WriteBuffer(buffer, 1000, &value, sizeof(value)));
}
DAWN_INSTANTIATE_TEST(BasicTests, D3D12Backend(), MetalBackend(), OpenGLBackend(), VulkanBackend());

View File

@@ -770,9 +770,9 @@ TEST_P(BindGroupTests, DynamicOffsetOrder) {
    wgpu::Buffer buffer2 = device.CreateBuffer(&bufferDescriptor);
    // Populate the values
-   buffer0.SetSubData(offsets[0], sizeof(uint32_t), &values[0]);
-   buffer2.SetSubData(offsets[1], sizeof(uint32_t), &values[1]);
-   buffer3.SetSubData(offsets[2], sizeof(uint32_t), &values[2]);
+   queue.WriteBuffer(buffer0, offsets[0], &values[0], sizeof(uint32_t));
+   queue.WriteBuffer(buffer2, offsets[1], &values[1], sizeof(uint32_t));
+   queue.WriteBuffer(buffer3, offsets[2], &values[2], sizeof(uint32_t));
    wgpu::Buffer outputBuffer = utils::CreateBufferFromData(
        device, wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::Storage, {0, 0, 0});

View File

@@ -55,7 +55,7 @@ TEST_P(BufferMapReadTests, SmallReadAtZero) {
    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
    uint32_t myData = 0x01020304;
-   buffer.SetSubData(0, sizeof(myData), &myData);
+   queue.WriteBuffer(buffer, 0, &myData, sizeof(myData));
    const void* mappedData = MapReadAsyncAndWait(buffer);
    ASSERT_EQ(myData, *reinterpret_cast<const uint32_t*>(mappedData));
@@ -71,7 +71,7 @@ TEST_P(BufferMapReadTests, MapTwice) {
    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
    uint32_t myData = 0x01020304;
-   buffer.SetSubData(0, sizeof(myData), &myData);
+   queue.WriteBuffer(buffer, 0, &myData, sizeof(myData));
    const void* mappedData = MapReadAsyncAndWait(buffer);
    EXPECT_EQ(myData, *reinterpret_cast<const uint32_t*>(mappedData));
@@ -79,7 +79,7 @@ TEST_P(BufferMapReadTests, MapTwice) {
    UnmapBuffer(buffer);
    myData = 0x05060708;
-   buffer.SetSubData(0, sizeof(myData), &myData);
+   queue.WriteBuffer(buffer, 0, &myData, sizeof(myData));
    const void* mappedData1 = MapReadAsyncAndWait(buffer);
    EXPECT_EQ(myData, *reinterpret_cast<const uint32_t*>(mappedData1));
@@ -100,7 +100,7 @@ TEST_P(BufferMapReadTests, LargeRead) {
    descriptor.usage = wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopyDst;
    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
-   buffer.SetSubData(0, kDataSize * sizeof(uint32_t), myData.data());
+   queue.WriteBuffer(buffer, 0, myData.data(), kDataSize * sizeof(uint32_t));
    const void* mappedData = MapReadAsyncAndWait(buffer);
    ASSERT_EQ(0, memcmp(mappedData, myData.data(), kDataSize * sizeof(uint32_t)));
@@ -233,144 +233,6 @@ TEST_P(BufferMapWriteTests, ManyWrites) {
DAWN_INSTANTIATE_TEST(BufferMapWriteTests, D3D12Backend(), MetalBackend(), OpenGLBackend(), VulkanBackend());
class BufferSetSubDataTests : public DawnTest {
};
// Test the simplest set sub data: setting one u32 at offset 0.
TEST_P(BufferSetSubDataTests, SmallDataAtZero) {
wgpu::BufferDescriptor descriptor;
descriptor.size = 4;
descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
uint32_t value = 0x01020304;
buffer.SetSubData(0, sizeof(value), &value);
EXPECT_BUFFER_U32_EQ(value, buffer, 0);
}
// Test the simplest set sub data: setting nothing
TEST_P(BufferSetSubDataTests, ZeroSized) {
wgpu::BufferDescriptor descriptor;
descriptor.size = 4;
descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
uint32_t initialValue = 0x42;
buffer.SetSubData(0, sizeof(initialValue), &initialValue);
buffer.SetSubData(0, 0, nullptr);
// The content of the buffer isn't changed
EXPECT_BUFFER_U32_EQ(initialValue, buffer, 0);
}
// Call SetSubData at offset 0 via a u32 twice. Test that data is updated accoordingly.
TEST_P(BufferSetSubDataTests, SetTwice) {
wgpu::BufferDescriptor descriptor;
descriptor.size = 4;
descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
uint32_t value = 0x01020304;
buffer.SetSubData(0, sizeof(value), &value);
EXPECT_BUFFER_U32_EQ(value, buffer, 0);
value = 0x05060708;
buffer.SetSubData(0, sizeof(value), &value);
EXPECT_BUFFER_U32_EQ(value, buffer, 0);
}
// Test that SetSubData offset works.
TEST_P(BufferSetSubDataTests, SmallDataAtOffset) {
wgpu::BufferDescriptor descriptor;
descriptor.size = 4000;
descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
constexpr uint64_t kOffset = 2000;
uint32_t value = 0x01020304;
buffer.SetSubData(kOffset, sizeof(value), &value);
EXPECT_BUFFER_U32_EQ(value, buffer, kOffset);
}
// Stress test for many calls to SetSubData
TEST_P(BufferSetSubDataTests, ManySetSubData) {
// Note: Increasing the size of the buffer will likely cause timeout issues.
// In D3D12, timeout detection occurs when the GPU scheduler tries but cannot preempt the task
// executing these commands in-flight. If this takes longer than ~2s, a device reset occurs and
// fails the test. Since GPUs may or may not complete by then, this test must be disabled OR
// modified to be well-below the timeout limit.
// TODO (jiawei.shao@intel.com): find out why this test fails on Intel Vulkan Linux bots.
DAWN_SKIP_TEST_IF(IsIntel() && IsVulkan() && IsLinux());
// TODO(https://bugs.chromium.org/p/dawn/issues/detail?id=228): Re-enable
// once the issue with Metal on 10.14.6 is fixed.
DAWN_SKIP_TEST_IF(IsMacOS() && IsIntel() && IsMetal());
constexpr uint64_t kSize = 4000 * 1000;
constexpr uint32_t kElements = 500 * 500;
wgpu::BufferDescriptor descriptor;
descriptor.size = kSize;
descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
std::vector<uint32_t> expectedData;
for (uint32_t i = 0; i < kElements; ++i) {
buffer.SetSubData(i * sizeof(uint32_t), sizeof(i), &i);
expectedData.push_back(i);
}
EXPECT_BUFFER_U32_RANGE_EQ(expectedData.data(), buffer, 0, kElements);
}
// Test using SetSubData for lots of data
TEST_P(BufferSetSubDataTests, LargeSetSubData) {
constexpr uint64_t kSize = 4000 * 1000;
constexpr uint32_t kElements = 1000 * 1000;
wgpu::BufferDescriptor descriptor;
descriptor.size = kSize;
descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
std::vector<uint32_t> expectedData;
for (uint32_t i = 0; i < kElements; ++i) {
expectedData.push_back(i);
}
buffer.SetSubData(0, kElements * sizeof(uint32_t), expectedData.data());
EXPECT_BUFFER_U32_RANGE_EQ(expectedData.data(), buffer, 0, kElements);
}
// Test using SetSubData for super large data block
TEST_P(BufferSetSubDataTests, SuperLargeSetSubData) {
constexpr uint64_t kSize = 12000 * 1000;
constexpr uint64_t kElements = 3000 * 1000;
wgpu::BufferDescriptor descriptor;
descriptor.size = kSize;
descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
std::vector<uint32_t> expectedData;
for (uint32_t i = 0; i < kElements; ++i) {
expectedData.push_back(i);
}
buffer.SetSubData(0, kElements * sizeof(uint32_t), expectedData.data());
EXPECT_BUFFER_U32_RANGE_EQ(expectedData.data(), buffer, 0, kElements);
}
DAWN_INSTANTIATE_TEST(BufferSetSubDataTests,
D3D12Backend(),
MetalBackend(),
OpenGLBackend(),
VulkanBackend());
// TODO(enga): These tests should use the testing toggle to initialize resources to 1.
class CreateBufferMappedTests : public DawnTest {
  protected:

View File

@@ -48,7 +48,7 @@ void ComputeCopyStorageBufferTests::BasicTest(const char* shader) {
    for (uint32_t i = 0; i < kNumUints; ++i) {
        expected[i] = (i + 1u) * 0x11111111u;
    }
-   src.SetSubData(0, sizeof(expected), expected.data());
+   queue.WriteBuffer(src, 0, expected.data(), sizeof(expected));
    EXPECT_BUFFER_U32_RANGE_EQ(expected.data(), src, 0, kNumUints);
    // Set up dst storage buffer
@@ -59,7 +59,7 @@ void ComputeCopyStorageBufferTests::BasicTest(const char* shader) {
    wgpu::Buffer dst = device.CreateBuffer(&dstDesc);
    std::array<uint32_t, kNumUints> zero{};
-   dst.SetSubData(0, sizeof(zero), zero.data());
+   queue.WriteBuffer(dst, 0, zero.data(), sizeof(zero));
    // Set up bind group and issue dispatch
    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),

View File

@@ -42,7 +42,7 @@ void ComputeSharedMemoryTests::BasicTest(const char* shader) {
    wgpu::Buffer dst = device.CreateBuffer(&dstDesc);
    const uint32_t zero = 0;
-   dst.SetSubData(0, sizeof(zero), &zero);
+   queue.WriteBuffer(dst, 0, &zero, sizeof(zero));
    // Set up bind group and issue dispatch
    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),

View File

@@ -124,8 +124,8 @@ class CopyTests_T2B : public CopyTests {
        bufDescriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
        wgpu::Buffer buffer = device.CreateBuffer(&bufDescriptor);
        std::vector<RGBA8> emptyData(bufferSpec.size / kBytesPerTexel * textureSpec.arraySize);
-       buffer.SetSubData(0, static_cast<uint32_t>(emptyData.size() * sizeof(RGBA8)),
-                         emptyData.data());
+       queue.WriteBuffer(buffer, 0, emptyData.data(),
+                         static_cast<uint32_t>(emptyData.size() * sizeof(RGBA8)));
        uint64_t bufferOffset = bufferSpec.offset;
        for (uint32_t slice = 0; slice < textureSpec.arraySize; ++slice) {
@@ -195,8 +195,8 @@ protected:
        std::vector<RGBA8> bufferData(bufferSpec.size / kBytesPerTexel);
        FillBufferData(bufferData.data(), bufferData.size());
-       buffer.SetSubData(0, static_cast<uint32_t>(bufferData.size() * sizeof(RGBA8)),
-                         bufferData.data());
+       queue.WriteBuffer(buffer, 0, bufferData.data(),
+                         static_cast<uint32_t>(bufferData.size() * sizeof(RGBA8)));
        // Create a texture that is `width` x `height` with (`level` + 1) mip levels.
        wgpu::TextureDescriptor descriptor;

View File

@@ -58,6 +58,30 @@ class DeprecationTests : public DawnTest {
        }                                                  \
    } while (0)
// Test that using SetSubData emits a deprecation warning.
TEST_P(DeprecationTests, SetSubDataDeprecated) {
wgpu::BufferDescriptor descriptor;
descriptor.usage = wgpu::BufferUsage::CopyDst;
descriptor.size = 4;
wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
EXPECT_DEPRECATION_WARNING(buffer.SetSubData(0, 0, nullptr));
}
// Test that using SetSubData works
TEST_P(DeprecationTests, SetSubDataStillWorks) {
DAWN_SKIP_TEST_IF(IsNull());
wgpu::BufferDescriptor descriptor;
descriptor.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::CopySrc;
descriptor.size = 4;
wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
uint32_t data = 2020;
EXPECT_DEPRECATION_WARNING(buffer.SetSubData(0, 4, &data));
EXPECT_BUFFER_U32_EQ(data, buffer, 0);
}
DAWN_INSTANTIATE_TEST(DeprecationTests,
                      D3D12Backend(),
                      MetalBackend(),

View File

@@ -205,7 +205,7 @@ class DepthSamplingTest : public DawnTest {
    }
    void UpdateInputTexture(wgpu::CommandEncoder commandEncoder, float textureValue) {
-       mTextureUploadBuffer.SetSubData(0, sizeof(float), &textureValue);
+       queue.WriteBuffer(mTextureUploadBuffer, 0, &textureValue, sizeof(float));
        wgpu::BufferCopyView bufferCopyView = {};
        bufferCopyView.buffer = mTextureUploadBuffer;
@@ -315,7 +315,7 @@ class DepthSamplingTest : public DawnTest {
                      float compareRef,
                      wgpu::CompareFunction compare,
                      std::vector<float> textureValues) {
-       mUniformBuffer.SetSubData(0, sizeof(float), &compareRef);
+       queue.WriteBuffer(mUniformBuffer, 0, &compareRef, sizeof(float));
        wgpu::SamplerDescriptor samplerDesc;
        samplerDesc.compare = compare;
@@ -357,7 +357,7 @@ class DepthSamplingTest : public DawnTest {
                      float compareRef,
                      wgpu::CompareFunction compare,
                      std::vector<float> textureValues) {
-       mUniformBuffer.SetSubData(0, sizeof(float), &compareRef);
+       queue.WriteBuffer(mUniformBuffer, 0, &compareRef, sizeof(float));
        wgpu::SamplerDescriptor samplerDesc;
        samplerDesc.compare = compare;

View File

@@ -320,8 +320,8 @@ TEST_P(DeviceLostTest, BufferMapReadAsyncBeforeLossFails) {
    SetCallbackAndLoseForTesting();
}
-// Test that SetSubData fails after device is lost
-TEST_P(DeviceLostTest, SetSubDataFails) {
+// Test that WriteBuffer fails after device is lost
+TEST_P(DeviceLostTest, WriteBufferFails) {
    wgpu::BufferDescriptor bufferDescriptor;
    bufferDescriptor.size = sizeof(float);
    bufferDescriptor.usage = wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopyDst;
@@ -329,8 +329,8 @@ TEST_P(DeviceLostTest, SetSubDataFails) {
    wgpu::Buffer buffer = device.CreateBuffer(&bufferDescriptor);
    SetCallbackAndLoseForTesting();
-   std::array<float, 1> data = {12};
-   ASSERT_DEVICE_ERROR(buffer.SetSubData(0, sizeof(float), data.data()));
+   float data = 12.0f;
+   ASSERT_DEVICE_ERROR(queue.WriteBuffer(buffer, 0, &data, sizeof(data)));
}
// Test that Command Encoder Finish fails when device lost

View File

@@ -29,7 +29,7 @@ class GpuMemorySyncTests : public DawnTest {
        wgpu::Buffer buffer = device.CreateBuffer(&srcDesc);
        int myData = 0;
-       buffer.SetSubData(0, sizeof(myData), &myData);
+       queue.WriteBuffer(buffer, 0, &myData, sizeof(myData));
        return buffer;
    }
@@ -432,7 +432,7 @@ class MultipleWriteThenMultipleReadTests : public DawnTest {
        wgpu::Buffer buffer = device.CreateBuffer(&srcDesc);
        std::vector<uint8_t> zeros(size, 0);
-       buffer.SetSubData(0, size, zeros.data());
+       queue.WriteBuffer(buffer, 0, zeros.data(), size);
        return buffer;
    }

View File

@@ -34,3 +34,140 @@ DAWN_INSTANTIATE_TEST(QueueTests,
                      NullBackend(),
                      OpenGLBackend(),
                      VulkanBackend());
class QueueWriteBufferTests : public DawnTest {};
// Test the simplest WriteBuffer setting one u32 at offset 0.
TEST_P(QueueWriteBufferTests, SmallDataAtZero) {
wgpu::BufferDescriptor descriptor;
descriptor.size = 4;
descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
uint32_t value = 0x01020304;
queue.WriteBuffer(buffer, 0, &value, sizeof(value));
EXPECT_BUFFER_U32_EQ(value, buffer, 0);
}
// Test an empty WriteBuffer
TEST_P(QueueWriteBufferTests, ZeroSized) {
wgpu::BufferDescriptor descriptor;
descriptor.size = 4;
descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
uint32_t initialValue = 0x42;
queue.WriteBuffer(buffer, 0, &initialValue, sizeof(initialValue));
queue.WriteBuffer(buffer, 0, nullptr, 0);
// The content of the buffer isn't changed
EXPECT_BUFFER_U32_EQ(initialValue, buffer, 0);
}
// Call WriteBuffer at offset 0 via a u32 twice. Test that data is updated accordingly.
TEST_P(QueueWriteBufferTests, SetTwice) {
wgpu::BufferDescriptor descriptor;
descriptor.size = 4;
descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
uint32_t value = 0x01020304;
queue.WriteBuffer(buffer, 0, &value, sizeof(value));
EXPECT_BUFFER_U32_EQ(value, buffer, 0);
value = 0x05060708;
queue.WriteBuffer(buffer, 0, &value, sizeof(value));
EXPECT_BUFFER_U32_EQ(value, buffer, 0);
}
// Test that WriteBuffer offset works.
TEST_P(QueueWriteBufferTests, SmallDataAtOffset) {
wgpu::BufferDescriptor descriptor;
descriptor.size = 4000;
descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
constexpr uint64_t kOffset = 2000;
uint32_t value = 0x01020304;
queue.WriteBuffer(buffer, kOffset, &value, sizeof(value));
EXPECT_BUFFER_U32_EQ(value, buffer, kOffset);
}
// Stress test for many calls to WriteBuffer
TEST_P(QueueWriteBufferTests, ManyWriteBuffer) {
// Note: Increasing the size of the buffer will likely cause timeout issues.
// In D3D12, timeout detection occurs when the GPU scheduler tries but cannot preempt the task
// executing these commands in-flight. If this takes longer than ~2s, a device reset occurs and
// fails the test. Since GPUs may or may not complete by then, this test must be disabled OR
// modified to be well-below the timeout limit.
// TODO (jiawei.shao@intel.com): find out why this test fails on Intel Vulkan Linux bots.
DAWN_SKIP_TEST_IF(IsIntel() && IsVulkan() && IsLinux());
// TODO(https://bugs.chromium.org/p/dawn/issues/detail?id=228): Re-enable
// once the issue with Metal on 10.14.6 is fixed.
DAWN_SKIP_TEST_IF(IsMacOS() && IsIntel() && IsMetal());
constexpr uint64_t kSize = 4000 * 1000;
constexpr uint32_t kElements = 500 * 500;
wgpu::BufferDescriptor descriptor;
descriptor.size = kSize;
descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
std::vector<uint32_t> expectedData;
for (uint32_t i = 0; i < kElements; ++i) {
queue.WriteBuffer(buffer, i * sizeof(uint32_t), &i, sizeof(i));
expectedData.push_back(i);
}
EXPECT_BUFFER_U32_RANGE_EQ(expectedData.data(), buffer, 0, kElements);
}
// Test using WriteBuffer for lots of data
TEST_P(QueueWriteBufferTests, LargeWriteBuffer) {
constexpr uint64_t kSize = 4000 * 1000;
constexpr uint32_t kElements = 1000 * 1000;
wgpu::BufferDescriptor descriptor;
descriptor.size = kSize;
descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
std::vector<uint32_t> expectedData;
for (uint32_t i = 0; i < kElements; ++i) {
expectedData.push_back(i);
}
queue.WriteBuffer(buffer, 0, expectedData.data(), kElements * sizeof(uint32_t));
EXPECT_BUFFER_U32_RANGE_EQ(expectedData.data(), buffer, 0, kElements);
}
// Test using WriteBuffer for super large data block
TEST_P(QueueWriteBufferTests, SuperLargeWriteBuffer) {
constexpr uint64_t kSize = 12000 * 1000;
constexpr uint64_t kElements = 3000 * 1000;
wgpu::BufferDescriptor descriptor;
descriptor.size = kSize;
descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
std::vector<uint32_t> expectedData;
for (uint32_t i = 0; i < kElements; ++i) {
expectedData.push_back(i);
}
queue.WriteBuffer(buffer, 0, expectedData.data(), kElements * sizeof(uint32_t));
EXPECT_BUFFER_U32_RANGE_EQ(expectedData.data(), buffer, 0, kElements);
}
DAWN_INSTANTIATE_TEST(QueueWriteBufferTests,
D3D12Backend(),
MetalBackend(),
OpenGLBackend(),
VulkanBackend());

View File

@@ -224,7 +224,7 @@ class TextureFormatTest : public DawnTest {
        ASSERT(sampleDataSize % sampleFormatInfo.texelByteSize == 0);
        uint32_t width = sampleDataSize / sampleFormatInfo.texelByteSize;
-       // The input data must be a multiple of 4 byte in length for setSubData
+       // The input data must be a multiple of 4 byte in length for WriteBuffer
        ASSERT(sampleDataSize % 4 == 0);
        ASSERT(expectedRenderDataSize % 4 == 0);

View File

@@ -586,7 +586,7 @@ TEST_P(TextureZeroInitTest, ComputePassSampledTextureClear) {
    wgpu::Buffer bufferTex = device.CreateBuffer(&bufferDescriptor);
    // Add data to buffer to ensure it is initialized
    uint32_t data = 100;
-   bufferTex.SetSubData(0, sizeof(data), &data);
+   queue.WriteBuffer(bufferTex, 0, &data, sizeof(data));
    wgpu::SamplerDescriptor samplerDesc = utils::GetDefaultSamplerDescriptor();
    wgpu::Sampler sampler = device.CreateSampler(&samplerDesc);

View File

@@ -22,7 +22,7 @@ namespace {
    constexpr unsigned int kNumIterations = 50;
    enum class UploadMethod {
-       SetSubData,
+       WriteBuffer,
        CreateBufferMapped,
    };
@@ -52,8 +52,8 @@ namespace {
        ostream << static_cast<const AdapterTestParam&>(param);
        switch (param.uploadMethod) {
-           case UploadMethod::SetSubData:
-               ostream << "_SetSubData";
+           case UploadMethod::WriteBuffer:
+               ostream << "_WriteBuffer";
                break;
            case UploadMethod::CreateBufferMapped:
                ostream << "_CreateBufferMapped";
@@ -113,11 +113,11 @@ void BufferUploadPerf::SetUp() {
void BufferUploadPerf::Step() {
    switch (GetParam().uploadMethod) {
-       case UploadMethod::SetSubData: {
+       case UploadMethod::WriteBuffer: {
            for (unsigned int i = 0; i < kNumIterations; ++i) {
-               dst.SetSubData(0, data.size(), data.data());
+               queue.WriteBuffer(dst, 0, data.data(), data.size());
            }
-           // Make sure all SetSubData's are flushed.
+           // Make sure all WriteBuffer's are flushed.
            queue.Submit(0, nullptr);
            break;
        }
@@ -150,7 +150,7 @@ TEST_P(BufferUploadPerf, Run) {
DAWN_INSTANTIATE_PERF_TEST_SUITE_P(BufferUploadPerf,
                                   {D3D12Backend(), MetalBackend(), OpenGLBackend(),
                                    VulkanBackend()},
-                                  {UploadMethod::SetSubData, UploadMethod::CreateBufferMapped},
+                                  {UploadMethod::WriteBuffer, UploadMethod::CreateBufferMapped},
                                   {UploadSize::BufferSize_1KB, UploadSize::BufferSize_64KB,
                                    UploadSize::BufferSize_1MB, UploadSize::BufferSize_4MB,
                                    UploadSize::BufferSize_16MB});

View File

@@ -568,18 +568,20 @@ void DrawCallPerf::Step() {
    switch (GetParam().bindGroupType) {
        case BindGroup::NoChange:
        case BindGroup::Redundant:
-           mUniformBuffers[0].SetSubData(0, 3 * sizeof(float), mUniformBufferData.data());
+           queue.WriteBuffer(mUniformBuffers[0], 0, mUniformBufferData.data(),
+                             3 * sizeof(float));
            break;
        case BindGroup::NoReuse:
        case BindGroup::Multiple:
            for (uint32_t i = 0; i < kNumDraws; ++i) {
-               mUniformBuffers[i].SetSubData(
-                   0, 3 * sizeof(float), mUniformBufferData.data() + i * mNumUniformFloats);
+               queue.WriteBuffer(mUniformBuffers[i], 0,
+                                 mUniformBufferData.data() + i * mNumUniformFloats,
+                                 3 * sizeof(float));
            }
            break;
        case BindGroup::Dynamic:
-           mUniformBuffers[0].SetSubData(0, mUniformBufferData.size() * sizeof(float),
-                                         mUniformBufferData.data());
+           queue.WriteBuffer(mUniformBuffers[0], 0, mUniformBufferData.data(),
+                             mUniformBufferData.size() * sizeof(float));
            break;
    }
}

View File

@@ -74,13 +74,6 @@ class BufferValidationTest : public ValidationTest {
        return device.CreateBuffer(&descriptor);
    }
-   wgpu::Buffer CreateSetSubDataBuffer(uint64_t size) {
-       wgpu::BufferDescriptor descriptor;
-       descriptor.size = size;
-       descriptor.usage = wgpu::BufferUsage::CopyDst;
-       return device.CreateBuffer(&descriptor);
-   }
    wgpu::CreateBufferMappedResult CreateBufferMapped(uint64_t size, wgpu::BufferUsage usage) {
        wgpu::BufferDescriptor descriptor;
@@ -429,72 +422,6 @@ TEST_F(BufferValidationTest, DestroyInsideMapWriteCallback) {
    queue.Submit(0, nullptr);
}
// Test the success case for Buffer::SetSubData
TEST_F(BufferValidationTest, SetSubDataSuccess) {
wgpu::Buffer buf = CreateSetSubDataBuffer(4);
uint32_t foo = 0x01020304;
buf.SetSubData(0, sizeof(foo), &foo);
}
// Test error case for SetSubData out of bounds
TEST_F(BufferValidationTest, SetSubDataOutOfBounds) {
wgpu::Buffer buf = CreateSetSubDataBuffer(1);
uint8_t foo[2] = {0, 0};
ASSERT_DEVICE_ERROR(buf.SetSubData(0, 2, foo));
}
// Test error case for SetSubData out of bounds with an overflow
TEST_F(BufferValidationTest, SetSubDataOutOfBoundsOverflow) {
wgpu::Buffer buf = CreateSetSubDataBuffer(1000);
uint8_t foo[2] = {0, 0};
// An offset that when added to "2" would overflow to be zero and pass validation without
// overflow checks.
uint64_t offset = uint64_t(int64_t(0) - int64_t(2));
ASSERT_DEVICE_ERROR(buf.SetSubData(offset, 2, foo));
}
// Test error case for SetSubData with the wrong usage
TEST_F(BufferValidationTest, SetSubDataWrongUsage) {
wgpu::BufferDescriptor descriptor;
descriptor.size = 4;
descriptor.usage = wgpu::BufferUsage::Vertex;
wgpu::Buffer buf = device.CreateBuffer(&descriptor);
uint8_t foo = 0;
ASSERT_DEVICE_ERROR(buf.SetSubData(0, sizeof(foo), &foo));
}
// Test SetSubData with unaligned size
TEST_F(BufferValidationTest, SetSubDataWithUnalignedSize) {
wgpu::BufferDescriptor descriptor;
descriptor.size = 4;
descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
wgpu::Buffer buf = device.CreateBuffer(&descriptor);
uint8_t value = 123;
ASSERT_DEVICE_ERROR(buf.SetSubData(0, sizeof(value), &value));
}
// Test SetSubData with unaligned offset
TEST_F(BufferValidationTest, SetSubDataWithUnalignedOffset) {
wgpu::BufferDescriptor descriptor;
descriptor.size = 4000;
descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
wgpu::Buffer buf = device.CreateBuffer(&descriptor);
uint64_t kOffset = 2999;
uint32_t value = 0x01020304;
ASSERT_DEVICE_ERROR(buf.SetSubData(kOffset, sizeof(value), &value));
}
// Test that it is valid to destroy an unmapped buffer
TEST_F(BufferValidationTest, DestroyUnmappedBuffer) {
    {
@@ -547,7 +474,7 @@ TEST_F(BufferValidationTest, DestroyMappedBufferCausesImplicitUnmap) {
// Test that it is valid to Destroy a destroyed buffer
TEST_F(BufferValidationTest, DestroyDestroyedBuffer) {
-   wgpu::Buffer buf = CreateSetSubDataBuffer(4);
+   wgpu::Buffer buf = CreateMapWriteBuffer(4);
    buf.Destroy();
    buf.Destroy();
}
@@ -580,14 +507,6 @@ TEST_F(BufferValidationTest, MapDestroyedBuffer) {
    }
}
-// Test that it is invalid to call SetSubData on a destroyed buffer
-TEST_F(BufferValidationTest, SetSubDataDestroyedBuffer) {
-   wgpu::Buffer buf = CreateSetSubDataBuffer(4);
-   buf.Destroy();
-   uint8_t foo = 0;
-   ASSERT_DEVICE_ERROR(buf.SetSubData(0, sizeof(foo), &foo));
-}
// Test that is is invalid to Map a mapped buffer
TEST_F(BufferValidationTest, MapMappedBuffer) {
    {
@@ -618,24 +537,6 @@ TEST_F(BufferValidationTest, MapCreateBufferMappedBuffer) {
    }
}
// Test that it is invalid to call SetSubData on a mapped buffer
TEST_F(BufferValidationTest, SetSubDataMappedBuffer) {
{
wgpu::Buffer buf = CreateMapReadBuffer(4);
buf.MapReadAsync(ToMockBufferMapReadCallback, nullptr);
uint8_t foo = 0;
ASSERT_DEVICE_ERROR(buf.SetSubData(0, sizeof(foo), &foo));
queue.Submit(0, nullptr);
}
{
wgpu::Buffer buf = CreateMapWriteBuffer(4);
buf.MapWriteAsync(ToMockBufferMapWriteCallback, nullptr);
uint8_t foo = 0;
ASSERT_DEVICE_ERROR(buf.SetSubData(0, sizeof(foo), &foo));
queue.Submit(0, nullptr);
}
}
// Test that it is valid to submit a buffer in a queue with a map usage if it is unmapped
TEST_F(BufferValidationTest, SubmitBufferWithMapUsage) {
    wgpu::BufferDescriptor descriptorA;
@@ -732,7 +633,11 @@ TEST_F(BufferValidationTest, SubmitDestroyedBuffer) {
// Test that a map usage is required to call Unmap
TEST_F(BufferValidationTest, UnmapWithoutMapUsage) {
-   wgpu::Buffer buf = CreateSetSubDataBuffer(4);
+   wgpu::BufferDescriptor descriptor;
+   descriptor.size = 4;
+   descriptor.usage = wgpu::BufferUsage::CopyDst;
+   wgpu::Buffer buf = device.CreateBuffer(&descriptor);
    ASSERT_DEVICE_ERROR(buf.Unmap());
}

View File

@@ -66,4 +66,125 @@ TEST_F(QueueSubmitValidationTest, SubmitWithMappedBuffer) {
    queue.Submit(1, &commands);
}
class QueueWriteBufferValidationTest : public ValidationTest {
private:
void SetUp() override {
ValidationTest::SetUp();
queue = device.GetDefaultQueue();
}
protected:
wgpu::Buffer CreateBuffer(uint64_t size) {
wgpu::BufferDescriptor descriptor;
descriptor.size = size;
descriptor.usage = wgpu::BufferUsage::CopyDst;
return device.CreateBuffer(&descriptor);
}
wgpu::Queue queue;
};
// Test the success case for WriteBuffer
TEST_F(QueueWriteBufferValidationTest, Success) {
wgpu::Buffer buf = CreateBuffer(4);
uint32_t foo = 0x01020304;
queue.WriteBuffer(buf, 0, &foo, sizeof(foo));
}
// Test error case for WriteBuffer out of bounds
TEST_F(QueueWriteBufferValidationTest, OutOfBounds) {
wgpu::Buffer buf = CreateBuffer(4);
uint32_t foo[2] = {0, 0};
ASSERT_DEVICE_ERROR(queue.WriteBuffer(buf, 0, foo, 8));
}
// Test error case for WriteBuffer out of bounds with an overflow
TEST_F(QueueWriteBufferValidationTest, OutOfBoundsOverflow) {
wgpu::Buffer buf = CreateBuffer(1024);
uint32_t foo[2] = {0, 0};
// An offset that when added to "4" would overflow to be zero and pass validation without
// overflow checks.
uint64_t offset = uint64_t(int64_t(0) - int64_t(4));
ASSERT_DEVICE_ERROR(queue.WriteBuffer(buf, offset, foo, 4));
}
// Test error case for WriteBuffer with the wrong usage
TEST_F(QueueWriteBufferValidationTest, WrongUsage) {
wgpu::BufferDescriptor descriptor;
descriptor.size = 4;
descriptor.usage = wgpu::BufferUsage::Vertex;
wgpu::Buffer buf = device.CreateBuffer(&descriptor);
uint32_t foo = 0;
ASSERT_DEVICE_ERROR(queue.WriteBuffer(buf, 0, &foo, sizeof(foo)));
}
// Test WriteBuffer with unaligned size
TEST_F(QueueWriteBufferValidationTest, UnalignedSize) {
wgpu::Buffer buf = CreateBuffer(4);
uint16_t value = 123;
ASSERT_DEVICE_ERROR(queue.WriteBuffer(buf, 0, &value, sizeof(value)));
}
// Test WriteBuffer with unaligned offset
TEST_F(QueueWriteBufferValidationTest, UnalignedOffset) {
wgpu::Buffer buf = CreateBuffer(8);
uint32_t value = 0x01020304;
ASSERT_DEVICE_ERROR(queue.WriteBuffer(buf, 2, &value, sizeof(value)));
}
// Test WriteBuffer with destroyed buffer
TEST_F(QueueWriteBufferValidationTest, DestroyedBuffer) {
wgpu::Buffer buf = CreateBuffer(4);
buf.Destroy();
uint32_t value = 0;
ASSERT_DEVICE_ERROR(queue.WriteBuffer(buf, 0, &value, sizeof(value)));
}
// Test WriteBuffer with mapped buffer
TEST_F(QueueWriteBufferValidationTest, MappedBuffer) {
// CreateBufferMapped
{
wgpu::BufferDescriptor descriptor;
descriptor.size = 4;
descriptor.usage = wgpu::BufferUsage::CopyDst;
wgpu::CreateBufferMappedResult result = device.CreateBufferMapped(&descriptor);
uint32_t value = 0;
ASSERT_DEVICE_ERROR(queue.WriteBuffer(result.buffer, 0, &value, sizeof(value)));
}
// MapReadAsync
{
wgpu::BufferDescriptor descriptor;
descriptor.size = 4;
descriptor.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::MapRead;
wgpu::Buffer buf = device.CreateBuffer(&descriptor);
buf.MapReadAsync(nullptr, nullptr);
uint32_t value = 0;
ASSERT_DEVICE_ERROR(queue.WriteBuffer(buf, 0, &value, sizeof(value)));
}
// MapWriteAsync
{
wgpu::BufferDescriptor descriptor;
descriptor.size = 4;
descriptor.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::MapWrite;
wgpu::Buffer buf = device.CreateBuffer(&descriptor);
buf.MapWriteAsync(nullptr, nullptr);
uint32_t value = 0;
ASSERT_DEVICE_ERROR(queue.WriteBuffer(buf, 0, &value, sizeof(value)));
}
}
} // anonymous namespace

View File

@@ -601,7 +601,7 @@ TEST_P(D3D12DescriptorHeapTests, EncodeReuseUBOMultipleSubmits) {
    // Re-encode the first bindgroup again.
    {
        std::array<float, 4> greenColor = {0, 1, 0, 1};
-       firstUniformBuffer.SetSubData(0, sizeof(greenColor), &greenColor);
+       queue.WriteBuffer(firstUniformBuffer, 0, &greenColor, sizeof(greenColor));
        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
        {

View File

@@ -217,7 +217,7 @@ TEST_P(D3D12ResourceResidencyTests, AsyncMappedBufferRead) {
    wgpu::Buffer buffer = CreateBuffer(4, kMapReadBufferUsage);
    uint32_t data = 12345;
-   buffer.SetSubData(0, sizeof(uint32_t), &data);
+   queue.WriteBuffer(buffer, 0, &data, sizeof(uint32_t));
    // The mappable buffer should be resident.
    EXPECT_TRUE(CheckIfBufferIsResident(buffer));

View File

@@ -161,9 +161,9 @@ namespace utils {
        wgpu::BufferDescriptor descriptor;
        descriptor.size = size;
        descriptor.usage = usage | wgpu::BufferUsage::CopyDst;
        wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
-       buffer.SetSubData(0, size, data);
+       device.GetDefaultQueue().WriteBuffer(buffer, 0, data, size);
        return buffer;
    }