Implement Buffer::MapAsync
MapAsync in dawn_native is fully implemented and is only missing a couple of cleanups that can be done once MapRead/WriteAsync are removed. MapAsync in dawn_wire is left as a pure shim on top of MapRead/WriteAsync and will be transitioned to its own commands in follow-ups.

All MapRead/WriteAsync end2end and validation tests are duplicated for MapAsync.

Bug: dawn:445
Change-Id: Ib1430b9257149917be19a84f13e0ddd2a8eccc32
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/24260
Commit-Queue: Corentin Wallez <cwallez@chromium.org>
Reviewed-by: Austin Eng <enga@chromium.org>
Reviewed-by: Stephen White <senorblanco@chromium.org>
Parent: 92f501dbfd
Commit: 0d52f800a1
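For orientation, the new entry point is used roughly like this from Dawn's C++ wrapper API. This is a hedged sketch that mirrors the BufferMappingTests added by this change; the buffer setup and the Tick-based polling loop are assumptions about the surrounding harness, not part of the API itself.

    // Sketch: map a 4-byte MapRead buffer and read it back once the callback fires.
    wgpu::BufferDescriptor desc;
    desc.size = 4;
    desc.usage = wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopyDst;
    wgpu::Buffer buffer = device.CreateBuffer(&desc);

    bool done = false;
    buffer.MapAsync(
        wgpu::MapMode::Read, 0, 4,
        [](WGPUBufferMapAsyncStatus status, void* userdata) {
            // Errors are reported through this same callback with a non-Success status.
            *static_cast<bool*>(userdata) = (status == WGPUBufferMapAsyncStatus_Success);
        },
        &done);

    while (!done) {
        device.Tick();  // assumption: something must drive callback delivery
    }

    const uint32_t* data = static_cast<const uint32_t*>(buffer.GetConstMappedRange());
    buffer.Unmap();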
dawn.json | 25
@@ -197,6 +197,16 @@
                 {"name": "userdata", "type": "void", "annotation": "*"}
             ]
         },
+        {
+            "name": "map async",
+            "args": [
+                {"name": "flags", "type": "map mode"},
+                {"name": "offset", "type": "size_t"},
+                {"name": "size", "type": "size_t"},
+                {"name": "callback", "type": "buffer map callback"},
+                {"name": "userdata", "type": "void", "annotation": "*"}
+            ]
+        },
         {
             "name": "get mapped range",
             "returns": "void *"
@@ -234,6 +244,13 @@
             {"name": "mapped at creation", "type": "bool", "default": "false"}
         ]
     },
+    "buffer map callback": {
+        "category": "callback",
+        "args": [
+            {"name": "status", "type": "buffer map async status"},
+            {"name": "userdata", "type": "void", "annotation": "*"}
+        ]
+    },
     "buffer map read callback": {
         "category": "callback",
         "args": [
@@ -864,6 +881,14 @@
             {"value": 1, "name": "load"}
         ]
     },
+    "map mode": {
+        "category": "bitmask",
+        "values": [
+            {"value": 0, "name": "none"},
+            {"value": 1, "name": "read"},
+            {"value": 2, "name": "write"}
+        ]
+    },
     "store op": {
         "category": "enum",
         "values": [
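For reference, the dawn.json entries above feed Dawn's code generators; the C-level declarations they produce should come out roughly as follows. This is a sketch of the expected generated header, not code that is hand-written in this change.

    typedef void (*WGPUBufferMapCallback)(WGPUBufferMapAsyncStatus status, void* userdata);

    typedef enum WGPUMapMode {
        WGPUMapMode_None = 0x00000000,
        WGPUMapMode_Read = 0x00000001,
        WGPUMapMode_Write = 0x00000002,
    } WGPUMapMode;
    typedef WGPUFlags WGPUMapModeFlags;

    WGPU_EXPORT void wgpuBufferMapAsync(WGPUBuffer buffer,
                                        WGPUMapModeFlags mode,
                                        size_t offset,
                                        size_t size,
                                        WGPUBufferMapCallback callback,
                                        void* userdata);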
@@ -102,6 +102,7 @@
         "SurfaceDescriptorFromXlib"
     ],
     "client_side_commands": [
+        "BufferMapAsync",
         "BufferMapReadAsync",
         "BufferMapWriteAsync",
         "BufferSetSubData",
@ -96,6 +96,19 @@ void ProcTableAsClass::BufferMapWriteAsync(WGPUBuffer self,
|
||||||
OnBufferMapWriteAsyncCallback(self, callback, userdata);
|
OnBufferMapWriteAsyncCallback(self, callback, userdata);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void ProcTableAsClass::BufferMapAsync(WGPUBuffer self,
|
||||||
|
WGPUMapModeFlags mode,
|
||||||
|
size_t offset,
|
||||||
|
size_t size,
|
||||||
|
WGPUBufferMapCallback callback,
|
||||||
|
void* userdata) {
|
||||||
|
auto object = reinterpret_cast<ProcTableAsClass::Object*>(self);
|
||||||
|
object->mapAsyncCallback = callback;
|
||||||
|
object->userdata = userdata;
|
||||||
|
|
||||||
|
OnBufferMapAsyncCallback(self, callback, userdata);
|
||||||
|
}
|
||||||
|
|
||||||
void ProcTableAsClass::FenceOnCompletion(WGPUFence self,
|
void ProcTableAsClass::FenceOnCompletion(WGPUFence self,
|
||||||
uint64_t value,
|
uint64_t value,
|
||||||
WGPUFenceOnCompletionCallback callback,
|
WGPUFenceOnCompletionCallback callback,
|
||||||
|
@ -135,6 +148,11 @@ void ProcTableAsClass::CallMapWriteCallback(WGPUBuffer buffer,
|
||||||
object->mapWriteCallback(status, data, dataLength, object->userdata);
|
object->mapWriteCallback(status, data, dataLength, object->userdata);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void ProcTableAsClass::CallMapAsyncCallback(WGPUBuffer buffer, WGPUBufferMapAsyncStatus status) {
|
||||||
|
auto object = reinterpret_cast<ProcTableAsClass::Object*>(buffer);
|
||||||
|
object->mapAsyncCallback(status, object->userdata);
|
||||||
|
}
|
||||||
|
|
||||||
void ProcTableAsClass::CallFenceOnCompletionCallback(WGPUFence fence,
|
void ProcTableAsClass::CallFenceOnCompletionCallback(WGPUFence fence,
|
||||||
WGPUFenceCompletionStatus status) {
|
WGPUFenceCompletionStatus status) {
|
||||||
auto object = reinterpret_cast<ProcTableAsClass::Object*>(fence);
|
auto object = reinterpret_cast<ProcTableAsClass::Object*>(fence);
|
||||||
|
|
|
@ -65,6 +65,12 @@ class ProcTableAsClass {
|
||||||
void BufferMapWriteAsync(WGPUBuffer self,
|
void BufferMapWriteAsync(WGPUBuffer self,
|
||||||
WGPUBufferMapWriteCallback callback,
|
WGPUBufferMapWriteCallback callback,
|
||||||
void* userdata);
|
void* userdata);
|
||||||
|
void BufferMapAsync(WGPUBuffer self,
|
||||||
|
WGPUMapModeFlags mode,
|
||||||
|
size_t offset,
|
||||||
|
size_t size,
|
||||||
|
WGPUBufferMapCallback callback,
|
||||||
|
void* userdata);
|
||||||
void FenceOnCompletion(WGPUFence self,
|
void FenceOnCompletion(WGPUFence self,
|
||||||
uint64_t value,
|
uint64_t value,
|
||||||
WGPUFenceOnCompletionCallback callback,
|
WGPUFenceOnCompletionCallback callback,
|
||||||
|
@ -86,6 +92,9 @@ class ProcTableAsClass {
|
||||||
virtual void OnBufferMapWriteAsyncCallback(WGPUBuffer buffer,
|
virtual void OnBufferMapWriteAsyncCallback(WGPUBuffer buffer,
|
||||||
WGPUBufferMapWriteCallback callback,
|
WGPUBufferMapWriteCallback callback,
|
||||||
void* userdata) = 0;
|
void* userdata) = 0;
|
||||||
|
virtual void OnBufferMapAsyncCallback(WGPUBuffer buffer,
|
||||||
|
WGPUBufferMapCallback callback,
|
||||||
|
void* userdata) = 0;
|
||||||
virtual void OnFenceOnCompletionCallback(WGPUFence fence,
|
virtual void OnFenceOnCompletionCallback(WGPUFence fence,
|
||||||
uint64_t value,
|
uint64_t value,
|
||||||
WGPUFenceOnCompletionCallback callback,
|
WGPUFenceOnCompletionCallback callback,
|
||||||
|
@ -96,6 +105,7 @@ class ProcTableAsClass {
|
||||||
void CallDeviceLostCallback(WGPUDevice device, const char* message);
|
void CallDeviceLostCallback(WGPUDevice device, const char* message);
|
||||||
void CallMapReadCallback(WGPUBuffer buffer, WGPUBufferMapAsyncStatus status, const void* data, uint64_t dataLength);
|
void CallMapReadCallback(WGPUBuffer buffer, WGPUBufferMapAsyncStatus status, const void* data, uint64_t dataLength);
|
||||||
void CallMapWriteCallback(WGPUBuffer buffer, WGPUBufferMapAsyncStatus status, void* data, uint64_t dataLength);
|
void CallMapWriteCallback(WGPUBuffer buffer, WGPUBufferMapAsyncStatus status, void* data, uint64_t dataLength);
|
||||||
|
void CallMapAsyncCallback(WGPUBuffer buffer, WGPUBufferMapAsyncStatus status);
|
||||||
void CallFenceOnCompletionCallback(WGPUFence fence, WGPUFenceCompletionStatus status);
|
void CallFenceOnCompletionCallback(WGPUFence fence, WGPUFenceCompletionStatus status);
|
||||||
|
|
||||||
struct Object {
|
struct Object {
|
||||||
|
@ -104,6 +114,7 @@ class ProcTableAsClass {
|
||||||
WGPUDeviceLostCallback deviceLostCallback = nullptr;
|
WGPUDeviceLostCallback deviceLostCallback = nullptr;
|
||||||
WGPUBufferMapReadCallback mapReadCallback = nullptr;
|
WGPUBufferMapReadCallback mapReadCallback = nullptr;
|
||||||
WGPUBufferMapWriteCallback mapWriteCallback = nullptr;
|
WGPUBufferMapWriteCallback mapWriteCallback = nullptr;
|
||||||
|
WGPUBufferMapCallback mapAsyncCallback = nullptr;
|
||||||
WGPUFenceOnCompletionCallback fenceOnCompletionCallback = nullptr;
|
WGPUFenceOnCompletionCallback fenceOnCompletionCallback = nullptr;
|
||||||
void* userdata = 0;
|
void* userdata = 0;
|
||||||
};
|
};
|
||||||
|
@ -140,6 +151,10 @@ class MockProcTable : public ProcTableAsClass {
|
||||||
MOCK_METHOD(bool, OnDevicePopErrorScopeCallback, (WGPUDevice device, WGPUErrorCallback callback, void* userdata), (override));
|
MOCK_METHOD(bool, OnDevicePopErrorScopeCallback, (WGPUDevice device, WGPUErrorCallback callback, void* userdata), (override));
|
||||||
MOCK_METHOD(void, OnBufferMapReadAsyncCallback, (WGPUBuffer buffer, WGPUBufferMapReadCallback callback, void* userdata), (override));
|
MOCK_METHOD(void, OnBufferMapReadAsyncCallback, (WGPUBuffer buffer, WGPUBufferMapReadCallback callback, void* userdata), (override));
|
||||||
MOCK_METHOD(void, OnBufferMapWriteAsyncCallback, (WGPUBuffer buffer, WGPUBufferMapWriteCallback callback, void* userdata), (override));
|
MOCK_METHOD(void, OnBufferMapWriteAsyncCallback, (WGPUBuffer buffer, WGPUBufferMapWriteCallback callback, void* userdata), (override));
|
||||||
|
MOCK_METHOD(void,
|
||||||
|
OnBufferMapAsyncCallback,
|
||||||
|
(WGPUBuffer buffer, WGPUBufferMapCallback callback, void* userdata),
|
||||||
|
(override));
|
||||||
MOCK_METHOD(void, OnFenceOnCompletionCallback, (WGPUFence fence, uint64_t value, WGPUFenceOnCompletionCallback callback, void* userdata), (override));
|
MOCK_METHOD(void, OnFenceOnCompletionCallback, (WGPUFence fence, uint64_t value, WGPUFenceOnCompletionCallback callback, void* userdata), (override));
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@@ -72,6 +72,10 @@ namespace dawn_native {
             UNREACHABLE();
             return {};
         }
+        MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override {
+            UNREACHABLE();
+            return {};
+        }
         void* GetMappedPointerImpl() override {
             return mFakeMappedData.get();
         }
@@ -245,6 +249,22 @@ namespace dawn_native {
         }
     }
 
+    void BufferBase::CallMapCallback(uint32_t serial, WGPUBufferMapAsyncStatus status) {
+        ASSERT(!IsError());
+        if (mMapCallback != nullptr && serial == mMapSerial) {
+            // Tag the callback as fired before firing it, otherwise it could fire a second time if
+            // for example buffer.Unmap() is called inside the application-provided callback.
+            WGPUBufferMapCallback callback = mMapCallback;
+            mMapCallback = nullptr;
+
+            if (GetDevice()->IsLost()) {
+                callback(WGPUBufferMapAsyncStatus_DeviceLost, mMapUserdata);
+            } else {
+                callback(status, mMapUserdata);
+            }
+        }
+    }
+
     void BufferBase::SetSubData(uint64_t start, uint64_t count, const void* data) {
         if (count > uint64_t(std::numeric_limits<size_t>::max())) {
             GetDevice()->HandleError(InternalErrorType::Validation, "count too big");
@@ -252,11 +272,14 @@ namespace dawn_native {
 
         Ref<QueueBase> queue = AcquireRef(GetDevice()->GetDefaultQueue());
         GetDevice()->EmitDeprecationWarning(
-            "Buffer::SetSubData is deprecated, use Queue::WriteBuffer instead");
+            "Buffer::SetSubData is deprecated. Use Queue::WriteBuffer instead");
         queue->WriteBuffer(this, start, data, static_cast<size_t>(count));
     }
 
     void BufferBase::MapReadAsync(WGPUBufferMapReadCallback callback, void* userdata) {
+        GetDevice()->EmitDeprecationWarning(
+            "Buffer::MapReadAsync is deprecated. Use Buffer::MapAsync instead");
+
         WGPUBufferMapAsyncStatus status;
         if (GetDevice()->ConsumedError(ValidateMap(wgpu::BufferUsage::MapRead, &status))) {
             callback(status, nullptr, 0, userdata);
@@ -270,6 +293,7 @@ namespace dawn_native {
         mMapSerial++;
         mMapReadCallback = callback;
         mMapUserdata = userdata;
+        mMapOffset = 0;
         mState = BufferState::Mapped;
 
         if (GetDevice()->ConsumedError(MapReadAsyncImpl())) {
@@ -278,10 +302,13 @@ namespace dawn_native {
         }
 
         MapRequestTracker* tracker = GetDevice()->GetMapRequestTracker();
-        tracker->Track(this, mMapSerial, false);
+        tracker->Track(this, mMapSerial, MapType::Read);
     }
 
     void BufferBase::MapWriteAsync(WGPUBufferMapWriteCallback callback, void* userdata) {
+        GetDevice()->EmitDeprecationWarning(
+            "Buffer::MapReadAsync is deprecated. Use Buffer::MapAsync instead");
+
         WGPUBufferMapAsyncStatus status;
         if (GetDevice()->ConsumedError(ValidateMap(wgpu::BufferUsage::MapWrite, &status))) {
             callback(status, nullptr, 0, userdata);
@@ -295,6 +322,7 @@ namespace dawn_native {
         mMapSerial++;
         mMapWriteCallback = callback;
         mMapUserdata = userdata;
+        mMapOffset = 0;
         mState = BufferState::Mapped;
 
         if (GetDevice()->ConsumedError(MapWriteAsyncImpl())) {
@@ -303,7 +331,45 @@ namespace dawn_native {
         }
 
         MapRequestTracker* tracker = GetDevice()->GetMapRequestTracker();
-        tracker->Track(this, mMapSerial, true);
+        tracker->Track(this, mMapSerial, MapType::Write);
+    }
+
+    void BufferBase::MapAsync(wgpu::MapMode mode,
+                              size_t offset,
+                              size_t size,
+                              WGPUBufferMapCallback callback,
+                              void* userdata) {
+        // Handle the defaulting of size required by WebGPU, even if in webgpu_cpp.h it is not
+        // possible to default the function argument (because there is the callback later in the
+        // argument list)
+        if (size == 0 && offset < mSize) {
+            size = mSize - offset;
+        }
+
+        WGPUBufferMapAsyncStatus status;
+        if (GetDevice()->ConsumedError(ValidateMapAsync(mode, offset, size, &status))) {
+            if (callback) {
+                callback(status, userdata);
+            }
+            return;
+        }
+        ASSERT(!IsError());
+
+        // TODO(cwallez@chromium.org): what to do on wraparound? Could cause crashes.
+        mMapSerial++;
+        mMapMode = mode;
+        mMapOffset = offset;
+        mMapCallback = callback;
+        mMapUserdata = userdata;
+        mState = BufferState::Mapped;
+
+        if (GetDevice()->ConsumedError(MapAsyncImpl(mode, offset, size))) {
+            CallMapCallback(mMapSerial, WGPUBufferMapAsyncStatus_DeviceLost);
+            return;
+        }
+
+        MapRequestTracker* tracker = GetDevice()->GetMapRequestTracker();
+        tracker->Track(this, mMapSerial, MapType::Async);
     }
 
     void* BufferBase::GetMappedRange() {
@@ -322,12 +388,12 @@ namespace dawn_native {
         }
 
         if (mStagingBuffer != nullptr) {
-            return mStagingBuffer->GetMappedPointer();
+            return static_cast<uint8_t*>(mStagingBuffer->GetMappedPointer()) + mMapOffset;
         }
         if (mSize == 0) {
             return reinterpret_cast<uint8_t*>(intptr_t(0xCAFED00D));
         }
-        return GetMappedPointerImpl();
+        return static_cast<uint8_t*>(GetMappedPointerImpl()) + mMapOffset;
     }
 
     void BufferBase::Destroy() {
@@ -389,6 +455,7 @@ namespace dawn_native {
             // CreateBufferMapped.
             CallMapReadCallback(mMapSerial, WGPUBufferMapAsyncStatus_Unknown, nullptr, 0u);
             CallMapWriteCallback(mMapSerial, WGPUBufferMapAsyncStatus_Unknown, nullptr, 0u);
+            CallMapCallback(mMapSerial, WGPUBufferMapAsyncStatus_Unknown);
             UnmapImpl();
 
             mMapReadCallback = nullptr;
@@ -418,7 +485,7 @@ namespace dawn_native {
         switch (mState) {
             case BufferState::Mapped:
             case BufferState::MappedAtCreation:
-                return DAWN_VALIDATION_ERROR("Buffer already mapped");
+                return DAWN_VALIDATION_ERROR("Buffer is already mapped");
             case BufferState::Destroyed:
                 return DAWN_VALIDATION_ERROR("Buffer is destroyed");
             case BufferState::Unmapped:
@@ -433,6 +500,60 @@ namespace dawn_native {
         return {};
     }
 
+    MaybeError BufferBase::ValidateMapAsync(wgpu::MapMode mode,
+                                            size_t offset,
+                                            size_t size,
+                                            WGPUBufferMapAsyncStatus* status) const {
+        *status = WGPUBufferMapAsyncStatus_DeviceLost;
+        DAWN_TRY(GetDevice()->ValidateIsAlive());
+
+        *status = WGPUBufferMapAsyncStatus_Error;
+        DAWN_TRY(GetDevice()->ValidateObject(this));
+
+        if (offset % 4 != 0) {
+            return DAWN_VALIDATION_ERROR("offset must be a multiple of 4");
+        }
+
+        if (size % 4 != 0) {
+            return DAWN_VALIDATION_ERROR("size must be a multiple of 4");
+        }
+
+        if (uint64_t(offset) > mSize || uint64_t(size) > mSize - uint64_t(offset)) {
+            return DAWN_VALIDATION_ERROR("size + offset must fit in the buffer");
+        }
+
+        switch (mState) {
+            case BufferState::Mapped:
+            case BufferState::MappedAtCreation:
+                return DAWN_VALIDATION_ERROR("Buffer is already mapped");
+            case BufferState::Destroyed:
+                return DAWN_VALIDATION_ERROR("Buffer is destroyed");
+            case BufferState::Unmapped:
+                break;
+        }
+
+        bool isReadMode = mode & wgpu::MapMode::Read;
+        bool isWriteMode = mode & wgpu::MapMode::Write;
+        if (!(isReadMode ^ isWriteMode)) {
+            return DAWN_VALIDATION_ERROR("Exactly one of Read or Write mode must be set");
+        }
+
+        if (mode & wgpu::MapMode::Read) {
+            if (!(mUsage & wgpu::BufferUsage::MapRead)) {
+                return DAWN_VALIDATION_ERROR("The buffer must have the MapRead usage");
+            }
+        } else {
+            ASSERT(mode & wgpu::MapMode::Write);
+
+            if (!(mUsage & wgpu::BufferUsage::MapWrite)) {
+                return DAWN_VALIDATION_ERROR("The buffer must have the MapWrite usage");
+            }
+        }
+
+        *status = WGPUBufferMapAsyncStatus_Success;
+        return {};
+    }
+
     bool BufferBase::CanGetMappedRange(bool writable) const {
         // Note that:
         //
@@ -448,6 +569,8 @@ namespace dawn_native {
                 return true;
 
             case BufferState::Mapped:
+                // TODO(dawn:445): When mapRead/WriteAsync is removed, check against mMapMode
+                // instead of mUsage
                 ASSERT(bool(mUsage & wgpu::BufferUsage::MapRead) ^
                        bool(mUsage & wgpu::BufferUsage::MapWrite));
                 return !writable || (mUsage & wgpu::BufferUsage::MapWrite);
@@ -495,16 +618,19 @@ namespace dawn_native {
         mState = BufferState::Destroyed;
     }
 
-    bool BufferBase::IsMapped() const {
-        return mState == BufferState::Mapped;
-    }
-
-    void BufferBase::OnMapCommandSerialFinished(uint32_t mapSerial, bool isWrite) {
-        void* data = GetMappedRangeInternal(isWrite);
-        if (isWrite) {
-            CallMapWriteCallback(mapSerial, WGPUBufferMapAsyncStatus_Success, data, GetSize());
-        } else {
-            CallMapReadCallback(mapSerial, WGPUBufferMapAsyncStatus_Success, data, GetSize());
+    void BufferBase::OnMapCommandSerialFinished(uint32_t mapSerial, MapType type) {
+        switch (type) {
+            case MapType::Read:
+                CallMapReadCallback(mapSerial, WGPUBufferMapAsyncStatus_Success,
+                                    GetMappedRangeInternal(false), GetSize());
+                break;
+            case MapType::Write:
+                CallMapWriteCallback(mapSerial, WGPUBufferMapAsyncStatus_Success,
+                                     GetMappedRangeInternal(true), GetSize());
+                break;
+            case MapType::Async:
+                CallMapCallback(mapSerial, WGPUBufferMapAsyncStatus_Success);
+                break;
         }
     }
 
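To summarize the rules that ValidateMapAsync enforces above, here is a hedged sketch of calls and their expected outcomes, assuming buffer is a 16-byte buffer created with MapRead | CopyDst usage and currently unmapped, and cb/ud are placeholder callback and userdata arguments:

    buffer.MapAsync(wgpu::MapMode::Read, 0, 16, cb, ud);   // OK
    buffer.MapAsync(wgpu::MapMode::Read, 4, 0, cb, ud);    // OK: size 0 defaults to the rest of the buffer (12 bytes)
    buffer.MapAsync(wgpu::MapMode::Write, 0, 16, cb, ud);  // error: the buffer lacks MapWrite usage
    buffer.MapAsync(wgpu::MapMode::Read, 2, 4, cb, ud);    // error: offset must be a multiple of 4
    buffer.MapAsync(wgpu::MapMode::Read, 8, 12, cb, ud);   // error: offset + size does not fit in the buffer
    buffer.MapAsync(wgpu::MapMode::Read | wgpu::MapMode::Write, 0, 16, cb, ud);  // error: exactly one of Read or Write

Failing calls still invoke the callback (when non-null) with an Error status, in addition to raising a device validation error.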
@@ -25,6 +25,8 @@
 
 namespace dawn_native {
 
+    enum class MapType : uint32_t;
+
     MaybeError ValidateBufferDescriptor(DeviceBase* device, const BufferDescriptor* descriptor);
 
     static constexpr wgpu::BufferUsage kReadOnlyBufferUsages =
@@ -48,7 +50,7 @@ namespace dawn_native {
         wgpu::BufferUsage GetUsage() const;
 
         MaybeError MapAtCreation();
-        void OnMapCommandSerialFinished(uint32_t mapSerial, bool isWrite);
+        void OnMapCommandSerialFinished(uint32_t mapSerial, MapType type);
 
         MaybeError ValidateCanUseOnQueueNow() const;
 
@@ -60,6 +62,11 @@ namespace dawn_native {
         void SetSubData(uint64_t start, uint64_t count, const void* data);
         void MapReadAsync(WGPUBufferMapReadCallback callback, void* userdata);
         void MapWriteAsync(WGPUBufferMapWriteCallback callback, void* userdata);
+        void MapAsync(wgpu::MapMode mode,
+                      size_t offset,
+                      size_t size,
+                      WGPUBufferMapCallback callback,
+                      void* userdata);
         void* GetMappedRange();
         const void* GetConstMappedRange();
         void Unmap();
@@ -79,6 +86,7 @@ namespace dawn_native {
         virtual MaybeError MapAtCreationImpl() = 0;
         virtual MaybeError MapReadAsyncImpl() = 0;
         virtual MaybeError MapWriteAsyncImpl() = 0;
+        virtual MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) = 0;
         virtual void UnmapImpl() = 0;
         virtual void DestroyImpl() = 0;
         virtual void* GetMappedPointerImpl() = 0;
@@ -94,26 +102,32 @@ namespace dawn_native {
                                   WGPUBufferMapAsyncStatus status,
                                   void* pointer,
                                   uint64_t dataLength);
+        void CallMapCallback(uint32_t serial, WGPUBufferMapAsyncStatus status);
 
         MaybeError ValidateMap(wgpu::BufferUsage requiredUsage,
                                WGPUBufferMapAsyncStatus* status) const;
+        MaybeError ValidateMapAsync(wgpu::MapMode mode,
+                                    size_t offset,
+                                    size_t size,
+                                    WGPUBufferMapAsyncStatus* status) const;
         MaybeError ValidateUnmap() const;
         MaybeError ValidateDestroy() const;
         bool CanGetMappedRange(bool writable) const;
 
         uint64_t mSize = 0;
         wgpu::BufferUsage mUsage = wgpu::BufferUsage::None;
-        WGPUBufferMapReadCallback mMapReadCallback = nullptr;
-        WGPUBufferMapWriteCallback mMapWriteCallback = nullptr;
-        void* mMapUserdata = 0;
-        uint32_t mMapSerial = 0;
+        BufferState mState;
+        bool mIsDataInitialized = false;
 
         std::unique_ptr<StagingBufferBase> mStagingBuffer;
 
-        BufferState mState;
-        bool mIsDataInitialized = false;
+        WGPUBufferMapReadCallback mMapReadCallback = nullptr;
+        WGPUBufferMapWriteCallback mMapWriteCallback = nullptr;
+        WGPUBufferMapCallback mMapCallback = nullptr;
+        void* mMapUserdata = 0;
+        uint32_t mMapSerial = 0;
+        wgpu::MapMode mMapMode = wgpu::MapMode::None;
+        size_t mMapOffset = 0;
     };
 
 }  // namespace dawn_native
@@ -27,11 +27,11 @@ namespace dawn_native {
         ASSERT(mInflightRequests.Empty());
     }
 
-    void MapRequestTracker::Track(BufferBase* buffer, uint32_t mapSerial, bool isWrite) {
+    void MapRequestTracker::Track(BufferBase* buffer, uint32_t mapSerial, MapType type) {
         Request request;
         request.buffer = buffer;
         request.mapSerial = mapSerial;
-        request.isWrite = isWrite;
+        request.type = type;
 
         mInflightRequests.Enqueue(std::move(request), mDevice->GetPendingCommandSerial());
         mDevice->AddFutureCallbackSerial(mDevice->GetPendingCommandSerial());
@@ -39,7 +39,7 @@ namespace dawn_native {
 
     void MapRequestTracker::Tick(Serial finishedSerial) {
         for (auto& request : mInflightRequests.IterateUpTo(finishedSerial)) {
-            request.buffer->OnMapCommandSerialFinished(request.mapSerial, request.isWrite);
+            request.buffer->OnMapCommandSerialFinished(request.mapSerial, request.type);
         }
         mInflightRequests.ClearUpTo(finishedSerial);
     }
@@ -20,12 +20,15 @@
 
 namespace dawn_native {
 
+    // TODO(dawn:22) remove this enum once MapReadAsync/MapWriteAsync are removed.
+    enum class MapType : uint32_t { Read, Write, Async };
+
     class MapRequestTracker {
       public:
         MapRequestTracker(DeviceBase* device);
         ~MapRequestTracker();
 
-        void Track(BufferBase* buffer, uint32_t mapSerial, bool isWrite);
+        void Track(BufferBase* buffer, uint32_t mapSerial, MapType type);
         void Tick(Serial finishedSerial);
 
       private:
@@ -34,7 +37,7 @@ namespace dawn_native {
         struct Request {
             Ref<BufferBase> buffer;
            uint32_t mapSerial;
-            bool isWrite;
+            MapType type;
         };
         SerialQueue<Request> mInflightRequests;
     };
@@ -248,13 +248,23 @@ namespace dawn_native { namespace d3d12 {
         return (GetUsage() & (wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite)) != 0;
     }
 
-    MaybeError Buffer::MapInternal(bool isWrite, const char* contextInfo) {
+    MaybeError Buffer::MapInternal(bool isWrite,
+                                   size_t offset,
+                                   size_t size,
+                                   const char* contextInfo) {
         // The mapped buffer can be accessed at any time, so it must be locked to ensure it is never
         // evicted. This buffer should already have been made resident when it was created.
         Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
         DAWN_TRY(ToBackend(GetDevice())->GetResidencyManager()->LockAllocation(heap));
 
-        D3D12_RANGE range = {0, size_t(GetSize())};
+        D3D12_RANGE range = {offset, offset + size};
+        // mMappedData is the pointer to the start of the resource, irrespective of offset.
+        // MSDN says (note the weird use of "never"):
+        //
+        //   When ppData is not NULL, the pointer returned is never offset by any values in
+        //   pReadRange.
+        //
+        // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12resource-map
         DAWN_TRY(CheckHRESULT(GetD3D12Resource()->Map(0, &range, &mMappedData), contextInfo));
 
         if (isWrite) {
@@ -267,16 +277,20 @@ namespace dawn_native { namespace d3d12 {
     MaybeError Buffer::MapAtCreationImpl() {
         // Setting isMapWrite to false on MapRead buffers to silence D3D12 debug layer warning.
         bool isMapWrite = (GetUsage() & wgpu::BufferUsage::MapWrite) != 0;
-        DAWN_TRY(MapInternal(isMapWrite, "D3D12 map at creation"));
+        DAWN_TRY(MapInternal(isMapWrite, 0, size_t(GetSize()), "D3D12 map at creation"));
         return {};
     }
 
     MaybeError Buffer::MapReadAsyncImpl() {
-        return MapInternal(false, "D3D12 map read async");
+        return MapInternal(false, 0, size_t(GetSize()), "D3D12 map read async");
     }
 
     MaybeError Buffer::MapWriteAsyncImpl() {
-        return MapInternal(true, "D3D12 map write async");
+        return MapInternal(true, 0, size_t(GetSize()), "D3D12 map write async");
+    }
+
+    MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
+        return MapInternal(mode & wgpu::MapMode::Write, offset, size, "D3D12 map async");
     }
 
     void Buffer::UnmapImpl() {
@@ -291,6 +305,8 @@ namespace dawn_native { namespace d3d12 {
     }
 
     void* Buffer::GetMappedPointerImpl() {
+        // The frontend asks that the pointer returned is from the start of the resource
+        // irrespective of the offset passed in MapAsyncImpl, which is what mMappedData is.
        return mMappedData;
     }
 
@@ -366,7 +382,7 @@ namespace dawn_native { namespace d3d12 {
         // The state of the buffers on UPLOAD heap must always be GENERIC_READ and cannot be
         // changed away, so we can only clear such buffer with buffer mapping.
         if (D3D12HeapType(GetUsage()) == D3D12_HEAP_TYPE_UPLOAD) {
-            DAWN_TRY(MapInternal(true, "D3D12 map at clear buffer"));
+            DAWN_TRY(MapInternal(true, 0, size_t(GetSize()), "D3D12 map at clear buffer"));
             memset(mMappedData, clearValue, GetSize());
             UnmapImpl();
         } else {
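One detail worth calling out in the D3D12 path above: ID3D12Resource::Map treats the read range purely as a hint, and the pointer it returns always refers to the start of the resource (the MSDN wording quoted in the new comment). The frontend relies on exactly that, because BufferBase::GetMappedRange re-applies mMapOffset itself. A hedged sketch of the resulting pointer arithmetic:

    // Backend (D3D12): mMappedData points at byte 0 of the resource even though only
    // [offset, offset + size) was passed as the D3D12_RANGE hint.
    // Frontend: the user-visible pointer is rebased onto the mapped sub-range.
    void* userPointer = static_cast<uint8_t*>(GetMappedPointerImpl()) + mMapOffset;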
@@ -54,13 +54,14 @@ namespace dawn_native { namespace d3d12 {
         // Dawn API
         MaybeError MapReadAsyncImpl() override;
         MaybeError MapWriteAsyncImpl() override;
+        MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
         void UnmapImpl() override;
         void DestroyImpl() override;
 
         bool IsMappableAtCreation() const override;
         virtual MaybeError MapAtCreationImpl() override;
         void* GetMappedPointerImpl() override;
-        MaybeError MapInternal(bool isWrite, const char* contextInfo);
+        MaybeError MapInternal(bool isWrite, size_t start, size_t end, const char* contextInfo);
 
         bool TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
                                                   D3D12_RESOURCE_BARRIER* barrier,
@@ -43,6 +43,7 @@ namespace dawn_native { namespace metal {
         // Dawn API
         MaybeError MapReadAsyncImpl() override;
         MaybeError MapWriteAsyncImpl() override;
+        MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
         void UnmapImpl() override;
         void DestroyImpl() override;
         void* GetMappedPointerImpl() override;
@@ -121,6 +121,10 @@ namespace dawn_native { namespace metal {
         return {};
     }
 
+    MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
+        return {};
+    }
+
     void* Buffer::GetMappedPointerImpl() {
         return [mMtlBuffer contents];
     }
@@ -312,6 +312,10 @@ namespace dawn_native { namespace null {
         return {};
     }
 
+    MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
+        return {};
+    }
+
     void* Buffer::GetMappedPointerImpl() {
         return mBackingData.get();
     }
@@ -201,6 +201,7 @@ namespace dawn_native { namespace null {
         // Dawn API
         MaybeError MapReadAsyncImpl() override;
         MaybeError MapWriteAsyncImpl() override;
+        MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
         void UnmapImpl() override;
         void DestroyImpl() override;
 
@@ -125,7 +125,37 @@ namespace dawn_native { namespace opengl {
         return {};
     }
 
+    MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
+        const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+
+        // It is an error to map an empty range in OpenGL. We always have at least a 4-byte buffer
+        // so we extend the range to be 4 bytes.
+        if (size == 0) {
+            if (offset != 0) {
+                offset -= 4;
+            }
+            size = 4;
+        }
+
+        // TODO(cwallez@chromium.org): this does GPU->CPU synchronization, we could require a high
+        // version of OpenGL that would let us map the buffer unsynchronized.
+        gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
+        void* mappedData = nullptr;
+        if (mode & wgpu::MapMode::Read) {
+            mappedData = gl.MapBufferRange(GL_ARRAY_BUFFER, offset, size, GL_MAP_READ_BIT);
+        } else {
+            ASSERT(mode & wgpu::MapMode::Write);
+            mappedData = gl.MapBufferRange(GL_ARRAY_BUFFER, offset, size, GL_MAP_WRITE_BIT);
+        }
+
+        // The frontend asks that the pointer returned by GetMappedPointerImpl is from the start of
+        // the resource but OpenGL gives us the pointer at offset. Remove the offset.
+        mMappedData = static_cast<uint8_t*>(mappedData) - offset;
+        return {};
+    }
+
     void* Buffer::GetMappedPointerImpl() {
+        // The mapping offset has already been removed.
         return mMappedData;
     }
 
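The OpenGL path above is the mirror image of the D3D12 one: glMapBufferRange returns a pointer to the start of the requested range, so the backend subtracts the offset to keep the frontend contract that GetMappedPointerImpl points at the logical start of the buffer. A hedged sketch of the round trip:

    // Backend (OpenGL): rebase the range pointer back to the start of the buffer.
    void* rangePtr = gl.MapBufferRange(GL_ARRAY_BUFFER, offset, size, GL_MAP_READ_BIT);
    mMappedData = static_cast<uint8_t*>(rangePtr) - offset;

    // Frontend: GetMappedRange re-applies mMapOffset, so the user ends up seeing rangePtr again.
    void* userPointer = static_cast<uint8_t*>(GetMappedPointerImpl()) + mMapOffset;  // == rangePtr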
@@ -37,6 +37,7 @@ namespace dawn_native { namespace opengl {
         // Dawn API
         MaybeError MapReadAsyncImpl() override;
         MaybeError MapWriteAsyncImpl() override;
+        MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
         void UnmapImpl() override;
         void DestroyImpl() override;
 
@@ -259,6 +259,19 @@ namespace dawn_native { namespace vulkan {
         return {};
     }
 
+    MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
+        Device* device = ToBackend(GetDevice());
+
+        CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
+        if (mode & wgpu::MapMode::Read) {
+            TransitionUsageNow(recordingContext, wgpu::BufferUsage::MapRead);
+        } else {
+            ASSERT(mode & wgpu::MapMode::Write);
+            TransitionUsageNow(recordingContext, wgpu::BufferUsage::MapWrite);
+        }
+        return {};
+    }
+
     void Buffer::UnmapImpl() {
         // No need to do anything, we keep CPU-visible memory mapped at all time.
     }
@@ -58,6 +58,7 @@ namespace dawn_native { namespace vulkan {
         // Dawn API
         MaybeError MapReadAsyncImpl() override;
         MaybeError MapWriteAsyncImpl() override;
+        MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
         void UnmapImpl() override;
         void DestroyImpl() override;
 
@@ -239,6 +239,72 @@ namespace dawn_wire { namespace client {
         SerializeBufferMapAsync(this, serial, writeHandle);
     }
 
+    void Buffer::MapAsync(WGPUMapModeFlags mode,
+                          size_t offset,
+                          size_t size,
+                          WGPUBufferMapCallback callback,
+                          void* userdata) {
+        // Do early validation for mode because it needs to be correct for the proxying to
+        // MapReadAsync or MapWriteAsync to work.
+        bool isReadMode = mode & WGPUMapMode_Read;
+        bool isWriteMode = mode & WGPUMapMode_Write;
+        bool modeOk = isReadMode ^ isWriteMode;
+        // Do early validation of offset and size because it isn't checked by MapReadAsync /
+        // MapWriteAsync.
+        bool offsetOk = (uint64_t(offset) <= mSize) && offset % 4 == 0;
+        bool sizeOk = (uint64_t(size) <= mSize - uint64_t(offset)) && size % 4 == 0;
+
+        if (!(modeOk && offsetOk && sizeOk)) {
+            device->InjectError(WGPUErrorType_Validation, "MapAsync error (you figure out :P)");
+            if (callback != nullptr) {
+                callback(WGPUBufferMapAsyncStatus_Error, userdata);
+            }
+            return;
+        }
+
+        // The structure to keep arguments so we can forward the MapReadAsync and MapWriteAsync to
+        // `callback`
+        struct ProxyData {
+            WGPUBufferMapCallback callback;
+            void* userdata;
+            size_t mapOffset;
+            Buffer* self;
+        };
+        ProxyData* proxy = new ProxyData;
+        proxy->callback = callback;
+        proxy->userdata = userdata;
+        proxy->mapOffset = offset;
+        proxy->self = this;
+        // Note technically we should keep the buffer alive until the callback is fired but the
+        // client doesn't have good facilities to do that yet.
+
+        // Call MapReadAsync or MapWriteAsync and forward the callback.
+        if (mode & WGPUMapMode_Read) {
+            MapReadAsync(
+                [](WGPUBufferMapAsyncStatus status, const void*, uint64_t, void* userdata) {
+                    ProxyData* proxy = static_cast<ProxyData*>(userdata);
+                    if (proxy->callback) {
+                        proxy->callback(status, proxy->userdata);
+                    }
+                    proxy->self->mMapOffset = proxy->mapOffset;
+                    delete proxy;
+                },
+                proxy);
+        } else {
+            ASSERT(mode & WGPUMapMode_Write);
+            MapWriteAsync(
+                [](WGPUBufferMapAsyncStatus status, void*, uint64_t, void* userdata) {
+                    ProxyData* proxy = static_cast<ProxyData*>(userdata);
+                    if (proxy->callback) {
+                        proxy->callback(status, proxy->userdata);
+                    }
+                    proxy->self->mMapOffset = proxy->mapOffset;
+                    delete proxy;
+                },
+                proxy);
+        }
+    }
+
     bool Buffer::OnMapReadAsyncCallback(uint32_t requestSerial,
                                         uint32_t status,
                                         uint64_t initialDataInfoLength,
@@ -369,14 +435,14 @@ namespace dawn_wire { namespace client {
         if (!IsMappedForWriting()) {
             return nullptr;
         }
-        return mMappedData;
+        return static_cast<uint8_t*>(mMappedData) + mMapOffset;
     }
 
     const void* Buffer::GetConstMappedRange() {
         if (!IsMappedForWriting() && !IsMappedForReading()) {
             return nullptr;
         }
-        return mMappedData;
+        return static_cast<uint8_t*>(mMappedData) + mMapOffset;
     }
 
     void Buffer::Unmap() {
@@ -413,6 +479,7 @@ namespace dawn_wire { namespace client {
             mReadHandle = nullptr;
         }
         mMappedData = nullptr;
+        mMapOffset = 0;
         ClearMapRequests(WGPUBufferMapAsyncStatus_Unknown);
 
         BufferUnmapCmd cmd;
@@ -43,6 +43,11 @@ namespace dawn_wire { namespace client {
                                      uint64_t initialDataInfoLength,
                                      const uint8_t* initialDataInfo);
         bool OnMapWriteAsyncCallback(uint32_t requestSerial, uint32_t status);
+        void MapAsync(WGPUMapModeFlags mode,
+                      size_t offset,
+                      size_t size,
+                      WGPUBufferMapCallback callback,
+                      void* userdata);
         void* GetMappedRange();
         const void* GetConstMappedRange();
         void Unmap();
@@ -77,6 +82,7 @@ namespace dawn_wire { namespace client {
         std::unique_ptr<MemoryTransferService::ReadHandle> mReadHandle = nullptr;
         std::unique_ptr<MemoryTransferService::WriteHandle> mWriteHandle = nullptr;
         void* mMappedData = nullptr;
+        size_t mMapOffset = 0;
     };
 
 }}  // namespace dawn_wire::client
@ -317,6 +317,216 @@ DAWN_INSTANTIATE_TEST(BufferMapWriteTests,
|
||||||
OpenGLBackend(),
|
OpenGLBackend(),
|
||||||
VulkanBackend());
|
VulkanBackend());
|
||||||
|
|
||||||
|
class BufferMappingTests : public DawnTest {
|
||||||
|
protected:
|
||||||
|
void MapAsyncAndWait(const wgpu::Buffer& buffer,
|
||||||
|
wgpu::MapMode mode,
|
||||||
|
size_t offset,
|
||||||
|
size_t size) {
|
||||||
|
bool done = false;
|
||||||
|
buffer.MapAsync(
|
||||||
|
mode, offset, size,
|
||||||
|
[](WGPUBufferMapAsyncStatus status, void* userdata) {
|
||||||
|
ASSERT_EQ(WGPUBufferMapAsyncStatus_Success, status);
|
||||||
|
*static_cast<bool*>(userdata) = true;
|
||||||
|
},
|
||||||
|
&done);
|
||||||
|
|
||||||
|
while (!done) {
|
||||||
|
WaitABit();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
wgpu::Buffer CreateMapReadBuffer(uint64_t size) {
|
||||||
|
wgpu::BufferDescriptor descriptor;
|
||||||
|
descriptor.size = size;
|
||||||
|
descriptor.usage = wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopyDst;
|
||||||
|
return device.CreateBuffer(&descriptor);
|
||||||
|
}
|
||||||
|
|
||||||
|
wgpu::Buffer CreateMapWriteBuffer(uint64_t size) {
|
||||||
|
wgpu::BufferDescriptor descriptor;
|
||||||
|
descriptor.size = size;
|
||||||
|
descriptor.usage = wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
|
||||||
|
return device.CreateBuffer(&descriptor);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// Test that the simplest map read works
|
||||||
|
TEST_P(BufferMappingTests, MapRead_Basic) {
|
||||||
|
wgpu::Buffer buffer = CreateMapReadBuffer(4);
|
||||||
|
|
||||||
|
uint32_t myData = 0x01020304;
|
||||||
|
queue.WriteBuffer(buffer, 0, &myData, sizeof(myData));
|
||||||
|
|
||||||
|
MapAsyncAndWait(buffer, wgpu::MapMode::Read, 0, 4);
|
||||||
|
ASSERT_NE(nullptr, buffer.GetConstMappedRange());
|
||||||
|
ASSERT_EQ(myData, *static_cast<const uint32_t*>(buffer.GetConstMappedRange()));
|
||||||
|
buffer.Unmap();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test map-reading a zero-sized buffer.
|
||||||
|
TEST_P(BufferMappingTests, MapRead_ZeroSized) {
|
||||||
|
wgpu::Buffer buffer = CreateMapReadBuffer(0);
|
||||||
|
|
||||||
|
MapAsyncAndWait(buffer, wgpu::MapMode::Read, 0, 0);
|
||||||
|
ASSERT_NE(buffer.GetConstMappedRange(), nullptr);
|
||||||
|
buffer.Unmap();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test map-reading with a non-zero offset
|
||||||
|
TEST_P(BufferMappingTests, MapRead_NonZeroOffset) {
|
||||||
|
wgpu::Buffer buffer = CreateMapReadBuffer(8);
|
||||||
|
|
||||||
|
uint32_t myData[2] = {0x01020304, 0x05060708};
|
||||||
|
queue.WriteBuffer(buffer, 0, &myData, sizeof(myData));
|
||||||
|
|
||||||
|
MapAsyncAndWait(buffer, wgpu::MapMode::Read, 4, 4);
|
||||||
|
ASSERT_EQ(myData[1], *static_cast<const uint32_t*>(buffer.GetConstMappedRange()));
|
||||||
|
buffer.Unmap();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Map read and unmap twice. Test that both of these two iterations work.
|
||||||
|
TEST_P(BufferMappingTests, MapRead_Twice) {
|
||||||
|
wgpu::Buffer buffer = CreateMapReadBuffer(4);
|
||||||
|
|
||||||
|
uint32_t myData = 0x01020304;
|
||||||
|
queue.WriteBuffer(buffer, 0, &myData, sizeof(myData));
|
||||||
|
|
||||||
|
MapAsyncAndWait(buffer, wgpu::MapMode::Read, 0, 4);
|
||||||
|
ASSERT_EQ(myData, *static_cast<const uint32_t*>(buffer.GetConstMappedRange()));
|
||||||
|
buffer.Unmap();
|
||||||
|
|
||||||
|
myData = 0x05060708;
|
||||||
|
queue.WriteBuffer(buffer, 0, &myData, sizeof(myData));
|
||||||
|
|
||||||
|
MapAsyncAndWait(buffer, wgpu::MapMode::Read, 0, 4);
|
||||||
|
ASSERT_EQ(myData, *static_cast<const uint32_t*>(buffer.GetConstMappedRange()));
|
||||||
|
buffer.Unmap();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test map-reading a large buffer.
|
||||||
|
TEST_P(BufferMappingTests, MapRead_Large) {
|
||||||
|
constexpr uint32_t kDataSize = 1000 * 1000;
|
||||||
|
wgpu::Buffer buffer = CreateMapReadBuffer(kDataSize * sizeof(uint32_t));
|
||||||
|
|
||||||
|
std::vector<uint32_t> myData;
|
||||||
|
for (uint32_t i = 0; i < kDataSize; ++i) {
|
||||||
|
myData.push_back(i);
|
||||||
|
}
|
||||||
|
queue.WriteBuffer(buffer, 0, myData.data(), kDataSize * sizeof(uint32_t));
|
||||||
|
|
||||||
|
MapAsyncAndWait(buffer, wgpu::MapMode::Read, 0, 4);
|
||||||
|
ASSERT_EQ(0, memcmp(buffer.GetConstMappedRange(), myData.data(), kDataSize * sizeof(uint32_t)));
|
||||||
|
buffer.Unmap();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that the simplest map write works.
|
||||||
|
TEST_P(BufferMappingTests, MapWrite_Basic) {
|
||||||
|
wgpu::Buffer buffer = CreateMapWriteBuffer(4);
|
||||||
|
|
||||||
|
uint32_t myData = 2934875;
|
||||||
|
MapAsyncAndWait(buffer, wgpu::MapMode::Write, 0, 4);
|
||||||
|
ASSERT_NE(nullptr, buffer.GetMappedRange());
|
||||||
|
ASSERT_NE(nullptr, buffer.GetConstMappedRange());
|
||||||
|
memcpy(buffer.GetMappedRange(), &myData, sizeof(myData));
|
||||||
|
buffer.Unmap();
|
||||||
|
|
||||||
|
EXPECT_BUFFER_U32_EQ(myData, buffer, 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test map-writing a zero-sized buffer.
|
||||||
|
TEST_P(BufferMappingTests, MapWrite_ZeroSized) {
|
||||||
|
wgpu::Buffer buffer = CreateMapWriteBuffer(0);
|
||||||
|
|
||||||
|
MapAsyncAndWait(buffer, wgpu::MapMode::Write, 0, 0);
|
||||||
|
ASSERT_NE(buffer.GetConstMappedRange(), nullptr);
|
||||||
|
ASSERT_NE(buffer.GetMappedRange(), nullptr);
|
||||||
|
buffer.Unmap();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test map-writing with a non-zero offset.
|
||||||
|
TEST_P(BufferMappingTests, MapWrite_NonZeroOffset) {
|
||||||
|
wgpu::Buffer buffer = CreateMapWriteBuffer(8);
|
||||||
|
|
||||||
|
uint32_t myData = 2934875;
|
||||||
|
MapAsyncAndWait(buffer, wgpu::MapMode::Write, 4, 4);
|
||||||
|
memcpy(buffer.GetMappedRange(), &myData, sizeof(myData));
|
||||||
|
buffer.Unmap();
|
||||||
|
|
||||||
|
EXPECT_BUFFER_U32_EQ(myData, buffer, 4);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Map, write and unmap twice. Test that both of these two iterations work.
|
||||||
|
TEST_P(BufferMappingTests, MapWrite_Twice) {
|
||||||
|
wgpu::Buffer buffer = CreateMapWriteBuffer(4);
|
||||||
|
|
||||||
|
uint32_t myData = 2934875;
|
||||||
|
MapAsyncAndWait(buffer, wgpu::MapMode::Write, 0, 4);
|
||||||
|
memcpy(buffer.GetMappedRange(), &myData, sizeof(myData));
|
||||||
|
buffer.Unmap();
|
||||||
|
|
||||||
|
EXPECT_BUFFER_U32_EQ(myData, buffer, 0);
|
||||||
|
|
||||||
|
myData = 9999999;
|
||||||
|
MapAsyncAndWait(buffer, wgpu::MapMode::Write, 0, 4);
|
||||||
|
memcpy(buffer.GetMappedRange(), &myData, sizeof(myData));
|
||||||
|
buffer.Unmap();
|
||||||
|
|
||||||
|
EXPECT_BUFFER_U32_EQ(myData, buffer, 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test mapping a large buffer.
|
||||||
|
TEST_P(BufferMappingTests, MapWrite_Large) {
|
||||||
|
constexpr uint32_t kDataSize = 1000 * 1000;
|
||||||
|
wgpu::Buffer buffer = CreateMapWriteBuffer(kDataSize * sizeof(uint32_t));
|
||||||
|
|
||||||
|
std::vector<uint32_t> myData;
|
||||||
|
for (uint32_t i = 0; i < kDataSize; ++i) {
|
||||||
|
myData.push_back(i);
|
||||||
|
}
|
||||||
|
|
||||||
|
MapAsyncAndWait(buffer, wgpu::MapMode::Write, 0, 4);
|
||||||
|
memcpy(buffer.GetMappedRange(), myData.data(), kDataSize * sizeof(uint32_t));
|
||||||
|
buffer.Unmap();
|
||||||
|
|
||||||
|
EXPECT_BUFFER_U32_RANGE_EQ(myData.data(), buffer, 0, kDataSize);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that the map offset isn't updated when the call is an error.
|
||||||
|
TEST_P(BufferMappingTests, OffsetNotUpdatedOnError) {
|
||||||
|
uint32_t data[3] = {0xCA7, 0xB0A7, 0xBA7};
|
||||||
|
wgpu::Buffer buffer = CreateMapReadBuffer(sizeof(data));
|
||||||
|
queue.WriteBuffer(buffer, 0, data, sizeof(data));
|
||||||
|
|
||||||
|
// Map the buffer but do not wait on the result yet.
|
||||||
|
bool done = false;
|
||||||
|
buffer.MapAsync(
|
||||||
|
wgpu::MapMode::Read, 4, 4,
|
||||||
|
[](WGPUBufferMapAsyncStatus status, void* userdata) {
|
||||||
|
ASSERT_EQ(WGPUBufferMapAsyncStatus_Success, status);
|
||||||
|
*static_cast<bool*>(userdata) = true;
|
||||||
|
},
|
||||||
|
&done);
|
||||||
|
|
||||||
|
// Call MapAsync another time, it is an error because the buffer is already being mapped so
|
||||||
|
// mMapOffset is not updated.
|
||||||
|
ASSERT_DEVICE_ERROR(buffer.MapAsync(wgpu::MapMode::Read, 8, 4, nullptr, nullptr));
|
||||||
|
|
||||||
|
while (!done) {
|
||||||
|
WaitABit();
|
||||||
|
}
|
||||||
|
|
||||||
|
// mMapOffset has not been updated so it should still be 4, which is data[1]
|
||||||
|
ASSERT_EQ(0, memcmp(buffer.GetConstMappedRange(), &data[1], sizeof(uint32_t)));
|
||||||
|
}
|
||||||
|
|
||||||
|
DAWN_INSTANTIATE_TEST(BufferMappingTests,
|
||||||
|
D3D12Backend(),
|
||||||
|
MetalBackend(),
|
||||||
|
OpenGLBackend(),
|
||||||
|
VulkanBackend());
class CreateBufferMappedTests : public DawnTest {
  protected:
    static void MapReadCallback(WGPUBufferMapAsyncStatus status,

@ -1058,6 +1268,42 @@ TEST_P(BufferTests, CreateBufferOOMMapWriteAsync) {
    RunTest(descriptor);
}

// Test that mapping an OOM buffer fails gracefully
TEST_P(BufferTests, CreateBufferOOMMapAsync) {
    // TODO(http://crbug.com/dawn/27): Missing support.
    DAWN_SKIP_TEST_IF(IsOpenGL());
    DAWN_SKIP_TEST_IF(IsAsan());

    auto RunTest = [this](const wgpu::BufferDescriptor& descriptor) {
        wgpu::Buffer buffer;
        ASSERT_DEVICE_ERROR(buffer = device.CreateBuffer(&descriptor));

        bool done = false;
        ASSERT_DEVICE_ERROR(buffer.MapAsync(
            wgpu::MapMode::Write, 0, 4,
            [](WGPUBufferMapAsyncStatus status, void* userdata) {
                EXPECT_EQ(status, WGPUBufferMapAsyncStatus_Error);
                *static_cast<bool*>(userdata) = true;
            },
            &done));

        while (!done) {
            WaitABit();
        }
    };

    wgpu::BufferDescriptor descriptor;
    descriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::MapWrite;

    // Test an enormous buffer
    descriptor.size = std::numeric_limits<uint64_t>::max();
    RunTest(descriptor);

    // UINT64_MAX may be special-cased. Test that a smaller, but still very large, buffer also fails.
    descriptor.size = 1ull << 50;
    RunTest(descriptor);
}

DAWN_INSTANTIATE_TEST(BufferTests,
                      D3D12Backend(),
                      MetalBackend(),

@ -58,6 +58,16 @@ static void ToMockBufferMapWriteCallback(WGPUBufferMapAsyncStatus status,
                                     userdata);
}

class MockBufferMapAsyncCallback {
  public:
    MOCK_METHOD(void, Call, (WGPUBufferMapAsyncStatus status, void* userdata));
};

static std::unique_ptr<MockBufferMapAsyncCallback> mockBufferMapAsyncCallback;
static void ToMockBufferMapAsyncCallback(WGPUBufferMapAsyncStatus status, void* userdata) {
    mockBufferMapAsyncCallback->Call(status, userdata);
}

class BufferValidationTest : public ValidationTest {
  protected:
    wgpu::Buffer CreateMapReadBuffer(uint64_t size) {

@ -67,6 +77,7 @@ class BufferValidationTest : public ValidationTest {

        return device.CreateBuffer(&descriptor);
    }

    wgpu::Buffer CreateMapWriteBuffer(uint64_t size) {
        wgpu::BufferDescriptor descriptor;
        descriptor.size = size;

@ -92,6 +103,13 @@ class BufferValidationTest : public ValidationTest {
        return device.CreateBuffer(&descriptor);
    }

    void AssertMapAsyncError(wgpu::Buffer buffer, wgpu::MapMode mode, size_t offset, size_t size) {
        EXPECT_CALL(*mockBufferMapAsyncCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);

        ASSERT_DEVICE_ERROR(
            buffer.MapAsync(mode, offset, size, ToMockBufferMapAsyncCallback, nullptr));
    }

    wgpu::Queue queue;

  private:

@ -100,6 +118,7 @@ class BufferValidationTest : public ValidationTest {

        mockBufferMapReadCallback = std::make_unique<MockBufferMapReadCallback>();
        mockBufferMapWriteCallback = std::make_unique<MockBufferMapWriteCallback>();
        mockBufferMapAsyncCallback = std::make_unique<MockBufferMapAsyncCallback>();
        queue = device.GetDefaultQueue();
    }

@ -107,6 +126,7 @@ class BufferValidationTest : public ValidationTest {
        // Delete mocks so that expectations are checked
        mockBufferMapReadCallback = nullptr;
        mockBufferMapWriteCallback = nullptr;
        mockBufferMapAsyncCallback = nullptr;

        ValidationTest::TearDown();
    }

@ -164,7 +184,272 @@ TEST_F(BufferValidationTest, CreationMapUsageRestrictions) {
}

// Test the success case for mapping buffer for reading
TEST_F(BufferValidationTest, MapReadSuccess) {
TEST_F(BufferValidationTest, MapAsync_ReadSuccess) {
    wgpu::Buffer buf = CreateMapReadBuffer(4);

    buf.MapAsync(wgpu::MapMode::Read, 0, 4, ToMockBufferMapAsyncCallback, nullptr);

    EXPECT_CALL(*mockBufferMapAsyncCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
    WaitForAllOperations(device);

    buf.Unmap();
}

// Test the success case for mapping buffer for writing
TEST_F(BufferValidationTest, MapAsync_WriteSuccess) {
    wgpu::Buffer buf = CreateMapWriteBuffer(4);

    buf.MapAsync(wgpu::MapMode::Write, 0, 4, ToMockBufferMapAsyncCallback, nullptr);

    EXPECT_CALL(*mockBufferMapAsyncCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
    WaitForAllOperations(device);

    buf.Unmap();
}

// Test map async with a buffer that's an error
TEST_F(BufferValidationTest, MapAsync_ErrorBuffer) {
    wgpu::BufferDescriptor desc;
    desc.size = 4;
    desc.usage = wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite;
    wgpu::Buffer buffer;
    ASSERT_DEVICE_ERROR(buffer = device.CreateBuffer(&desc));

    AssertMapAsyncError(buffer, wgpu::MapMode::Read, 0, 4);
    AssertMapAsyncError(buffer, wgpu::MapMode::Write, 0, 4);
}

// Test map async with an invalid offset and size alignment.
TEST_F(BufferValidationTest, MapAsync_OffsetSizeAlignment) {
    // Control case, both aligned to 4 is ok.
    {
        wgpu::Buffer buffer = CreateMapReadBuffer(8);
        buffer.MapAsync(wgpu::MapMode::Read, 4, 4, nullptr, nullptr);
    }
    {
        wgpu::Buffer buffer = CreateMapWriteBuffer(8);
        buffer.MapAsync(wgpu::MapMode::Write, 4, 4, nullptr, nullptr);
    }

    // Error case, offset aligned to 2 is an error.
    {
        wgpu::Buffer buffer = CreateMapReadBuffer(8);
        AssertMapAsyncError(buffer, wgpu::MapMode::Read, 2, 4);
    }
    {
        wgpu::Buffer buffer = CreateMapWriteBuffer(8);
        AssertMapAsyncError(buffer, wgpu::MapMode::Write, 2, 4);
    }

    // Error case, size aligned to 2 is an error.
    {
        wgpu::Buffer buffer = CreateMapReadBuffer(8);
        AssertMapAsyncError(buffer, wgpu::MapMode::Read, 0, 6);
    }
    {
        wgpu::Buffer buffer = CreateMapWriteBuffer(8);
        AssertMapAsyncError(buffer, wgpu::MapMode::Write, 0, 6);
    }
}

// Test map async with a buffer that has the wrong usage
TEST_F(BufferValidationTest, MapAsync_WrongUsage) {
    {
        wgpu::BufferDescriptor desc;
        desc.usage = wgpu::BufferUsage::Vertex;
        desc.size = 4;
        wgpu::Buffer buffer = device.CreateBuffer(&desc);

        AssertMapAsyncError(buffer, wgpu::MapMode::Read, 0, 4);
        AssertMapAsyncError(buffer, wgpu::MapMode::Write, 0, 4);
    }
    {
        wgpu::Buffer buffer = CreateMapReadBuffer(4);
        AssertMapAsyncError(buffer, wgpu::MapMode::Write, 0, 4);
    }
    {
        wgpu::Buffer buffer = CreateMapWriteBuffer(4);
        AssertMapAsyncError(buffer, wgpu::MapMode::Read, 0, 4);
    }
}

// Test map async with a wrong mode
TEST_F(BufferValidationTest, MapAsync_WrongMode) {
    {
        wgpu::Buffer buffer = CreateMapReadBuffer(4);
        AssertMapAsyncError(buffer, wgpu::MapMode::None, 0, 4);
    }
    {
        wgpu::Buffer buffer = CreateMapReadBuffer(4);
        AssertMapAsyncError(buffer, wgpu::MapMode::Read | wgpu::MapMode::Write, 0, 4);
    }
}

// Test map async with a buffer that's already mapped
TEST_F(BufferValidationTest, MapAsync_AlreadyMapped) {
    {
        wgpu::Buffer buffer = CreateMapReadBuffer(4);
        buffer.MapAsync(wgpu::MapMode::Read, 0, 4, nullptr, nullptr);
        AssertMapAsyncError(buffer, wgpu::MapMode::Read, 0, 4);
    }
    {
        wgpu::Buffer buffer = BufferMappedAtCreation(4, wgpu::BufferUsage::MapRead);
        AssertMapAsyncError(buffer, wgpu::MapMode::Read, 0, 4);
    }
    {
        wgpu::Buffer buffer = CreateMapWriteBuffer(4);
        buffer.MapAsync(wgpu::MapMode::Write, 0, 4, nullptr, nullptr);
        AssertMapAsyncError(buffer, wgpu::MapMode::Write, 0, 4);
    }
    {
        wgpu::Buffer buffer = BufferMappedAtCreation(4, wgpu::BufferUsage::MapWrite);
        AssertMapAsyncError(buffer, wgpu::MapMode::Write, 0, 4);
    }
}

// Test map async with a buffer that's destroyed
TEST_F(BufferValidationTest, MapAsync_Destroy) {
    {
        wgpu::Buffer buffer = CreateMapReadBuffer(4);
        buffer.Destroy();
        AssertMapAsyncError(buffer, wgpu::MapMode::Read, 0, 4);
    }
    {
        wgpu::Buffer buffer = CreateMapWriteBuffer(4);
        buffer.Destroy();
        AssertMapAsyncError(buffer, wgpu::MapMode::Write, 0, 4);
    }
}

// Test map async but unmapping before the result is ready.
TEST_F(BufferValidationTest, MapAsync_UnmapBeforeResult) {
    {
        wgpu::Buffer buf = CreateMapReadBuffer(4);
        buf.MapAsync(wgpu::MapMode::Read, 0, 4, ToMockBufferMapAsyncCallback, nullptr);

        EXPECT_CALL(*mockBufferMapAsyncCallback, Call(WGPUBufferMapAsyncStatus_Unknown, _))
            .Times(1);
        buf.Unmap();

        // The callback shouldn't be called again.
        WaitForAllOperations(device);
    }
    {
        wgpu::Buffer buf = CreateMapWriteBuffer(4);
        buf.MapAsync(wgpu::MapMode::Write, 0, 4, ToMockBufferMapAsyncCallback, nullptr);

        EXPECT_CALL(*mockBufferMapAsyncCallback, Call(WGPUBufferMapAsyncStatus_Unknown, _))
            .Times(1);
        buf.Unmap();

        // The callback shouldn't be called again.
        WaitForAllOperations(device);
    }
}

// When a MapAsync is cancelled with Unmap, it might still be in flight. Test that doing a new
// request works as expected and that we don't get the cancelled request's data.
TEST_F(BufferValidationTest, MapAsync_UnmapBeforeResultAndMapAgain) {
    {
        wgpu::Buffer buf = CreateMapReadBuffer(4);
        buf.MapAsync(wgpu::MapMode::Read, 0, 4, ToMockBufferMapAsyncCallback, this + 0);

        EXPECT_CALL(*mockBufferMapAsyncCallback, Call(WGPUBufferMapAsyncStatus_Unknown, this + 0))
            .Times(1);
        buf.Unmap();

        buf.MapAsync(wgpu::MapMode::Read, 0, 4, ToMockBufferMapAsyncCallback, this + 1);
        EXPECT_CALL(*mockBufferMapAsyncCallback, Call(WGPUBufferMapAsyncStatus_Success, this + 1))
            .Times(1);
        WaitForAllOperations(device);
    }
    {
        wgpu::Buffer buf = CreateMapWriteBuffer(4);
        buf.MapAsync(wgpu::MapMode::Write, 0, 4, ToMockBufferMapAsyncCallback, this + 0);

        EXPECT_CALL(*mockBufferMapAsyncCallback, Call(WGPUBufferMapAsyncStatus_Unknown, this + 0))
            .Times(1);
        buf.Unmap();

        buf.MapAsync(wgpu::MapMode::Write, 0, 4, ToMockBufferMapAsyncCallback, this + 1);
        EXPECT_CALL(*mockBufferMapAsyncCallback, Call(WGPUBufferMapAsyncStatus_Success, this + 1))
            .Times(1);
        WaitForAllOperations(device);
    }
}

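The "this + 0" and "this + 1" arguments above are never dereferenced; they are just two distinct pointer values used as userdata tags so the mock can tell which of the two requests completed. Outside the test harness, the same userdata parameter is how callers typically associate a MapAsync request with its result. A minimal sketch, assuming a buffer created with MapRead usage and a hypothetical Request struct as the tag (neither is part of this change):

    // Illustrative only: a hypothetical per-request struct passed through userdata.
    struct Request {
        uint32_t id;
        bool done = false;
        bool success = false;
    };

    Request request = {42};
    buffer.MapAsync(
        wgpu::MapMode::Read, 0, 4,
        [](WGPUBufferMapAsyncStatus status, void* userdata) {
            // The userdata pointer handed to MapAsync comes back unchanged.
            Request* req = static_cast<Request*>(userdata);
            req->success = (status == WGPUBufferMapAsyncStatus_Success);
            req->done = true;
        },
        &request);
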
// Test map async but destroying before the result is ready.
TEST_F(BufferValidationTest, MapAsync_DestroyBeforeResult) {
    {
        wgpu::Buffer buf = CreateMapReadBuffer(4);
        buf.MapAsync(wgpu::MapMode::Read, 0, 4, ToMockBufferMapAsyncCallback, nullptr);

        EXPECT_CALL(*mockBufferMapAsyncCallback, Call(WGPUBufferMapAsyncStatus_Unknown, _))
            .Times(1);
        buf.Destroy();

        // The callback shouldn't be called again.
        WaitForAllOperations(device);
    }
    {
        wgpu::Buffer buf = CreateMapWriteBuffer(4);
        buf.MapAsync(wgpu::MapMode::Write, 0, 4, ToMockBufferMapAsyncCallback, nullptr);

        EXPECT_CALL(*mockBufferMapAsyncCallback, Call(WGPUBufferMapAsyncStatus_Unknown, _))
            .Times(1);
        buf.Destroy();

        // The callback shouldn't be called again.
        WaitForAllOperations(device);
    }
}

// Test that the MapCallback isn't fired twice when unmap() is called inside the callback
TEST_F(BufferValidationTest, MapAsync_UnmapCalledInCallback) {
    {
        wgpu::Buffer buf = CreateMapReadBuffer(4);
        buf.MapAsync(wgpu::MapMode::Read, 0, 4, ToMockBufferMapAsyncCallback, nullptr);

        EXPECT_CALL(*mockBufferMapAsyncCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
            .WillOnce(InvokeWithoutArgs([&]() { buf.Unmap(); }));

        WaitForAllOperations(device);
    }
    {
        wgpu::Buffer buf = CreateMapWriteBuffer(4);
        buf.MapAsync(wgpu::MapMode::Write, 0, 4, ToMockBufferMapAsyncCallback, nullptr);

        EXPECT_CALL(*mockBufferMapAsyncCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
            .WillOnce(InvokeWithoutArgs([&]() { buf.Unmap(); }));

        WaitForAllOperations(device);
    }
}

// Test that the MapCallback isn't fired twice when destroy() is called inside the callback
TEST_F(BufferValidationTest, MapAsync_DestroyCalledInCallback) {
    {
        wgpu::Buffer buf = CreateMapReadBuffer(4);
        buf.MapAsync(wgpu::MapMode::Read, 0, 4, ToMockBufferMapAsyncCallback, nullptr);

        EXPECT_CALL(*mockBufferMapAsyncCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
            .WillOnce(InvokeWithoutArgs([&]() { buf.Destroy(); }));

        WaitForAllOperations(device);
    }
    {
        wgpu::Buffer buf = CreateMapWriteBuffer(4);
        buf.MapAsync(wgpu::MapMode::Write, 0, 4, ToMockBufferMapAsyncCallback, nullptr);

        EXPECT_CALL(*mockBufferMapAsyncCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
            .WillOnce(InvokeWithoutArgs([&]() { buf.Destroy(); }));

        WaitForAllOperations(device);
    }
}

// Test the success case for mapping buffer for reading
TEST_F(BufferValidationTest, MapReadAsyncSuccess) {
    wgpu::Buffer buf = CreateMapReadBuffer(4);

    buf.MapReadAsync(ToMockBufferMapReadCallback, nullptr);

@ -178,7 +463,7 @@ TEST_F(BufferValidationTest, MapReadSuccess) {
}

// Test the success case for mapping buffer for writing
TEST_F(BufferValidationTest, MapWriteSuccess) {
TEST_F(BufferValidationTest, MapWriteAsyncSuccess) {
    wgpu::Buffer buf = CreateMapWriteBuffer(4);

    buf.MapWriteAsync(ToMockBufferMapWriteCallback, nullptr);

@ -605,6 +890,30 @@ TEST_F(BufferValidationTest, SubmitMappedBuffer) {
        wgpu::Buffer bufA = device.CreateBuffer(&descriptorA);
        wgpu::Buffer bufB = device.CreateBuffer(&descriptorB);

        bufA.MapAsync(wgpu::MapMode::Write, 0, 4, nullptr, nullptr);

        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
        encoder.CopyBufferToBuffer(bufA, 0, bufB, 0, 4);
        wgpu::CommandBuffer commands = encoder.Finish();
        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
        WaitForAllOperations(device);
    }
    {
        wgpu::Buffer bufA = device.CreateBuffer(&descriptorA);
        wgpu::Buffer bufB = device.CreateBuffer(&descriptorB);

        bufB.MapAsync(wgpu::MapMode::Read, 0, 4, nullptr, nullptr);

        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
        encoder.CopyBufferToBuffer(bufA, 0, bufB, 0, 4);
        wgpu::CommandBuffer commands = encoder.Finish();
        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
        WaitForAllOperations(device);
    }
    {
        wgpu::Buffer bufA = device.CreateBuffer(&descriptorA);
        wgpu::Buffer bufB = device.CreateBuffer(&descriptorB);

        bufA.MapWriteAsync(ToMockBufferMapWriteCallback, nullptr);

        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();

@ -721,6 +1030,24 @@ TEST_F(BufferValidationTest, UnmapUnmappedBuffer) {
        buf.Unmap();
        buf.Unmap();
    }
    {
        wgpu::Buffer buf = CreateMapReadBuffer(4);
        // Buffer starts unmapped. Unmap should succeed.
        buf.Unmap();
        buf.MapAsync(wgpu::MapMode::Read, 0, 4, nullptr, nullptr);
        buf.Unmap();
        // Unmapping twice should succeed
        buf.Unmap();
    }
    {
        wgpu::Buffer buf = CreateMapWriteBuffer(4);
        // Buffer starts unmapped. Unmap should succeed.
        buf.Unmap();
        buf.MapAsync(wgpu::MapMode::Write, 0, 4, nullptr, nullptr);
        // Unmapping twice should succeed
        buf.Unmap();
        buf.Unmap();
    }
}

// Test that it is invalid to call GetMappedRange on an unmapped buffer.

@ -779,6 +1106,32 @@ TEST_F(BufferValidationTest, GetMappedRangeOnUnmappedBuffer) {
        WaitForAllOperations(device);
        buf.Unmap();

        ASSERT_EQ(nullptr, buf.GetMappedRange());
        ASSERT_EQ(nullptr, buf.GetConstMappedRange());
    }
    // Unmapped after MapAsync read case.
    {
        wgpu::Buffer buf = CreateMapReadBuffer(4);

        buf.MapAsync(wgpu::MapMode::Read, 0, 4, ToMockBufferMapAsyncCallback, nullptr);
        EXPECT_CALL(*mockBufferMapAsyncCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
            .Times(1);
        WaitForAllOperations(device);
        buf.Unmap();

        ASSERT_EQ(nullptr, buf.GetMappedRange());
        ASSERT_EQ(nullptr, buf.GetConstMappedRange());
    }

    // Unmapped after MapAsync write case.
    {
        wgpu::Buffer buf = CreateMapWriteBuffer(4);
        buf.MapAsync(wgpu::MapMode::Write, 0, 4, ToMockBufferMapAsyncCallback, nullptr);
        EXPECT_CALL(*mockBufferMapAsyncCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
            .Times(1);
        WaitForAllOperations(device);
        buf.Unmap();

        ASSERT_EQ(nullptr, buf.GetMappedRange());
        ASSERT_EQ(nullptr, buf.GetConstMappedRange());
    }

@ -841,6 +1194,32 @@ TEST_F(BufferValidationTest, GetMappedRangeOnDestroyedBuffer) {
        WaitForAllOperations(device);
        buf.Destroy();

        ASSERT_EQ(nullptr, buf.GetMappedRange());
        ASSERT_EQ(nullptr, buf.GetConstMappedRange());
    }
    // Destroyed after MapAsync read case.
    {
        wgpu::Buffer buf = CreateMapReadBuffer(4);

        buf.MapAsync(wgpu::MapMode::Read, 0, 4, ToMockBufferMapAsyncCallback, nullptr);
        EXPECT_CALL(*mockBufferMapAsyncCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
            .Times(1);
        WaitForAllOperations(device);
        buf.Destroy();

        ASSERT_EQ(nullptr, buf.GetMappedRange());
        ASSERT_EQ(nullptr, buf.GetConstMappedRange());
    }

    // Destroyed after MapAsync write case.
    {
        wgpu::Buffer buf = CreateMapWriteBuffer(4);
        buf.MapAsync(wgpu::MapMode::Write, 0, 4, ToMockBufferMapAsyncCallback, nullptr);
        EXPECT_CALL(*mockBufferMapAsyncCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
            .Times(1);
        WaitForAllOperations(device);
        buf.Destroy();

        ASSERT_EQ(nullptr, buf.GetMappedRange());
        ASSERT_EQ(nullptr, buf.GetConstMappedRange());
    }

@ -848,15 +1227,27 @@ TEST_F(BufferValidationTest, GetMappedRangeOnDestroyedBuffer) {

// Test that it is invalid to call GetMappedRange on a buffer after MapReadAsync
TEST_F(BufferValidationTest, GetMappedRangeOnMappedForReading) {
    {
        wgpu::Buffer buf = CreateMapReadBuffer(4);

        buf.MapReadAsync(ToMockBufferMapReadCallback, nullptr);
        EXPECT_CALL(*mockBufferMapReadCallback,
                    Call(WGPUBufferMapAsyncStatus_Success, Ne(nullptr), 4u, _))
            .Times(1);
        WaitForAllOperations(device);

        ASSERT_EQ(nullptr, buf.GetMappedRange());
    }
    {
        wgpu::Buffer buf = CreateMapReadBuffer(4);

        buf.MapAsync(wgpu::MapMode::Read, 0, 4, ToMockBufferMapAsyncCallback, nullptr);
        EXPECT_CALL(*mockBufferMapAsyncCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
            .Times(1);
        WaitForAllOperations(device);

        ASSERT_EQ(nullptr, buf.GetMappedRange());
    }
}

// Test valid cases to call GetMappedRange on a buffer.