Implement CreateBufferMapped for non-mappable buffers

This uses an intermediate staging buffer to copy data into the buffer.

Bug: dawn:7
Change-Id: I3bda19a8450ef0eddc5b4382ce1b9120f074b917
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/7500
Commit-Queue: Austin Eng <enga@chromium.org>
Reviewed-by: Corentin Wallez <cwallez@chromium.org>
Author:  Austin Eng <enga@chromium.org>
Date:    2019-06-05 18:35:31 +00:00 (committed by Commit Bot service account)
Parent:  233ce73c50
Commit:  9cd21f1bf9
16 changed files with 348 additions and 50 deletions
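
For context, the call pattern this change enables is sketched below. This sketch is not part of the commit; it mirrors the NonMappableUsageSmall test added further down and assumes a `device` object such as the one provided by the Dawn test fixture.

    // Sketch: creating a non-mappable buffer pre-mapped and writing through staging memory.
    dawn::BufferDescriptor descriptor;
    descriptor.nextInChain = nullptr;
    descriptor.size = 4;
    descriptor.usage = dawn::BufferUsageBit::TransferSrc;  // no MapRead / MapWrite usage

    uint32_t myData = 4239;
    dawn::CreateBufferMappedResult result = device.CreateBufferMapped(&descriptor);
    // result.data points at the intermediate staging allocation, not at the buffer itself.
    memcpy(result.data, &myData, sizeof(myData));
    // Unmap() copies the staging data into the non-mappable buffer
    // (BufferBase::CopyFromStagingBuffer below) and releases the staging allocation.
    result.buffer.Unmap();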


@@ -43,7 +43,16 @@ namespace dawn_native {
                 return buffer;
             }
 
+            void ClearMappedData() {
+                mFakeMappedData.reset();
+            }
+
           private:
+            bool IsMapWritable() const override {
+                UNREACHABLE();
+                return false;
+            }
+
             MaybeError MapAtCreationImpl(uint8_t** mappedPointer) override {
                 UNREACHABLE();
                 return {};
@@ -62,8 +71,7 @@ namespace dawn_native {
                 UNREACHABLE();
             }
             void UnmapImpl() override {
-                ASSERT(mFakeMappedData);
-                mFakeMappedData.reset();
+                UNREACHABLE();
             }
             void DestroyImpl() override {
                 UNREACHABLE();
@@ -131,7 +139,7 @@ namespace dawn_native {
         return ErrorBuffer::MakeMapped(device, size, mappedPointer);
     }
 
-    uint32_t BufferBase::GetSize() const {
+    uint64_t BufferBase::GetSize() const {
         ASSERT(!IsError());
         return mSize;
     }
@@ -146,13 +154,23 @@ namespace dawn_native {
         ASSERT(mappedPointer != nullptr);
         mState = BufferState::Mapped;
 
-        if ((mUsage & dawn::BufferUsageBit::MapWrite) == 0) {
-            // TODO(enga): Support non-mappable buffers with a staging buffer.
-            return DAWN_VALIDATION_ERROR("MapWrite usage required");
-        }
+        if (IsMapWritable()) {
             DAWN_TRY(MapAtCreationImpl(mappedPointer));
             ASSERT(*mappedPointer != nullptr);
+            return {};
+        }
+
+        // If any of these fail, the buffer will be deleted and replaced with an
+        // error buffer.
+        // TODO(enga): Suballocate and reuse memory from a larger staging buffer so we don't create
+        // many small buffers.
+        DynamicUploader* uploader = nullptr;
+        DAWN_TRY_ASSIGN(uploader, GetDevice()->GetDynamicUploader());
+        DAWN_TRY_ASSIGN(mStagingBuffer, uploader->CreateStagingBuffer(GetSize()));
+        ASSERT(mStagingBuffer->GetMappedPointer() != nullptr);
+
+        *mappedPointer = reinterpret_cast<uint8_t*>(mStagingBuffer->GetMappedPointer());
         return {};
     }
@@ -268,33 +286,58 @@ namespace dawn_native {
     }
 
     void BufferBase::Destroy() {
+        if (IsError()) {
+            // It is an error to call Destroy() on an ErrorBuffer, but we still need to reclaim the
+            // fake mapped staging data.
+            reinterpret_cast<ErrorBuffer*>(this)->ClearMappedData();
+        }
         if (GetDevice()->ConsumedError(ValidateDestroy())) {
             return;
         }
         ASSERT(!IsError());
 
         if (mState == BufferState::Mapped) {
+            if (mStagingBuffer == nullptr) {
                 Unmap();
             }
+            mStagingBuffer.reset();
+        }
         DestroyInternal();
     }
 
+    MaybeError BufferBase::CopyFromStagingBuffer() {
+        ASSERT(mStagingBuffer);
+        DAWN_TRY(GetDevice()->CopyFromStagingToBuffer(mStagingBuffer.get(), 0, this, 0, GetSize()));
+
+        DynamicUploader* uploader = nullptr;
+        DAWN_TRY_ASSIGN(uploader, GetDevice()->GetDynamicUploader());
+        uploader->ReleaseStagingBuffer(std::move(mStagingBuffer));
+
+        return {};
+    }
+
     void BufferBase::Unmap() {
         if (IsError()) {
             // It is an error to call Unmap() on an ErrorBuffer, but we still need to reclaim the
             // fake mapped staging data.
-            UnmapImpl();
+            reinterpret_cast<ErrorBuffer*>(this)->ClearMappedData();
         }
         if (GetDevice()->ConsumedError(ValidateUnmap())) {
             return;
         }
         ASSERT(!IsError());
 
+        if (mStagingBuffer != nullptr) {
+            GetDevice()->ConsumedError(CopyFromStagingBuffer());
+        } else {
             // A map request can only be called once, so this will fire only if the request wasn't
-            // completed before the Unmap
+            // completed before the Unmap.
+            // Callbacks are not fired if there is no callback registered, so this is correct for
+            // CreateBufferMapped.
             CallMapReadCallback(mMapSerial, DAWN_BUFFER_MAP_ASYNC_STATUS_UNKNOWN, nullptr, 0u);
             CallMapWriteCallback(mMapSerial, DAWN_BUFFER_MAP_ASYNC_STATUS_UNKNOWN, nullptr, 0u);
             UnmapImpl();
+        }
         mState = BufferState::Unmapped;
         mMapReadCallback = nullptr;
         mMapWriteCallback = nullptr;
@@ -304,12 +347,13 @@ namespace dawn_native {
     MaybeError BufferBase::ValidateSetSubData(uint32_t start, uint32_t count) const {
         DAWN_TRY(GetDevice()->ValidateObject(this));
 
-        if (mState == BufferState::Destroyed) {
-            return DAWN_VALIDATION_ERROR("Buffer is destroyed");
-        }
-        if (mState == BufferState::Mapped) {
-            return DAWN_VALIDATION_ERROR("Buffer is mapped");
-        }
+        switch (mState) {
+            case BufferState::Mapped:
+                return DAWN_VALIDATION_ERROR("Buffer is mapped");
+            case BufferState::Destroyed:
+                return DAWN_VALIDATION_ERROR("Buffer is destroyed");
+            case BufferState::Unmapped:
+                break;
+        }
 
         if (count > GetSize()) {
@@ -341,12 +385,13 @@ namespace dawn_native {
     MaybeError BufferBase::ValidateMap(dawn::BufferUsageBit requiredUsage) const {
         DAWN_TRY(GetDevice()->ValidateObject(this));
 
-        if (mState == BufferState::Destroyed) {
-            return DAWN_VALIDATION_ERROR("Buffer is destroyed");
-        }
-        if (mState == BufferState::Mapped) {
-            return DAWN_VALIDATION_ERROR("Buffer already mapped");
-        }
+        switch (mState) {
+            case BufferState::Mapped:
+                return DAWN_VALIDATION_ERROR("Buffer already mapped");
+            case BufferState::Destroyed:
+                return DAWN_VALIDATION_ERROR("Buffer is destroyed");
+            case BufferState::Unmapped:
+                break;
+        }
 
         if (!(mUsage & requiredUsage)) {
@@ -359,12 +404,16 @@ namespace dawn_native {
     MaybeError BufferBase::ValidateUnmap() const {
         DAWN_TRY(GetDevice()->ValidateObject(this));
 
-        if ((mUsage & (dawn::BufferUsageBit::MapRead | dawn::BufferUsageBit::MapWrite)) == 0) {
-            return DAWN_VALIDATION_ERROR("Buffer does not have map usage");
-        }
         switch (mState) {
-            case BufferState::Unmapped:
             case BufferState::Mapped:
+                // A buffer may be in the Mapped state if it was created with CreateBufferMapped
+                // even if it did not have a mappable usage.
+                return {};
+            case BufferState::Unmapped:
+                if ((mUsage & (dawn::BufferUsageBit::MapRead | dawn::BufferUsageBit::MapWrite)) ==
+                    0) {
+                    return DAWN_VALIDATION_ERROR("Buffer does not have map usage");
+                }
                 return {};
             case BufferState::Destroyed:
                 return DAWN_VALIDATION_ERROR("Buffer is destroyed");


@@ -49,7 +49,7 @@ namespace dawn_native {
                                            uint64_t size,
                                            uint8_t** mappedPointer);
 
-        uint32_t GetSize() const;
+        uint64_t GetSize() const;
         dawn::BufferUsageBit GetUsage() const;
 
         MaybeError MapAtCreation(uint8_t** mappedPointer);
@@ -85,6 +85,9 @@ namespace dawn_native {
         virtual void UnmapImpl() = 0;
         virtual void DestroyImpl() = 0;
+        virtual bool IsMapWritable() const = 0;
 
+        MaybeError CopyFromStagingBuffer();
+
         MaybeError ValidateSetSubData(uint32_t start, uint32_t count) const;
         MaybeError ValidateMap(dawn::BufferUsageBit requiredUsage) const;
         MaybeError ValidateUnmap() const;
@@ -98,6 +101,8 @@ namespace dawn_native {
         void* mMapUserdata = 0;
         uint32_t mMapSerial = 0;
 
+        std::unique_ptr<StagingBufferBase> mStagingBuffer;
+
         BufferState mState;
     };


@@ -484,7 +484,7 @@ namespace dawn_native {
 
             if (!readOnly && !singleUse) {
                 return DAWN_VALIDATION_ERROR(
-                    "Buffer used as writeable usage and another usage in pass");
+                    "Buffer used as writable usage and another usage in pass");
             }
         }


@@ -79,7 +79,10 @@ namespace dawn_native { namespace d3d12 {
         resourceDescriptor.SampleDesc.Count = 1;
         resourceDescriptor.SampleDesc.Quality = 0;
         resourceDescriptor.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR;
-        resourceDescriptor.Flags = D3D12ResourceFlags(GetUsage());
+        // Add TransferDst for non-mappable buffer initialization in CreateBufferMapped
+        // and robust resource initialization.
+        resourceDescriptor.Flags =
+            D3D12ResourceFlags(GetUsage() | dawn::BufferUsageBit::TransferDst);
 
         auto heapType = D3D12HeapType(GetUsage());
         auto bufferUsage = D3D12_RESOURCE_STATE_COMMON;
@@ -160,6 +163,11 @@ namespace dawn_native { namespace d3d12 {
         }
     }
 
+    bool Buffer::IsMapWritable() const {
+        // TODO(enga): Handle CPU-visible memory on UMA
+        return (GetUsage() & (dawn::BufferUsageBit::MapRead | dawn::BufferUsageBit::MapWrite)) != 0;
+    }
+
     MaybeError Buffer::MapAtCreationImpl(uint8_t** mappedPointer) {
         mWrittenMappedRange = {0, GetSize()};
         ASSERT_SUCCESS(


@@ -44,6 +44,7 @@ namespace dawn_native { namespace d3d12 {
         void UnmapImpl() override;
         void DestroyImpl() override;
 
+        bool IsMapWritable() const override;
         virtual MaybeError MapAtCreationImpl(uint8_t** mappedPointer) override;
 
         ComPtr<ID3D12Resource> mResource;


@@ -71,6 +71,12 @@ namespace dawn_native { namespace d3d12 {
     }
 
     Device::~Device() {
+        // Immediately forget about all pending commands
+        if (mPendingCommands.open) {
+            mPendingCommands.commandList->Close();
+            mPendingCommands.open = false;
+            mPendingCommands.commandList = nullptr;
+        }
         NextSerial();
         WaitForSerial(mLastSubmittedSerial);  // Wait for all in-flight commands to finish executing
         TickImpl();                           // Call tick one last time so resources are cleaned up


@@ -40,6 +40,7 @@ namespace dawn_native { namespace metal {
         void UnmapImpl() override;
         void DestroyImpl() override;
 
+        bool IsMapWritable() const override;
         MaybeError MapAtCreationImpl(uint8_t** mappedPointer) override;
 
         id<MTLBuffer> mMtlBuffer = nil;


@@ -47,6 +47,11 @@ namespace dawn_native { namespace metal {
         }
     }
 
+    bool Buffer::IsMapWritable() const {
+        // TODO(enga): Handle CPU-visible memory on UMA
+        return (GetUsage() & (dawn::BufferUsageBit::MapRead | dawn::BufferUsageBit::MapWrite)) != 0;
+    }
+
     MaybeError Buffer::MapAtCreationImpl(uint8_t** mappedPointer) {
         *mappedPointer = reinterpret_cast<uint8_t*>([mMtlBuffer contents]);
         return {};


@@ -56,6 +56,18 @@ namespace dawn_native { namespace null {
         return new Backend(instance);
     }
 
+    struct CopyFromStagingToBufferOperation : PendingOperation {
+        virtual void Execute() {
+            destination->CopyFromStaging(staging, sourceOffset, destinationOffset, size);
+        }
+
+        StagingBufferBase* staging;
+        Buffer* destination;
+        uint64_t sourceOffset;
+        uint64_t destinationOffset;
+        uint64_t size;
+    };
+
     // Device
 
     Device::Device(Adapter* adapter, const DeviceDescriptor* descriptor)
@@ -69,8 +81,7 @@ namespace dawn_native { namespace null {
     Device::~Device() {
         mDynamicUploader = nullptr;
 
-        // Ensure any in-flight maps have been cleaned up.
-        SubmitPendingOperations();
+        mPendingOperations.clear();
         ASSERT(mMemoryUsage == 0);
     }
@@ -140,7 +151,16 @@ namespace dawn_native { namespace null {
                                                BufferBase* destination,
                                                uint64_t destinationOffset,
                                                uint64_t size) {
-        return DAWN_UNIMPLEMENTED_ERROR("Device unable to copy from staging buffer.");
+        auto operation = std::make_unique<CopyFromStagingToBufferOperation>();
+        operation->staging = source;
+        operation->destination = reinterpret_cast<Buffer*>(destination);
+        operation->sourceOffset = sourceOffset;
+        operation->destinationOffset = destinationOffset;
+        operation->size = size;
+
+        ToBackend(GetDevice())->AddPendingOperation(std::move(operation));
+
+        return {};
     }
 
     MaybeError Device::IncrementMemoryUsage(size_t bytes) {
@@ -201,8 +221,8 @@ namespace dawn_native { namespace null {
 
     Buffer::Buffer(Device* device, const BufferDescriptor* descriptor)
         : BufferBase(device, descriptor) {
-        if (GetUsage() & (dawn::BufferUsageBit::TransferDst | dawn::BufferUsageBit::MapRead |
-                          dawn::BufferUsageBit::MapWrite)) {
+        if (GetUsage() & (dawn::BufferUsageBit::TransferDst | dawn::BufferUsageBit::TransferSrc |
+                          dawn::BufferUsageBit::MapRead | dawn::BufferUsageBit::MapWrite)) {
             mBackingData = std::unique_ptr<uint8_t[]>(new uint8_t[GetSize()]);
         }
     }
@@ -212,6 +232,12 @@ namespace dawn_native { namespace null {
         ToBackend(GetDevice())->DecrementMemoryUsage(GetSize());
     }
 
+    bool Buffer::IsMapWritable() const {
+        // Only return true for mappable buffers so we can test cases that need / don't need a
+        // staging buffer.
+        return (GetUsage() & (dawn::BufferUsageBit::MapRead | dawn::BufferUsageBit::MapWrite)) != 0;
+    }
+
     MaybeError Buffer::MapAtCreationImpl(uint8_t** mappedPointer) {
         *mappedPointer = mBackingData.get();
         return {};
@@ -225,6 +251,14 @@ namespace dawn_native { namespace null {
         }
     }
 
+    void Buffer::CopyFromStaging(StagingBufferBase* staging,
+                                 uint64_t sourceOffset,
+                                 uint64_t destinationOffset,
+                                 uint64_t size) {
+        uint8_t* ptr = reinterpret_cast<uint8_t*>(staging->GetMappedPointer());
+        memcpy(mBackingData.get() + destinationOffset, ptr + sourceOffset, size);
+    }
+
     MaybeError Buffer::SetSubDataImpl(uint32_t start, uint32_t count, const uint8_t* data) {
         ASSERT(start + count <= GetSize());
         ASSERT(mBackingData);
@@ -243,13 +277,13 @@ namespace dawn_native { namespace null {
     void Buffer::MapAsyncImplCommon(uint32_t serial, bool isWrite) {
         ASSERT(mBackingData);
 
-        auto operation = new BufferMapReadOperation;
+        auto operation = std::make_unique<BufferMapReadOperation>();
         operation->buffer = this;
         operation->ptr = mBackingData.get();
         operation->serial = serial;
         operation->isWrite = isWrite;
 
-        ToBackend(GetDevice())->AddPendingOperation(std::unique_ptr<PendingOperation>(operation));
+        ToBackend(GetDevice())->AddPendingOperation(std::move(operation));
     }
 
     void Buffer::UnmapImpl() {
@@ -324,10 +358,18 @@ namespace dawn_native { namespace null {
     // StagingBuffer
 
-    StagingBuffer::StagingBuffer(size_t size, Device* device) : StagingBufferBase(size) {
+    StagingBuffer::StagingBuffer(size_t size, Device* device)
+        : StagingBufferBase(size), mDevice(device) {
+    }
+
+    StagingBuffer::~StagingBuffer() {
+        if (mBuffer) {
+            mDevice->DecrementMemoryUsage(GetSize());
+        }
     }
 
     MaybeError StagingBuffer::Initialize() {
+        DAWN_TRY(mDevice->IncrementMemoryUsage(GetSize()));
         mBuffer = std::make_unique<uint8_t[]>(GetSize());
         mMappedPointer = mBuffer.get();
         return {};


@@ -142,6 +142,10 @@ namespace dawn_native { namespace null {
         ~Buffer();
 
         void MapReadOperationCompleted(uint32_t serial, void* ptr, bool isWrite);
+        void CopyFromStaging(StagingBufferBase* staging,
+                             uint64_t sourceOffset,
+                             uint64_t destinationOffset,
+                             uint64_t size);
 
       private:
         // Dawn API
@@ -151,6 +155,7 @@ namespace dawn_native { namespace null {
         void UnmapImpl() override;
         void DestroyImpl() override;
 
+        bool IsMapWritable() const override;
         MaybeError MapAtCreationImpl(uint8_t** mappedPointer) override;
         void MapAsyncImplCommon(uint32_t serial, bool isWrite);
 
@@ -201,9 +206,11 @@ namespace dawn_native { namespace null {
     class StagingBuffer : public StagingBufferBase {
       public:
         StagingBuffer(size_t size, Device* device);
+        ~StagingBuffer() override;
         MaybeError Initialize() override;
 
       private:
+        Device* mDevice;
         std::unique_ptr<uint8_t[]> mBuffer;
     };


@@ -35,6 +35,12 @@ namespace dawn_native { namespace opengl {
         return mBuffer;
     }
 
+    bool Buffer::IsMapWritable() const {
+        // TODO(enga): All buffers in GL can be mapped. Investigate if mapping them will cause the
+        // driver to migrate it to shared memory.
+        return true;
+    }
+
     MaybeError Buffer::MapAtCreationImpl(uint8_t** mappedPointer) {
         glBindBuffer(GL_ARRAY_BUFFER, mBuffer);
         void* data = glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);


@@ -38,6 +38,7 @@ namespace dawn_native { namespace opengl {
         void UnmapImpl() override;
         void DestroyImpl() override;
 
+        bool IsMapWritable() const override;
         MaybeError MapAtCreationImpl(uint8_t** mappedPointer) override;
 
         GLuint mBuffer = 0;


@@ -109,7 +109,9 @@ namespace dawn_native { namespace vulkan {
         createInfo.pNext = nullptr;
         createInfo.flags = 0;
         createInfo.size = GetSize();
-        createInfo.usage = VulkanBufferUsage(GetUsage());
+        // Add TransferDst for non-mappable buffer initialization in CreateBufferMapped
+        // and robust resource initialization.
+        createInfo.usage = VulkanBufferUsage(GetUsage() | dawn::BufferUsageBit::TransferDst);
         createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
         createInfo.queueFamilyIndexCount = 0;
         createInfo.pQueueFamilyIndices = 0;
@@ -188,6 +190,11 @@ namespace dawn_native { namespace vulkan {
         mLastUsage = usage;
     }
 
+    bool Buffer::IsMapWritable() const {
+        // TODO(enga): Handle CPU-visible memory on UMA
+        return mMemoryAllocation.GetMappedPointer() != nullptr;
+    }
+
     MaybeError Buffer::MapAtCreationImpl(uint8_t** mappedPointer) {
         *mappedPointer = mMemoryAllocation.GetMappedPointer();
         return {};


@@ -47,6 +47,7 @@ namespace dawn_native { namespace vulkan {
         void UnmapImpl() override;
         void DestroyImpl() override;
 
+        bool IsMapWritable() const override;
         MaybeError MapAtCreationImpl(uint8_t** mappedPointer) override;
 
         VkBuffer mHandle = VK_NULL_HANDLE;


@@ -226,10 +226,34 @@ DAWN_INSTANTIATE_TEST(BufferSetSubDataTests,
                      OpenGLBackend,
                      VulkanBackend);
 
-class CreateBufferMappedTests : public DawnTest {};
+class CreateBufferMappedTests : public DawnTest {
+  protected:
+    static void MapReadCallback(DawnBufferMapAsyncStatus status,
+                                const void* data,
+                                uint64_t,
+                                void* userdata) {
+        ASSERT_EQ(DAWN_BUFFER_MAP_ASYNC_STATUS_SUCCESS, status);
+        ASSERT_NE(nullptr, data);
+
+        static_cast<CreateBufferMappedTests*>(userdata)->mappedData = data;
+    }
+
+    const void* MapReadAsyncAndWait(const dawn::Buffer& buffer) {
+        buffer.MapReadAsync(MapReadCallback, this);
+
+        while (mappedData == nullptr) {
+            WaitABit();
+        }
+
+        return mappedData;
+    }
+
+  private:
+    const void* mappedData = nullptr;
+};
 
-// Test that the simplest CreateBufferMapped works.
-TEST_P(CreateBufferMappedTests, SmallSyncWrite) {
+// Test that the simplest CreateBufferMapped works for MapWrite buffers.
+TEST_P(CreateBufferMappedTests, MapWriteUsageSmall) {
     dawn::BufferDescriptor descriptor;
     descriptor.nextInChain = nullptr;
     descriptor.size = 4;
@@ -244,8 +268,42 @@ TEST_P(CreateBufferMappedTests, SmallSyncWrite) {
     EXPECT_BUFFER_U32_EQ(myData, result.buffer, 0);
 }
 
-// Test CreateBufferMapped for a large buffer
-TEST_P(CreateBufferMappedTests, LargeSyncWrite) {
+// Test that the simplest CreateBufferMapped works for MapRead buffers.
+TEST_P(CreateBufferMappedTests, MapReadUsageSmall) {
+    dawn::BufferDescriptor descriptor;
+    descriptor.nextInChain = nullptr;
+    descriptor.size = 4;
+    descriptor.usage = dawn::BufferUsageBit::MapRead;
+
+    uint32_t myData = 230502;
+    dawn::CreateBufferMappedResult result = device.CreateBufferMapped(&descriptor);
+    ASSERT_EQ(result.dataLength, descriptor.size);
+    memcpy(result.data, &myData, sizeof(myData));
+    result.buffer.Unmap();
+
+    const void* mappedData = MapReadAsyncAndWait(result.buffer);
+    ASSERT_EQ(myData, *reinterpret_cast<const uint32_t*>(mappedData));
+    result.buffer.Unmap();
+}
+
+// Test that the simplest CreateBufferMapped works for non-mappable buffers.
+TEST_P(CreateBufferMappedTests, NonMappableUsageSmall) {
+    dawn::BufferDescriptor descriptor;
+    descriptor.nextInChain = nullptr;
+    descriptor.size = 4;
+    descriptor.usage = dawn::BufferUsageBit::TransferSrc;
+
+    uint32_t myData = 4239;
+    dawn::CreateBufferMappedResult result = device.CreateBufferMapped(&descriptor);
+    ASSERT_EQ(result.dataLength, descriptor.size);
+    memcpy(result.data, &myData, sizeof(myData));
+    result.buffer.Unmap();
+
+    EXPECT_BUFFER_U32_EQ(myData, result.buffer, 0);
+}
+
+// Test CreateBufferMapped for a large MapWrite buffer
+TEST_P(CreateBufferMappedTests, MapWriteUsageLarge) {
     constexpr uint64_t kDataSize = 1000 * 1000;
     std::vector<uint32_t> myData;
     for (uint32_t i = 0; i < kDataSize; ++i) {
@@ -265,9 +323,53 @@ TEST_P(CreateBufferMappedTests, LargeSyncWrite) {
     EXPECT_BUFFER_U32_RANGE_EQ(myData.data(), result.buffer, 0, kDataSize);
 }
 
+// Test CreateBufferMapped for a large MapRead buffer
+TEST_P(CreateBufferMappedTests, MapReadUsageLarge) {
+    constexpr uint64_t kDataSize = 1000 * 1000;
+    std::vector<uint32_t> myData;
+    for (uint32_t i = 0; i < kDataSize; ++i) {
+        myData.push_back(i);
+    }
+
+    dawn::BufferDescriptor descriptor;
+    descriptor.nextInChain = nullptr;
+    descriptor.size = static_cast<uint64_t>(kDataSize * sizeof(uint32_t));
+    descriptor.usage = dawn::BufferUsageBit::MapRead;
+
+    dawn::CreateBufferMappedResult result = device.CreateBufferMapped(&descriptor);
+    ASSERT_EQ(result.dataLength, descriptor.size);
+    memcpy(result.data, myData.data(), kDataSize * sizeof(uint32_t));
+    result.buffer.Unmap();
+
+    const void* mappedData = MapReadAsyncAndWait(result.buffer);
+    ASSERT_EQ(0, memcmp(mappedData, myData.data(), kDataSize * sizeof(uint32_t)));
+    result.buffer.Unmap();
+}
+
+// Test CreateBufferMapped for a large non-mappable buffer
+TEST_P(CreateBufferMappedTests, NonMappableUsageLarge) {
+    constexpr uint64_t kDataSize = 1000 * 1000;
+    std::vector<uint32_t> myData;
+    for (uint32_t i = 0; i < kDataSize; ++i) {
+        myData.push_back(i);
+    }
+
+    dawn::BufferDescriptor descriptor;
+    descriptor.nextInChain = nullptr;
+    descriptor.size = static_cast<uint64_t>(kDataSize * sizeof(uint32_t));
+    descriptor.usage = dawn::BufferUsageBit::TransferSrc;
+
+    dawn::CreateBufferMappedResult result = device.CreateBufferMapped(&descriptor);
+    ASSERT_EQ(result.dataLength, descriptor.size);
+    memcpy(result.data, myData.data(), kDataSize * sizeof(uint32_t));
+    result.buffer.Unmap();
+
+    EXPECT_BUFFER_U32_RANGE_EQ(myData.data(), result.buffer, 0, kDataSize);
+}
+
 // Test that CreateBufferMapped returns zero-initialized data
 // TODO(enga): This should use the testing toggle to initialize resources to 1.
-TEST_P(CreateBufferMappedTests, ZeroInitialized) {
+TEST_P(CreateBufferMappedTests, MappableZeroInitialized) {
     dawn::BufferDescriptor descriptor;
     descriptor.nextInChain = nullptr;
     descriptor.size = 4;
@@ -279,6 +381,20 @@ TEST_P(CreateBufferMappedTests, ZeroInitialized) {
     result.buffer.Unmap();
 }
 
+// Test that CreateBufferMapped returns zero-initialized data
+// TODO(enga): This should use the testing toggle to initialize resources to 1.
+TEST_P(CreateBufferMappedTests, NonMappableZeroInitialized) {
+    dawn::BufferDescriptor descriptor;
+    descriptor.nextInChain = nullptr;
+    descriptor.size = 4;
+    descriptor.usage = dawn::BufferUsageBit::TransferSrc;
+
+    dawn::CreateBufferMappedResult result = device.CreateBufferMapped(&descriptor);
+    ASSERT_EQ(result.dataLength, descriptor.size);
+    ASSERT_EQ(*result.data, 0);
+    result.buffer.Unmap();
+}
+
 // Test that mapping a buffer is valid after CreateBufferMapped and Unmap
 TEST_P(CreateBufferMappedTests, CreateThenMapSuccess) {
     dawn::BufferDescriptor descriptor;


@@ -198,6 +198,15 @@ TEST_F(BufferValidationTest, CreateBufferMappedSuccess) {
     result.buffer.Unmap();
 }
 
+// Test the success case for non-mappable CreateBufferMapped
+TEST_F(BufferValidationTest, NonMappableCreateBufferMappedSuccess) {
+    dawn::CreateBufferMappedResult result =
+        CreateBufferMapped(4, dawn::BufferUsageBit::TransferSrc);
+    ASSERT_NE(result.data, nullptr);
+    ASSERT_EQ(result.dataLength, 4u);
+    result.buffer.Unmap();
+}
+
 // Test map reading a buffer with wrong current usage
 TEST_F(BufferValidationTest, MapReadWrongUsage) {
     dawn::BufferDescriptor descriptor;
@@ -586,7 +595,7 @@ TEST_F(BufferValidationTest, SetSubDataDestroyedBuffer) {
 }
 
 // Test that is is invalid to Map a mapped buffer
-TEST_F(BufferValidationTest, MapMappedbuffer) {
+TEST_F(BufferValidationTest, MapMappedBuffer) {
     {
         dawn::Buffer buf = CreateMapReadBuffer(4);
         buf.MapReadAsync(ToMockBufferMapReadCallback, nullptr);
@@ -601,6 +610,20 @@ TEST_F(BufferValidationTest, MapMappedbuffer) {
     }
 }
 
+// Test that is is invalid to Map a CreateBufferMapped buffer
+TEST_F(BufferValidationTest, MapCreateBufferMappedBuffer) {
+    {
+        dawn::Buffer buf = CreateBufferMapped(4, dawn::BufferUsageBit::MapRead).buffer;
+        ASSERT_DEVICE_ERROR(buf.MapReadAsync(ToMockBufferMapReadCallback, nullptr));
+        queue.Submit(0, nullptr);
+    }
+    {
+        dawn::Buffer buf = CreateBufferMapped(4, dawn::BufferUsageBit::MapWrite).buffer;
+        ASSERT_DEVICE_ERROR(buf.MapWriteAsync(ToMockBufferMapWriteCallback, nullptr));
+        queue.Submit(0, nullptr);
+    }
+}
+
 // Test that it is invalid to call SetSubData on a mapped buffer
 TEST_F(BufferValidationTest, SetSubDataMappedBuffer) {
     {
@@ -665,6 +688,26 @@ TEST_F(BufferValidationTest, SubmitMappedBuffer) {
         bufB.MapReadAsync(ToMockBufferMapReadCallback, nullptr);
 
+        dawn::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(bufA, 0, bufB, 0, 4);
+        dawn::CommandBuffer commands = encoder.Finish();
+        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+        queue.Submit(0, nullptr);
+    }
+    {
+        dawn::Buffer bufA = device.CreateBufferMapped(&descriptorA).buffer;
+        dawn::Buffer bufB = device.CreateBuffer(&descriptorB);
+
+        dawn::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(bufA, 0, bufB, 0, 4);
+        dawn::CommandBuffer commands = encoder.Finish();
+        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+        queue.Submit(0, nullptr);
+    }
+    {
+        dawn::Buffer bufA = device.CreateBuffer(&descriptorA);
+        dawn::Buffer bufB = device.CreateBufferMapped(&descriptorB).buffer;
+
         dawn::CommandEncoder encoder = device.CreateCommandEncoder();
         encoder.CopyBufferToBuffer(bufA, 0, bufB, 0, 4);
         dawn::CommandBuffer commands = encoder.Finish();