Corentin Wallez f7fab5a259 dawn_native: Use correct integer width for SetSubData.
SetSubData's count is a uint64_t while Queue::WriteBuffer's is a size_t,
so we add a small validation check that no narrowing will occur.
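
For illustration only (this sketch is not part of the change), the guard
amounts to checking that the 64-bit count is representable as size_t
before casting; the helper name below is made up:

    #include <cstddef>
    #include <cstdint>
    #include <limits>

    // True only when |count| fits in std::size_t, so a later
    // static_cast<std::size_t>(count) cannot silently truncate on 32-bit builds.
    bool FitsInSizeT(uint64_t count) {
        return count <= uint64_t(std::numeric_limits<std::size_t>::max());
    }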

No test is added because SetSubData will be removed soon, and it will be
hard to test because of the Null backend's artificial OOM.

This was found while debugging an unrelated fuzzer issue.

Bug: chromium:1099621
Change-Id: I27a9da2b94f51e889c5573f88d4a0a73fea5985c
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/23961
Reviewed-by: Stephen White <senorblanco@chromium.org>
Reviewed-by: Austin Eng <enga@chromium.org>
Commit-Queue: Corentin Wallez <cwallez@chromium.org>
2020-06-30 18:46:32 +00:00

// Copyright 2017 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dawn_native/Buffer.h"
#include "common/Assert.h"
#include "dawn_native/Device.h"
#include "dawn_native/DynamicUploader.h"
#include "dawn_native/ErrorData.h"
#include "dawn_native/MapRequestTracker.h"
#include "dawn_native/Queue.h"
#include "dawn_native/ValidationUtils_autogen.h"
#include <cstdio>
#include <cstring>
#include <utility>
namespace dawn_native {

    namespace {

        class ErrorBuffer final : public BufferBase {
          public:
            ErrorBuffer(DeviceBase* device) : BufferBase(device, ObjectBase::kError) {
            }

            static ErrorBuffer* MakeMapped(DeviceBase* device,
                                           uint64_t size,
                                           uint8_t** mappedPointer) {
                ASSERT(mappedPointer != nullptr);

                ErrorBuffer* buffer = new ErrorBuffer(device);
                buffer->mFakeMappedData =
                    std::unique_ptr<uint8_t[]>(new (std::nothrow) uint8_t[size]);
                *mappedPointer = buffer->mFakeMappedData.get();

                return buffer;
            }

            void ClearMappedData() {
                mFakeMappedData.reset();
            }

          private:
            bool IsMapWritable() const override {
                UNREACHABLE();
                return false;
            }

            MaybeError MapAtCreationImpl(uint8_t** mappedPointer) override {
                UNREACHABLE();
                return {};
            }

            MaybeError MapReadAsyncImpl(uint32_t serial) override {
                UNREACHABLE();
                return {};
            }

            MaybeError MapWriteAsyncImpl(uint32_t serial) override {
                UNREACHABLE();
                return {};
            }

            void* GetMappedPointerImpl() override {
                return mFakeMappedData.get();
            }

            void UnmapImpl() override {
                UNREACHABLE();
            }

            void DestroyImpl() override {
                UNREACHABLE();
            }

            std::unique_ptr<uint8_t[]> mFakeMappedData;
        };

    }  // anonymous namespace

    MaybeError ValidateBufferDescriptor(DeviceBase*, const BufferDescriptor* descriptor) {
        if (descriptor->nextInChain != nullptr) {
            return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
        }
        DAWN_TRY(ValidateBufferUsage(descriptor->usage));

        wgpu::BufferUsage usage = descriptor->usage;

        const wgpu::BufferUsage kMapWriteAllowedUsages =
            wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
        if (usage & wgpu::BufferUsage::MapWrite && (usage & kMapWriteAllowedUsages) != usage) {
            return DAWN_VALIDATION_ERROR("Only CopySrc is allowed with MapWrite");
        }

        const wgpu::BufferUsage kMapReadAllowedUsages =
            wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopyDst;
        if (usage & wgpu::BufferUsage::MapRead && (usage & kMapReadAllowedUsages) != usage) {
            return DAWN_VALIDATION_ERROR("Only CopyDst is allowed with MapRead");
        }

        return {};
    }
    // Buffer

    BufferBase::BufferBase(DeviceBase* device, const BufferDescriptor* descriptor)
        : ObjectBase(device),
          mSize(descriptor->size),
          mUsage(descriptor->usage),
          mState(BufferState::Unmapped) {
        // Add readonly storage usage if the buffer has a storage usage. The validation rules in
        // ValidatePassResourceUsage will make sure we don't use both at the same time.
        if (mUsage & wgpu::BufferUsage::Storage) {
            mUsage |= kReadOnlyStorageBuffer;
        }
    }

    BufferBase::BufferBase(DeviceBase* device, ObjectBase::ErrorTag tag)
        : ObjectBase(device, tag), mState(BufferState::Unmapped) {
    }

    BufferBase::~BufferBase() {
        if (mState == BufferState::Mapped) {
            ASSERT(!IsError());
            CallMapReadCallback(mMapSerial, WGPUBufferMapAsyncStatus_Unknown, nullptr, 0u);
            CallMapWriteCallback(mMapSerial, WGPUBufferMapAsyncStatus_Unknown, nullptr, 0u);
        }
    }
    // static
    BufferBase* BufferBase::MakeError(DeviceBase* device) {
        return new ErrorBuffer(device);
    }

    // static
    BufferBase* BufferBase::MakeErrorMapped(DeviceBase* device,
                                            uint64_t size,
                                            uint8_t** mappedPointer) {
        return ErrorBuffer::MakeMapped(device, size, mappedPointer);
    }

    uint64_t BufferBase::GetSize() const {
        ASSERT(!IsError());
        return mSize;
    }

    wgpu::BufferUsage BufferBase::GetUsage() const {
        ASSERT(!IsError());
        return mUsage;
    }

    MaybeError BufferBase::MapAtCreation(uint8_t** mappedPointer) {
        ASSERT(!IsError());
        ASSERT(mappedPointer != nullptr);
        mState = BufferState::MappedAtCreation;

        // 0-sized buffers are not supposed to be written to; returning any non-null pointer
        // is enough. Handle 0-sized buffers first so we don't try to map them in the backend.
        if (mSize == 0) {
            *mappedPointer = reinterpret_cast<uint8_t*>(intptr_t(0xCAFED00D));
            return {};
        }

        // Mappable buffers don't use a staging buffer; they behave as if they were mapped
        // through MapAsync.
        if (IsMapWritable()) {
            DAWN_TRY(MapAtCreationImpl(mappedPointer));
            ASSERT(*mappedPointer != nullptr);
            return {};
        }

        // If any of these fail, the buffer will be deleted and replaced with an error buffer.
        // TODO(enga): Suballocate and reuse memory from a larger staging buffer so we don't create
        // many small buffers.
        DAWN_TRY_ASSIGN(mStagingBuffer, GetDevice()->CreateStagingBuffer(GetSize()));
        ASSERT(mStagingBuffer->GetMappedPointer() != nullptr);
        *mappedPointer = reinterpret_cast<uint8_t*>(mStagingBuffer->GetMappedPointer());

        return {};
    }
    MaybeError BufferBase::ValidateCanUseOnQueueNow() const {
        ASSERT(!IsError());

        switch (mState) {
            case BufferState::Destroyed:
                return DAWN_VALIDATION_ERROR("Destroyed buffer used in a submit");
            case BufferState::Mapped:
            case BufferState::MappedAtCreation:
                return DAWN_VALIDATION_ERROR("Buffer used in a submit while mapped");
            case BufferState::Unmapped:
                return {};
            default:
                UNREACHABLE();
        }
    }

    void BufferBase::CallMapReadCallback(uint32_t serial,
                                         WGPUBufferMapAsyncStatus status,
                                         const void* pointer,
                                         uint64_t dataLength) {
        ASSERT(!IsError());
        if (mMapReadCallback != nullptr && serial == mMapSerial) {
            ASSERT(mMapWriteCallback == nullptr);

            // Tag the callback as fired before firing it, otherwise it could fire a second time if
            // for example buffer.Unmap() is called inside the application-provided callback.
            WGPUBufferMapReadCallback callback = mMapReadCallback;
            mMapReadCallback = nullptr;

            if (GetDevice()->IsLost()) {
                callback(WGPUBufferMapAsyncStatus_DeviceLost, nullptr, 0, mMapUserdata);
            } else {
                callback(status, pointer, dataLength, mMapUserdata);
            }
        }
    }

    void BufferBase::CallMapWriteCallback(uint32_t serial,
                                          WGPUBufferMapAsyncStatus status,
                                          void* pointer,
                                          uint64_t dataLength) {
        ASSERT(!IsError());
        if (mMapWriteCallback != nullptr && serial == mMapSerial) {
            ASSERT(mMapReadCallback == nullptr);

            // Tag the callback as fired before firing it, otherwise it could fire a second time if
            // for example buffer.Unmap() is called inside the application-provided callback.
            WGPUBufferMapWriteCallback callback = mMapWriteCallback;
            mMapWriteCallback = nullptr;

            if (GetDevice()->IsLost()) {
                callback(WGPUBufferMapAsyncStatus_DeviceLost, nullptr, 0, mMapUserdata);
            } else {
                callback(status, pointer, dataLength, mMapUserdata);
            }
        }
    }
    void BufferBase::SetSubData(uint64_t start, uint64_t count, const void* data) {
        // Queue::WriteBuffer takes a size_t count, so reject any count that would be narrowed
        // by the cast below instead of silently truncating it.
        if (count > uint64_t(std::numeric_limits<size_t>::max())) {
            GetDevice()->HandleError(InternalErrorType::Validation, "count too big");
            return;
        }

        Ref<QueueBase> queue = AcquireRef(GetDevice()->GetDefaultQueue());
        GetDevice()->EmitDeprecationWarning(
            "Buffer::SetSubData is deprecated, use Queue::WriteBuffer instead");
        queue->WriteBuffer(this, start, data, static_cast<size_t>(count));
    }
    void BufferBase::MapReadAsync(WGPUBufferMapReadCallback callback, void* userdata) {
        WGPUBufferMapAsyncStatus status;
        if (GetDevice()->ConsumedError(ValidateMap(wgpu::BufferUsage::MapRead, &status))) {
            callback(status, nullptr, 0, userdata);
            return;
        }
        ASSERT(!IsError());
        ASSERT(mMapWriteCallback == nullptr);

        // TODO(cwallez@chromium.org): what to do on wraparound? Could cause crashes.
        mMapSerial++;
        mMapReadCallback = callback;
        mMapUserdata = userdata;
        mState = BufferState::Mapped;

        if (GetDevice()->ConsumedError(MapReadAsyncImpl(mMapSerial))) {
            CallMapReadCallback(mMapSerial, WGPUBufferMapAsyncStatus_DeviceLost, nullptr, 0);
            return;
        }

        MapRequestTracker* tracker = GetDevice()->GetMapRequestTracker();
        tracker->Track(this, mMapSerial, false);
    }

    void BufferBase::MapWriteAsync(WGPUBufferMapWriteCallback callback, void* userdata) {
        WGPUBufferMapAsyncStatus status;
        if (GetDevice()->ConsumedError(ValidateMap(wgpu::BufferUsage::MapWrite, &status))) {
            callback(status, nullptr, 0, userdata);
            return;
        }
        ASSERT(!IsError());
        ASSERT(mMapReadCallback == nullptr);

        // TODO(cwallez@chromium.org): what to do on wraparound? Could cause crashes.
        mMapSerial++;
        mMapWriteCallback = callback;
        mMapUserdata = userdata;
        mState = BufferState::Mapped;

        if (GetDevice()->ConsumedError(MapWriteAsyncImpl(mMapSerial))) {
            CallMapWriteCallback(mMapSerial, WGPUBufferMapAsyncStatus_DeviceLost, nullptr, 0);
            return;
        }

        MapRequestTracker* tracker = GetDevice()->GetMapRequestTracker();
        tracker->Track(this, mMapSerial, true);
    }
    void* BufferBase::GetMappedRange() {
        if (GetDevice()->ConsumedError(ValidateGetMappedRange(true))) {
            return nullptr;
        }

        if (mStagingBuffer != nullptr) {
            return mStagingBuffer->GetMappedPointer();
        }
        return GetMappedPointerImpl();
    }

    const void* BufferBase::GetConstMappedRange() {
        if (GetDevice()->ConsumedError(ValidateGetMappedRange(false))) {
            return nullptr;
        }

        if (mStagingBuffer != nullptr) {
            return mStagingBuffer->GetMappedPointer();
        }
        return GetMappedPointerImpl();
    }

    void BufferBase::Destroy() {
        if (IsError()) {
            // It is an error to call Destroy() on an ErrorBuffer, but we still need to reclaim the
            // fake mapped staging data.
            reinterpret_cast<ErrorBuffer*>(this)->ClearMappedData();
        }
        if (GetDevice()->ConsumedError(ValidateDestroy())) {
            return;
        }
        ASSERT(!IsError());

        if (mState == BufferState::Mapped) {
            Unmap();
        } else if (mState == BufferState::MappedAtCreation) {
            if (mStagingBuffer != nullptr) {
                mStagingBuffer.reset();
            } else if (mSize != 0) {
                ASSERT(IsMapWritable());
                Unmap();
            }
        }
        DestroyInternal();
    }
    MaybeError BufferBase::CopyFromStagingBuffer() {
        ASSERT(mStagingBuffer);
        if (GetSize() == 0) {
            return {};
        }

        DAWN_TRY(
            GetDevice()->CopyFromStagingToBuffer(mStagingBuffer.get(), 0, this, 0, GetSize()));

        DynamicUploader* uploader = GetDevice()->GetDynamicUploader();
        uploader->ReleaseStagingBuffer(std::move(mStagingBuffer));

        return {};
    }

    void BufferBase::Unmap() {
        if (IsError()) {
            // It is an error to call Unmap() on an ErrorBuffer, but we still need to reclaim the
            // fake mapped staging data.
            reinterpret_cast<ErrorBuffer*>(this)->ClearMappedData();
        }
        if (GetDevice()->ConsumedError(ValidateUnmap())) {
            return;
        }
        ASSERT(!IsError());

        if (mState == BufferState::Mapped) {
            // A map request can only be called once, so this will fire only if the request wasn't
            // completed before the Unmap.
            // Callbacks are not fired if there is no callback registered, so this is correct for
            // CreateBufferMapped.
            CallMapReadCallback(mMapSerial, WGPUBufferMapAsyncStatus_Unknown, nullptr, 0u);
            CallMapWriteCallback(mMapSerial, WGPUBufferMapAsyncStatus_Unknown, nullptr, 0u);
            UnmapImpl();

            mMapReadCallback = nullptr;
            mMapWriteCallback = nullptr;
            mMapUserdata = 0;
        } else if (mState == BufferState::MappedAtCreation) {
            if (mStagingBuffer != nullptr) {
                GetDevice()->ConsumedError(CopyFromStagingBuffer());
            } else if (mSize != 0) {
                ASSERT(IsMapWritable());
                UnmapImpl();
            }
        }
        mState = BufferState::Unmapped;
    }
    MaybeError BufferBase::ValidateMap(wgpu::BufferUsage requiredUsage,
                                       WGPUBufferMapAsyncStatus* status) const {
        *status = WGPUBufferMapAsyncStatus_DeviceLost;
        DAWN_TRY(GetDevice()->ValidateIsAlive());

        *status = WGPUBufferMapAsyncStatus_Error;
        DAWN_TRY(GetDevice()->ValidateObject(this));

        switch (mState) {
            case BufferState::Mapped:
            case BufferState::MappedAtCreation:
                return DAWN_VALIDATION_ERROR("Buffer already mapped");
            case BufferState::Destroyed:
                return DAWN_VALIDATION_ERROR("Buffer is destroyed");
            case BufferState::Unmapped:
                break;
        }

        if (!(mUsage & requiredUsage)) {
            return DAWN_VALIDATION_ERROR("Buffer needs the correct map usage bit");
        }

        *status = WGPUBufferMapAsyncStatus_Success;
        return {};
    }

    MaybeError BufferBase::ValidateGetMappedRange(bool writable) const {
        DAWN_TRY(GetDevice()->ValidateIsAlive());
        DAWN_TRY(GetDevice()->ValidateObject(this));

        switch (mState) {
            // Writeable Buffer::GetMappedRange is always allowed when mapped at creation.
            case BufferState::MappedAtCreation:
                return {};

            case BufferState::Mapped:
                if (writable && !(mUsage & wgpu::BufferUsage::MapWrite)) {
                    return DAWN_VALIDATION_ERROR("GetMappedRange requires the MapWrite usage");
                }
                return {};

            case BufferState::Unmapped:
            case BufferState::Destroyed:
                return DAWN_VALIDATION_ERROR("Buffer is not mapped");

            default:
                UNREACHABLE();
        }
    }
    MaybeError BufferBase::ValidateUnmap() const {
        DAWN_TRY(GetDevice()->ValidateIsAlive());
        DAWN_TRY(GetDevice()->ValidateObject(this));

        switch (mState) {
            case BufferState::Mapped:
            case BufferState::MappedAtCreation:
                // A buffer may be in the Mapped state if it was created with CreateBufferMapped
                // even if it did not have a mappable usage.
                return {};
            case BufferState::Unmapped:
                if ((mUsage & (wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite)) == 0) {
                    return DAWN_VALIDATION_ERROR("Buffer does not have map usage");
                }
                return {};
            case BufferState::Destroyed:
                return DAWN_VALIDATION_ERROR("Buffer is destroyed");
            default:
                UNREACHABLE();
        }
    }

    MaybeError BufferBase::ValidateDestroy() const {
        DAWN_TRY(GetDevice()->ValidateObject(this));
        return {};
    }

    void BufferBase::DestroyInternal() {
        if (mState != BufferState::Destroyed) {
            DestroyImpl();
        }
        mState = BufferState::Destroyed;
    }

    void BufferBase::OnMapCommandSerialFinished(uint32_t mapSerial, bool isWrite) {
        void* data = GetMappedPointerImpl();
        if (isWrite) {
            CallMapWriteCallback(mapSerial, WGPUBufferMapAsyncStatus_Success, data, GetSize());
        } else {
            CallMapReadCallback(mapSerial, WGPUBufferMapAsyncStatus_Success, data, GetSize());
        }
    }

}  // namespace dawn_native
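
As context for the deprecation warning emitted by SetSubData above, a minimal sketch of the
suggested migration using the public C++ API. The header path, the UploadData helper, and the
use of wgpu::Device::GetDefaultQueue() are assumptions for the example, not part of this file:

    #include <dawn/webgpu_cpp.h>

    // Hypothetical helper: |device| and |buffer| come from the application's usual
    // setup; |data| must have at least |size| readable bytes.
    void UploadData(const wgpu::Device& device,
                    const wgpu::Buffer& buffer,
                    uint64_t offset,
                    const void* data,
                    size_t size) {
        // Deprecated path: buffer.SetSubData(offset, size, data);
        // Replacement suggested by the deprecation warning:
        device.GetDefaultQueue().WriteBuffer(buffer, offset, data, size);
    }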