Remove device dependencies from ringbuffer.

Allows ringbuffer sub-allocator to be used for non-staging memory.

BUG=dawn:155

Change-Id: Id0021907f520909aaebaf79e992124a47797d38d
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/9760
Reviewed-by: Corentin Wallez <cwallez@chromium.org>
Reviewed-by: Austin Eng <enga@chromium.org>
Commit-Queue: Bryan Bernhart <bryan.bernhart@intel.com>
This commit was authored by Bryan Bernhart on 2019-09-18 22:06:41 +00:00 and committed by the Commit Bot service account.
Parent commit: b11bd2dfe5. Commit: 450e212cf5. 17 files changed, 290 insertions(+), 378 deletions(-).

View File

@@ -18,15 +18,9 @@
namespace dawn_native {
// Removed (pre-change) constructor: binds the uploader to `device` only; ring
// buffers were created on demand in Allocate() via CreateAndAppendBuffer().
DynamicUploader::DynamicUploader(DeviceBase* device) : mDevice(device) {
}
// NOTE(review): diff residue — this span interleaves the *removed*
// CreateStagingBuffer() helper with the *added* two-argument constructor; as
// captured it is not valid C++ (the helper's closing brace is missing and the
// brace below closes the new constructor instead).
// Removed helper: allocated a device staging buffer of `size` bytes and
// initialized it, propagating failures via ResultOrError.
ResultOrError<std::unique_ptr<StagingBufferBase>> DynamicUploader::CreateStagingBuffer(
size_t size) {
std::unique_ptr<StagingBufferBase> stagingBuffer;
DAWN_TRY_ASSIGN(stagingBuffer, mDevice->CreateStagingBuffer(size));
DAWN_TRY(stagingBuffer->Initialize());
return stagingBuffer;
// Added constructor: seeds the list with a single RingBuffer record of `size`
// bytes whose staging buffer is left nullptr — Allocate() creates it lazily
// (see the "lazily created" note in the Allocate hunk).
DynamicUploader::DynamicUploader(DeviceBase* device, size_t size) : mDevice(device) {
mRingBuffers.emplace_back(
std::unique_ptr<RingBuffer>(new RingBuffer{nullptr, RingBufferAllocator(size)}));
}
// Queues a spent staging buffer for deferred destruction. The `@@` hunk header
// below elides most of the body; only the tail of an enqueue call is visible,
// which tags the buffer with the device's pending command serial — presumably
// so it is destroyed only after in-flight GPU work completes (see
// ClearUpTo(lastCompletedSerial) in the Tick/Deallocate hunk) — TODO confirm
// against the full file.
void DynamicUploader::ReleaseStagingBuffer(std::unique_ptr<StagingBufferBase> stagingBuffer) {
@@ -34,73 +28,78 @@ namespace dawn_native {
mDevice->GetPendingCommandSerial());
}
// Creates a ring buffer of `size` bytes, initializes it against the device,
// and appends it to the list of sub-allocators.
//
// @param size  Capacity of the new ring buffer in bytes.
// @return      {} on success; initialization failures are propagated to the
//              caller via DAWN_TRY.
MaybeError DynamicUploader::CreateAndAppendBuffer(size_t size) {
    auto newRingBuffer = std::make_unique<RingBuffer>(mDevice, size);
    DAWN_TRY(newRingBuffer->Initialize());
    mRingBuffers.push_back(std::move(newRingBuffer));
    return {};
}
// NOTE(review): diff residue — removed (pre-change) and added (post-change)
// lines are interleaved below without +/- markers; the two Allocate()
// overloads share one body and the span is not valid C++ as captured.
// Removed signature: took only the allocation size.
ResultOrError<UploadHandle> DynamicUploader::Allocate(uint32_t size) {
// Added signature: the caller supplies the serial that tracks the
// allocation's lifetime, removing the device dependency from the allocator.
ResultOrError<UploadHandle> DynamicUploader::Allocate(size_t allocationSize, Serial serial) {
// Note: Validation ensures size is already aligned.
// First-fit: find next smallest buffer large enough to satisfy the allocation request.
// Removed: old code went through GetLargestBuffer(); added code takes the
// last buffer directly (the last buffer is the largest — see the comment in
// the Tick/Deallocate hunk).
RingBuffer* targetRingBuffer = GetLargestBuffer();
RingBuffer* targetRingBuffer = mRingBuffers.back().get();
for (auto& ringBuffer : mRingBuffers) {
const RingBufferAllocator& ringBufferAllocator = ringBuffer->mAllocator;
// Prevent overflow.
// Removed: size queries on the RingBuffer itself...
ASSERT(ringBuffer->GetSize() >= ringBuffer->GetUsedSize());
const size_t remainingSize = ringBuffer->GetSize() - ringBuffer->GetUsedSize();
if (size <= remainingSize) {
// ...added: the same queries on the embedded RingBufferAllocator.
ASSERT(ringBufferAllocator.GetSize() >= ringBufferAllocator.GetUsedSize());
const size_t remainingSize =
ringBufferAllocator.GetSize() - ringBufferAllocator.GetUsedSize();
if (allocationSize <= remainingSize) {
targetRingBuffer = ringBuffer.get();
break;
}
}
// Removed: old path produced an UploadHandle directly from SubAllocate().
UploadHandle uploadHandle = UploadHandle{};
// Added: new path gets a raw offset from the allocator; kInvalidOffset
// signals the target buffer could not satisfy the request.
size_t startOffset = kInvalidOffset;
if (targetRingBuffer != nullptr) {
uploadHandle = targetRingBuffer->SubAllocate(size);
startOffset = targetRingBuffer->mAllocator.Allocate(allocationSize, serial);
}
// Upon failure, append a newly created (and much larger) ring buffer to fulfill the
// request.
if (uploadHandle.mappedBuffer == nullptr) {
if (startOffset == kInvalidOffset) {
// Compute the new max size (in powers of two to preserve alignment).
size_t newMaxSize = targetRingBuffer->GetSize() * 2;
// Added: doubling now starts from the allocator's size; the loop below
// guarantees newMaxSize >= allocationSize.
size_t newMaxSize = targetRingBuffer->mAllocator.GetSize() * 2;
while (newMaxSize < size) {
while (newMaxSize < allocationSize) {
newMaxSize *= 2;
}
// TODO(bryan.bernhart@intel.com): Fall-back to no sub-allocations should this fail.
// Removed: old code delegated creation to CreateAndAppendBuffer().
DAWN_TRY(CreateAndAppendBuffer(newMaxSize));
targetRingBuffer = GetLargestBuffer();
uploadHandle = targetRingBuffer->SubAllocate(size);
// Added: the RingBuffer record is built inline with a deferred (nullptr)
// staging buffer; the retried sub-allocation is expected to succeed since
// the new buffer is empty and at least allocationSize big (asserted below).
mRingBuffers.emplace_back(std::unique_ptr<RingBuffer>(
new RingBuffer{nullptr, RingBufferAllocator(newMaxSize)}));
targetRingBuffer = mRingBuffers.back().get();
startOffset = targetRingBuffer->mAllocator.Allocate(allocationSize, serial);
}
uploadHandle.stagingBuffer = targetRingBuffer->GetStagingBuffer();
ASSERT(startOffset != kInvalidOffset);
// Allocate the staging buffer backing the ringbuffer.
// Note: the first ringbuffer will be lazily created.
if (targetRingBuffer->mStagingBuffer == nullptr) {
std::unique_ptr<StagingBufferBase> stagingBuffer;
DAWN_TRY_ASSIGN(stagingBuffer,
mDevice->CreateStagingBuffer(targetRingBuffer->mAllocator.GetSize()));
targetRingBuffer->mStagingBuffer = std::move(stagingBuffer);
}
ASSERT(targetRingBuffer->mStagingBuffer != nullptr);
// The returned handle carries the staging buffer, the CPU-visible mapped
// pointer advanced to the sub-allocated offset, and the offset itself.
UploadHandle uploadHandle;
uploadHandle.stagingBuffer = targetRingBuffer->mStagingBuffer.get();
uploadHandle.mappedBuffer =
static_cast<uint8_t*>(uploadHandle.stagingBuffer->GetMappedPointer()) + startOffset;
uploadHandle.startOffset = startOffset;
return uploadHandle;
}
// NOTE(review): diff residue — both the removed Tick() and the added
// Deallocate() signatures are present and share the body below.
void DynamicUploader::Tick(Serial lastCompletedSerial) {
void DynamicUploader::Deallocate(Serial lastCompletedSerial) {
// Reclaim memory within the ring buffers by ticking (or removing requests no longer
// in-flight).
// Indexed loop (not range-for) because erase() mutates the vector mid-walk.
for (size_t i = 0; i < mRingBuffers.size(); ++i) {
// Removed: reclamation via RingBuffer::Tick; added: via the allocator.
mRingBuffers[i]->Tick(lastCompletedSerial);
mRingBuffers[i]->mAllocator.Deallocate(lastCompletedSerial);
// Never erase the last buffer as to prevent re-creating smaller buffers
// again. The last buffer is the largest.
if (mRingBuffers[i]->Empty() && i < mRingBuffers.size() - 1) {
if (mRingBuffers[i]->mAllocator.Empty() && i < mRingBuffers.size() - 1) {
// NOTE(review): erasing at i and then incrementing i skips the element
// shifted into slot i; that buffer is only reconsidered on the next
// call — confirm the deferral is intentional.
mRingBuffers.erase(mRingBuffers.begin() + i);
}
}
// Destroy staging buffers retired via ReleaseStagingBuffer whose serial the
// GPU has completed.
mReleasedStagingBuffers.ClearUpTo(lastCompletedSerial);
}
// Returns the largest ring buffer. Buffers are only appended with strictly
// growing sizes, so the last element is always the largest; the uploader must
// own at least one buffer when this is called.
RingBuffer* DynamicUploader::GetLargestBuffer() {
    ASSERT(!mRingBuffers.empty());
    RingBuffer* const largest = mRingBuffers.back().get();
    return largest;
}
bool DynamicUploader::IsEmpty() const {
return mRingBuffers.empty();
}
} // namespace dawn_native