Fix overflow in CommandAllocator

It was essentially checking "currentPtr + commandSize <= endPtr", and a
large commandSize could make the addition overflow and wrap around,
making the comparison succeed when it shouldn't have. This was caught
through flakiness of the LargeCommands allocator test.
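
For illustration only (this snippet is not part of the commit, and the
function names are hypothetical), a minimal standalone sketch of why the
old check is unsound and how computing the remaining space first, as the
patch below does, avoids the overflow:

    #include <cstddef>
    #include <cstdint>

    // Old-style check: the pointer arithmetic currentPtr + commandSize can wrap
    // past the end of the address space (undefined behaviour for pointers), so
    // the comparison may succeed even though there is nowhere near enough room.
    bool FitsUnsafe(std::uint8_t* currentPtr, std::uint8_t* endPtr, std::size_t commandSize) {
        return currentPtr + commandSize <= endPtr;  // may overflow for a huge commandSize
    }

    // Overflow-safe check: endPtr - currentPtr cannot overflow as long as
    // endPtr >= currentPtr, and comparing two size_t values never wraps.
    bool FitsSafe(std::uint8_t* currentPtr, std::uint8_t* endPtr, std::size_t commandSize) {
        std::size_t remainingSize = static_cast<std::size_t>(endPtr - currentPtr);
        return commandSize <= remainingSize;
    }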

Added a test that provokes an overflow in Allocate and checks that
nullptr is returned.

BUG=

Change-Id: I8ae4dad5b33c9d2005027c4d45b110ee0c65dd9a
Reviewed-on: https://dawn-review.googlesource.com/c/2841
Commit-Queue: Corentin Wallez <cwallez@chromium.org>
Reviewed-by: Stephen White <senorblanco@chromium.org>
Author: Corentin Wallez
Date: 2019-03-01 12:04:58 +00:00
Committed by: Commit Bot service account
parent 0cdf9e09c4
commit 1c92c159ad
3 changed files with 93 additions and 26 deletions

dawn_native/CommandAllocator.cpp:

@@ -177,34 +177,57 @@ namespace dawn_native {
         // It should always be possible to allocate one id, for EndOfBlock tagging,
         ASSERT(IsPtrAligned(mCurrentPtr, alignof(uint32_t)));
-        ASSERT(mCurrentPtr + sizeof(uint32_t) <= mEndPtr);
+        ASSERT(mEndPtr >= mCurrentPtr);
+        ASSERT(static_cast<size_t>(mEndPtr - mCurrentPtr) >= sizeof(uint32_t));
 
-        uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr);
-        uint8_t* commandAlloc = AlignPtr(mCurrentPtr + sizeof(uint32_t), commandAlignment);
-        uint8_t* nextPtr = AlignPtr(commandAlloc + commandSize, alignof(uint32_t));
-
-        // When there is not enough space, we signal the EndOfBlock, so that the iterator nows to
-        // move to the next one. EndOfBlock on the last block means the end of the commands.
-        if (nextPtr + sizeof(uint32_t) > mEndPtr) {
-            *idAlloc = EndOfBlock;
-
-            // Make sure we have space for current allocation, plus end of block and alignment
-            // padding for the first id.
-            ASSERT(nextPtr > mCurrentPtr);
-            if (!GetNewBlock(static_cast<size_t>(nextPtr - mCurrentPtr) + sizeof(uint32_t) +
-                             alignof(uint32_t))) {
-                return nullptr;
-            }
-            return Allocate(commandId, commandSize, commandAlignment);
-        }
-
-        *idAlloc = commandId;
-        mCurrentPtr = nextPtr;
-        return commandAlloc;
+        // The memory after the ID will contain the following:
+        // - the current ID
+        // - padding to align the command, maximum kMaxSupportedAlignment
+        // - the command of size commandSize
+        // - padding to align the next ID, maximum alignof(uint32_t)
+        // - the next ID of size sizeof(uint32_t)
+        //
+        // To avoid checking for overflows at every step of the computations we compute an upper
+        // bound of the space that will be needed in addition to the command data.
+        static constexpr size_t kWorstCaseAdditionalSize =
+            sizeof(uint32_t) + kMaxSupportedAlignment + alignof(uint32_t) + sizeof(uint32_t);
+
+        // This can't overflow because by construction mCurrentPtr always has space for the next ID.
+        size_t remainingSize = static_cast<size_t>(mEndPtr - mCurrentPtr);
+
+        // The good case were we have enough space for the command data and upper bound of the
+        // extra required space.
+        if ((remainingSize >= kWorstCaseAdditionalSize) &&
+            (remainingSize - kWorstCaseAdditionalSize >= commandSize)) {
+            uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr);
+            *idAlloc = commandId;
+
+            uint8_t* commandAlloc = AlignPtr(mCurrentPtr + sizeof(uint32_t), commandAlignment);
+            mCurrentPtr = AlignPtr(commandAlloc + commandSize, alignof(uint32_t));
+
+            return commandAlloc;
+        }
+
+        // When there is not enough space, we signal the EndOfBlock, so that the iterator knows to
+        // move to the next one. EndOfBlock on the last block means the end of the commands.
+        uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr);
+        // Even if we are not able to get another block, the list of commands will be
+        // well-formed and iterable as this block will be that last one.
+        *idAlloc = EndOfBlock;
+
+        // We'll request a block that can contain at least the command ID, the command and an
+        // additional ID to contain the EndOfBlock tag.
+        size_t requestedBlockSize = commandSize + kWorstCaseAdditionalSize;
+
+        // The computation of the request could overflow.
+        if (DAWN_UNLIKELY(requestedBlockSize <= commandSize)) {
+            return nullptr;
+        }
+
+        if (DAWN_UNLIKELY(!GetNewBlock(requestedBlockSize))) {
+            return nullptr;
+        }
+        return Allocate(commandId, commandSize, commandAlignment);
     }
 
     uint8_t* CommandAllocator::AllocateData(size_t commandSize, size_t commandAlignment) {
         return Allocate(AdditionalData, commandSize, commandAlignment);
     }
@@ -215,7 +238,7 @@ namespace dawn_native {
             std::max(minimumSize, std::min(mLastAllocationSize * 2, size_t(16384)));
 
         uint8_t* block = reinterpret_cast<uint8_t*>(malloc(mLastAllocationSize));
-        if (block == nullptr) {
+        if (DAWN_UNLIKELY(block == nullptr)) {
             return false;
         }
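
As an aside, the "requestedBlockSize <= commandSize" test above relies on
unsigned addition wrapping: an overflowed sum ends up smaller than either
(non-zero) operand. A standalone sketch of that pattern, with a
hypothetical helper name that is not part of Dawn:

    #include <cstddef>

    // Returns true and stores a + b in *out when the sum does not wrap;
    // returns false when a + b would exceed the maximum value of size_t.
    bool CheckedAddSize(std::size_t a, std::size_t b, std::size_t* out) {
        std::size_t sum = a + b;  // unsigned overflow is well defined: it wraps
        if (sum < a) {            // wrapped, so the real sum does not fit in size_t
            return false;
        }
        *out = sum;
        return true;
    }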

dawn_native/CommandAllocator.h:

@@ -34,9 +34,8 @@ namespace dawn_native {
     //
    //     CommandIterator commands(allocator);
    //     CommandType type;
-   //     void* command;
-   //     while(commands.NextCommandId(&e)) {
-   //         switch(e) {
+   //     while(commands.NextCommandId(&type)) {
+   //         switch(type) {
    //             case CommandType::Draw:
    //                 DrawCommand* draw = commands.NextCommand<DrawCommand>();
    //                 // Do the draw
@@ -113,16 +112,22 @@ namespace dawn_native {
         T* Allocate(E commandId) {
             static_assert(sizeof(E) == sizeof(uint32_t), "");
             static_assert(alignof(E) == alignof(uint32_t), "");
+            static_assert(alignof(T) <= kMaxSupportedAlignment, "");
             return reinterpret_cast<T*>(
                 Allocate(static_cast<uint32_t>(commandId), sizeof(T), alignof(T)));
         }
 
         template <typename T>
         T* AllocateData(size_t count) {
+            static_assert(alignof(T) <= kMaxSupportedAlignment, "");
             return reinterpret_cast<T*>(AllocateData(sizeof(T) * count, alignof(T)));
         }
 
       private:
+        // This is used for some internal computations and can be any power of two as long as code
+        // using the CommandAllocator passes the static_asserts.
+        static constexpr size_t kMaxSupportedAlignment = 8;
+
         friend CommandIterator;
         CommandBlocks&& AcquireBlocks();
@@ -134,7 +139,7 @@ namespace dawn_native {
         size_t mLastAllocationSize = 2048;
 
         // Pointers to the current range of allocation in the block. Guaranteed to allow for at
-        // least one uint32_t is not nullptr, so that the special EndOfBlock command id can always
+        // least one uint32_t if not nullptr, so that the special EndOfBlock command id can always
         // be written. Nullptr iff the blocks were moved out.
         uint8_t* mCurrentPtr = nullptr;
         uint8_t* mEndPtr = nullptr;

CommandAllocatorTests.cpp:

@@ -16,6 +16,8 @@
 
 #include "dawn_native/CommandAllocator.h"
 
+#include <limits>
+
 using namespace dawn_native;
 
 // Definition of the command types used in the tests
@@ -362,3 +364,40 @@ TEST(CommandAllocator, EmptyIterator) {
         iterator2.DataWasDestroyed();
     }
 }
+
+template <size_t A>
+struct alignas(A) AlignedStruct {
+    char dummy;
+};
+
+// Test for overflows in Allocate's computations, size 1 variant
+TEST(CommandAllocator, AllocationOverflow_1) {
+    CommandAllocator allocator;
+    AlignedStruct<1>* data =
+        allocator.AllocateData<AlignedStruct<1>>(std::numeric_limits<size_t>::max() / 1);
+    ASSERT_EQ(data, nullptr);
+}
+
+// Test for overflows in Allocate's computations, size 2 variant
+TEST(CommandAllocator, AllocationOverflow_2) {
+    CommandAllocator allocator;
+    AlignedStruct<2>* data =
+        allocator.AllocateData<AlignedStruct<2>>(std::numeric_limits<size_t>::max() / 2);
+    ASSERT_EQ(data, nullptr);
+}
+
+// Test for overflows in Allocate's computations, size 4 variant
+TEST(CommandAllocator, AllocationOverflow_4) {
+    CommandAllocator allocator;
+    AlignedStruct<4>* data =
+        allocator.AllocateData<AlignedStruct<4>>(std::numeric_limits<size_t>::max() / 4);
+    ASSERT_EQ(data, nullptr);
+}
+
+// Test for overflows in Allocate's computations, size 8 variant
+TEST(CommandAllocator, AllocationOverflow_8) {
+    CommandAllocator allocator;
+    AlignedStruct<8>* data =
+        allocator.AllocateData<AlignedStruct<8>>(std::numeric_limits<size_t>::max() / 8);
+    ASSERT_EQ(data, nullptr);
+}