Add SlabAllocator and tests

Bug: dawn:340
Change-Id: I6fa1948261e8e6f91324464dade3e9954bd833e5
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/15861
Reviewed-by: Jiawei Shao <jiawei.shao@intel.com>
Reviewed-by: Corentin Wallez <cwallez@chromium.org>
Commit-Queue: Austin Eng <enga@chromium.org>
Author: Austin Eng <enga@chromium.org>, 2020-03-04 17:48:24 +00:00 (committed by Commit Bot service account)
parent 35645a601a
commit e5534c4419
8 changed files with 776 additions and 0 deletions


@@ -833,11 +833,13 @@ test("dawn_unittests") {
     "src/tests/unittests/MathTests.cpp",
     "src/tests/unittests/ObjectBaseTests.cpp",
     "src/tests/unittests/PerStageTests.cpp",
+    "src/tests/unittests/PlacementAllocatedTests.cpp",
     "src/tests/unittests/RefCountedTests.cpp",
     "src/tests/unittests/ResultTests.cpp",
     "src/tests/unittests/RingBufferAllocatorTests.cpp",
     "src/tests/unittests/SerialMapTests.cpp",
     "src/tests/unittests/SerialQueueTests.cpp",
+    "src/tests/unittests/SlabAllocatorTests.cpp",
     "src/tests/unittests/SystemUtilsTests.cpp",
     "src/tests/unittests/ToBackendTests.cpp",
     "src/tests/unittests/validation/BindGroupValidationTests.cpp",


@@ -112,6 +112,7 @@ if (is_win || is_linux || is_mac || is_fuchsia || is_android) {
     "Log.h",
     "Math.cpp",
     "Math.h",
+    "PlacementAllocated.h",
     "Platform.h",
     "Result.cpp",
     "Result.h",
@@ -119,6 +120,8 @@ if (is_win || is_linux || is_mac || is_fuchsia || is_android) {
     "SerialMap.h",
     "SerialQueue.h",
     "SerialStorage.h",
+    "SlabAllocator.cpp",
+    "SlabAllocator.h",
     "SwapChainUtils.h",
     "SystemUtils.cpp",
     "SystemUtils.h",


@@ -28,6 +28,7 @@ target_sources(dawn_common PRIVATE
     "Log.h"
     "Math.cpp"
     "Math.h"
+    "PlacementAllocated.h"
     "Platform.h"
     "Result.cpp"
     "Result.h"
@@ -35,6 +36,8 @@ target_sources(dawn_common PRIVATE
     "SerialMap.h"
     "SerialQueue.h"
     "SerialStorage.h"
+    "SlabAllocator.cpp"
+    "SlabAllocator.h"
     "SwapChainUtils.h"
     "SystemUtils.cpp"
     "SystemUtils.h"

src/common/PlacementAllocated.h (new file)

@@ -0,0 +1,37 @@
// Copyright 2020 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef COMMON_PLACEMENTALLOCATED_H_
#define COMMON_PLACEMENTALLOCATED_H_
#include <cstddef>
class PlacementAllocated {
public:
// Delete the default new operator so this can only be created with placement new.
void* operator new(size_t) = delete;
void* operator new(size_t size, void* ptr) {
// Pass through the pointer of the allocation. This is essentially the default
// placement-new implementation, but we must define it if we delete the default
// new operator.
return ptr;
}
void operator delete(void* ptr) {
// Object is placement-allocated. Don't free the memory.
}
};
#endif // COMMON_PLACEMENTALLOCATED_H_
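
A minimal usage sketch of PlacementAllocated (not part of the change; the Widget type and Example function are hypothetical): deletion runs the destructor but leaves the memory to its real owner.

    #include <cstdlib>
    #include "common/PlacementAllocated.h"

    struct Widget : PlacementAllocated {  // hypothetical example type
        explicit Widget(int v) : value(v) {}
        int value;
    };

    void Example() {
        void* storage = malloc(sizeof(Widget));  // memory owned by the caller, not by Widget
        Widget* w = new (storage) Widget(42);    // placement new is the only new allowed
        delete w;                                // runs ~Widget(), does NOT free |storage|
        free(storage);                           // the owner releases the memory itself
    }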

src/common/SlabAllocator.cpp (new file)

@@ -0,0 +1,234 @@
// Copyright 2020 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "common/SlabAllocator.h"
#include "common/Assert.h"
#include "common/Math.h"
#include <cstdlib>
#include <limits>
#include <new>
// IndexLinkNode
SlabAllocatorImpl::IndexLinkNode::IndexLinkNode(Index index, Index nextIndex)
: index(index), nextIndex(nextIndex) {
}
// Slab
SlabAllocatorImpl::Slab::Slab(std::unique_ptr<char[]> allocation, IndexLinkNode* head)
: allocation(std::move(allocation)),
freeList(head),
prev(nullptr),
next(nullptr),
blocksInUse(0) {
}
SlabAllocatorImpl::SentinelSlab::SentinelSlab() : Slab(nullptr, nullptr) {
}
SlabAllocatorImpl::SentinelSlab::~SentinelSlab() {
Slab* slab = this->next;
while (slab != nullptr) {
Slab* next = slab->next;
ASSERT(slab->blocksInUse == 0);
slab->~Slab();
slab = next;
}
}
// SlabAllocatorImpl
SlabAllocatorImpl::Index SlabAllocatorImpl::kInvalidIndex =
std::numeric_limits<SlabAllocatorImpl::Index>::max();
SlabAllocatorImpl::SlabAllocatorImpl(Index blocksPerSlab,
uint32_t allocationAlignment,
uint32_t slabBlocksOffset,
uint32_t blockStride,
uint32_t indexLinkNodeOffset)
: mAllocationAlignment(allocationAlignment),
mSlabBlocksOffset(slabBlocksOffset),
mBlockStride(blockStride),
mIndexLinkNodeOffset(indexLinkNodeOffset),
mBlocksPerSlab(blocksPerSlab),
mTotalAllocationSize(
// required allocation size
static_cast<size_t>(mSlabBlocksOffset) + mBlocksPerSlab * mBlockStride +
// Pad the allocation size by mAllocationAlignment so that the aligned allocation still
// fulfills the required size.
mAllocationAlignment) {
ASSERT(IsPowerOfTwo(mAllocationAlignment));
}
SlabAllocatorImpl::~SlabAllocatorImpl() = default;
SlabAllocatorImpl::IndexLinkNode* SlabAllocatorImpl::OffsetFrom(
IndexLinkNode* node,
std::make_signed_t<Index> offset) const {
return reinterpret_cast<IndexLinkNode*>(reinterpret_cast<char*>(node) +
static_cast<intptr_t>(mBlockStride) * offset);
}
SlabAllocatorImpl::IndexLinkNode* SlabAllocatorImpl::NodeFromObject(void* object) const {
return reinterpret_cast<SlabAllocatorImpl::IndexLinkNode*>(static_cast<char*>(object) +
mIndexLinkNodeOffset);
}
void* SlabAllocatorImpl::ObjectFromNode(IndexLinkNode* node) const {
return static_cast<void*>(reinterpret_cast<char*>(node) - mIndexLinkNodeOffset);
}
bool SlabAllocatorImpl::IsNodeInSlab(Slab* slab, IndexLinkNode* node) const {
char* firstObjectPtr = reinterpret_cast<char*>(slab) + mSlabBlocksOffset;
IndexLinkNode* firstNode = NodeFromObject(firstObjectPtr);
IndexLinkNode* lastNode = OffsetFrom(firstNode, mBlocksPerSlab - 1);
return node >= firstNode && node <= lastNode && node->index < mBlocksPerSlab;
}
void SlabAllocatorImpl::PushFront(Slab* slab, IndexLinkNode* node) const {
ASSERT(IsNodeInSlab(slab, node));
IndexLinkNode* head = slab->freeList;
if (head == nullptr) {
node->nextIndex = kInvalidIndex;
} else {
ASSERT(IsNodeInSlab(slab, head));
node->nextIndex = head->index;
}
slab->freeList = node;
ASSERT(slab->blocksInUse != 0);
slab->blocksInUse--;
}
SlabAllocatorImpl::IndexLinkNode* SlabAllocatorImpl::PopFront(Slab* slab) const {
ASSERT(slab->freeList != nullptr);
IndexLinkNode* head = slab->freeList;
if (head->nextIndex == kInvalidIndex) {
slab->freeList = nullptr;
} else {
ASSERT(IsNodeInSlab(slab, head));
slab->freeList = OffsetFrom(head, head->nextIndex - head->index);
ASSERT(IsNodeInSlab(slab, slab->freeList));
}
ASSERT(slab->blocksInUse < mBlocksPerSlab);
slab->blocksInUse++;
return head;
}
void SlabAllocatorImpl::SentinelSlab::Prepend(SlabAllocatorImpl::Slab* slab) {
if (this->next != nullptr) {
this->next->prev = slab;
}
slab->prev = this;
slab->next = this->next;
this->next = slab;
}
void SlabAllocatorImpl::Slab::Splice() {
SlabAllocatorImpl::Slab* originalPrev = this->prev;
SlabAllocatorImpl::Slab* originalNext = this->next;
this->prev = nullptr;
this->next = nullptr;
ASSERT(originalPrev != nullptr);
// Set the originalNext's prev pointer.
if (originalNext != nullptr) {
originalNext->prev = originalPrev;
}
// Now, set the originalNext as the originalPrev's new next.
originalPrev->next = originalNext;
}
void* SlabAllocatorImpl::Allocate() {
if (mAvailableSlabs.next == nullptr) {
GetNewSlab();
}
Slab* slab = mAvailableSlabs.next;
IndexLinkNode* node = PopFront(slab);
ASSERT(node != nullptr);
// Move full slabs to a separate list, so allocate can always return quickly.
if (slab->blocksInUse == mBlocksPerSlab) {
slab->Splice();
mFullSlabs.Prepend(slab);
}
return ObjectFromNode(node);
}
void SlabAllocatorImpl::Deallocate(void* ptr) {
IndexLinkNode* node = NodeFromObject(ptr);
ASSERT(node->index < mBlocksPerSlab);
void* firstAllocation = ObjectFromNode(OffsetFrom(node, -node->index));
Slab* slab = reinterpret_cast<Slab*>(static_cast<char*>(firstAllocation) - mSlabBlocksOffset);
ASSERT(slab != nullptr);
bool slabWasFull = slab->blocksInUse == mBlocksPerSlab;
ASSERT(slab->blocksInUse != 0);
PushFront(slab, node);
if (slabWasFull) {
// Slab is in the full list. Move it to the recycled list.
ASSERT(slab->freeList != nullptr);
slab->Splice();
mRecycledSlabs.Prepend(slab);
}
// TODO(enga): Occasionally prune slabs if |blocksInUse == 0|.
// Doing so eagerly hurts performance.
}
void SlabAllocatorImpl::GetNewSlab() {
// Should only be called when there are no available slabs.
ASSERT(mAvailableSlabs.next == nullptr);
if (mRecycledSlabs.next != nullptr) {
// If the recycled list is non-empty, swap their contents.
std::swap(mAvailableSlabs.next, mRecycledSlabs.next);
// We swapped the next pointers, so the prev pointer is wrong.
// Update it here.
mAvailableSlabs.next->prev = &mAvailableSlabs;
ASSERT(mRecycledSlabs.next == nullptr);
return;
}
// TODO(enga): Use aligned_alloc with C++17.
auto allocation = std::unique_ptr<char[]>(new char[mTotalAllocationSize]);
char* alignedPtr = AlignPtr(allocation.get(), mAllocationAlignment);
char* dataStart = alignedPtr + mSlabBlocksOffset;
IndexLinkNode* node = NodeFromObject(dataStart);
for (uint32_t i = 0; i < mBlocksPerSlab; ++i) {
new (OffsetFrom(node, i)) IndexLinkNode(i, i + 1);
}
IndexLinkNode* lastNode = OffsetFrom(node, mBlocksPerSlab - 1);
lastNode->nextIndex = kInvalidIndex;
mAvailableSlabs.Prepend(new (alignedPtr) Slab(std::move(allocation), node));
}

src/common/SlabAllocator.h (new file)

@@ -0,0 +1,202 @@
// Copyright 2020 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef COMMON_SLABALLOCATOR_H_
#define COMMON_SLABALLOCATOR_H_
#include "common/PlacementAllocated.h"
#include <cstdint>
#include <memory>
#include <type_traits>
// The SlabAllocator allocates objects out of one or more fixed-size contiguous "slabs" of memory.
// This makes it very quick to allocate and deallocate fixed-size objects because the allocator only
// needs to index an offset into pre-allocated memory. It is similar to a pool-allocator that
// recycles memory from previous allocations, except multiple allocations are hosted contiguously in
// one large slab.
//
// Internally, the SlabAllocator stores slabs as a linked list to avoid extra indirections indexing
// into an std::vector. To service an allocation request, the allocator only needs to know the first
// currently available slab. There are three backing linked lists: AVAILABLE, FULL, and RECYCLED.
// A slab that is AVAILABLE can be used to immediately service allocation requests. Once it has no
// remaining space, it is moved to the FULL state. When a FULL slab sees any deallocations, it is
// moved to the RECYCLED state. The RECYCLED state is separate from the AVAILABLE state so that
// deallocations don't immediately prepend slabs to the AVAILABLE list, and change the current slab
// servicing allocations. When the AVAILABLE list becomes empty is it swapped with the RECYCLED
// list.
//
// Allocated objects are placement-allocated with some extra info at the end (we'll call the Object
// plus the extra bytes a "block") used to specify the constant index of the block in its parent
// slab, as well as the index of the next available block. So, following the block next-indices
// forms a linked list of free blocks.
//
// Slab creation: When a new slab is allocated, sufficient memory is allocated for it, and then the
// slab metadata plus all of its child blocks are placement-allocated into the memory. Indices and
// next-indices are initialized to form the free-list of blocks.
//
// Allocation: When an object is allocated, if there is no space available in an existing slab, a
// new slab is created (or an old slab is recycled). The first block of the slab is removed and
// returned.
//
// Deallocation: When an object is deallocated, it can compute the pointer to its parent slab
// because it stores the index of its own allocation. That block is then prepended to the slab's
// free list.
class SlabAllocatorImpl {
public:
// Allocations host their current index and the index of the next free block.
// Because this is an index, and not a byte offset, it can be much smaller than a size_t.
// TODO(enga): Is uint8_t sufficient?
using Index = uint16_t;
protected:
// This is essentially a singly linked list using indices instead of pointers,
// so we store the index of "this" in |this->index|.
struct IndexLinkNode : PlacementAllocated {
IndexLinkNode(Index index, Index nextIndex);
const Index index; // The index of this block in the slab.
Index nextIndex; // The index of the next available block. kInvalidIndex, if none.
};
struct Slab : PlacementAllocated {
// A slab is placement-allocated into an aligned pointer from a separate allocation.
// Ownership of the allocation is transferred to the slab on creation.
// | ---------- allocation --------- |
// | pad | Slab | data ------------> |
Slab(std::unique_ptr<char[]> allocation, IndexLinkNode* head);
void Splice();
std::unique_ptr<char[]> allocation;
IndexLinkNode* freeList;
Slab* prev;
Slab* next;
Index blocksInUse;
};
SlabAllocatorImpl(Index blocksPerSlab,
uint32_t allocationAlignment,
uint32_t slabBlocksOffset,
uint32_t blockStride,
uint32_t indexLinkNodeOffset);
~SlabAllocatorImpl();
// Allocate a new block of memory.
void* Allocate();
// Deallocate a block of memory.
void Deallocate(void* ptr);
private:
// The maximum value is reserved to indicate the end of the list.
static Index kInvalidIndex;
// Get the IndexLinkNode |offset| slots away.
IndexLinkNode* OffsetFrom(IndexLinkNode* node, std::make_signed_t<Index> offset) const;
// Compute the pointer to the IndexLinkNode from an allocated object.
IndexLinkNode* NodeFromObject(void* object) const;
// Compute the pointer to the object from an IndexLinkNode.
void* ObjectFromNode(IndexLinkNode* node) const;
bool IsNodeInSlab(Slab* slab, IndexLinkNode* node) const;
// The Slab stores a linked-list of free allocations.
// PushFront/PopFront adds/removes an allocation from the free list.
void PushFront(Slab* slab, IndexLinkNode* node) const;
IndexLinkNode* PopFront(Slab* slab) const;
// Replace the current slab with a new one, and chain the old one off of it.
// Both slabs may still be used for for allocation/deallocation, but older slabs
// will be a little slower to get allocations from.
void GetNewSlab();
const uint32_t mAllocationAlignment;
// | Slab | pad | Obj | pad | Node | pad | Obj | pad | Node | pad | ....
// | -----------| mSlabBlocksOffset
// | | ---------------------- | mBlockStride
// | | ----------| mIndexLinkNodeOffset
// | --------------------------------------> (mSlabBlocksOffset + mBlocksPerSlab * mBlockStride)
// A Slab is metadata, followed by the aligned memory to allocate out of. |mSlabBlocksOffset| is
// the offset to the start of the aligned memory region.
const uint32_t mSlabBlocksOffset;
// Because alignment of allocations may introduce padding, |mBlockStride| is the
// distance between aligned blocks of (Allocation + IndexLinkNode)
const uint32_t mBlockStride;
// The IndexLinkNode is stored after the Allocation itself. This is the offset to it.
const uint32_t mIndexLinkNodeOffset;
const Index mBlocksPerSlab; // The total number of blocks in a slab.
const size_t mTotalAllocationSize;
struct SentinelSlab : Slab {
SentinelSlab();
~SentinelSlab();
void Prepend(Slab* slab);
};
SentinelSlab mAvailableSlabs; // Available slabs to service allocations.
SentinelSlab mFullSlabs; // Full slabs. Stored here so we can skip checking them.
SentinelSlab mRecycledSlabs; // Recycled slabs. Not immediately added to |mAvailableSlabs| so
// we don't thrash the current "active" slab.
};
template <typename T, size_t ObjectSize = 0>
class SlabAllocator : public SlabAllocatorImpl {
// Helper struct for computing alignments
struct Storage {
Slab slab;
struct Block {
// If the size is unspecified, use sizeof(T) as default. Defined here and not as a
// default template parameter because T may be an incomplete type at the time of
// declaration.
static constexpr size_t kSize = ObjectSize == 0 ? sizeof(T) : ObjectSize;
static_assert(kSize >= sizeof(T), "");
alignas(alignof(T)) char object[kSize];
IndexLinkNode node;
} blocks[];
};
public:
SlabAllocator(Index blocksPerSlab)
: SlabAllocatorImpl(
blocksPerSlab,
alignof(Storage), // allocationAlignment
offsetof(Storage, blocks[0]), // slabBlocksOffset
offsetof(Storage, blocks[1]) - offsetof(Storage, blocks[0]), // blockStride
offsetof(typename Storage::Block, node) // indexLinkNodeOffset
) {
}
template <typename... Args>
T* Allocate(Args&&... args) {
void* ptr = SlabAllocatorImpl::Allocate();
return new (ptr) T(std::forward<Args>(args)...);
}
void Deallocate(T* object) {
SlabAllocatorImpl::Deallocate(object);
}
};
#endif // COMMON_SLABALLOCATOR_H_
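
A minimal usage sketch of the SlabAllocator<T> template declared above (the Foo type and the block count of 64 are illustrative; the unit tests below exercise the same pattern):

    #include "common/SlabAllocator.h"

    struct Foo : PlacementAllocated {      // allocated objects derive from PlacementAllocated
        explicit Foo(int v) : value(v) {}
        int value;
    };

    void Example() {
        SlabAllocator<Foo> allocator(64);  // up to 64 blocks per slab (illustrative)
        Foo* foo = allocator.Allocate(7);  // placement-constructs Foo(7) in a free block
        // ... use |foo| ...
        allocator.Deallocate(foo);         // returns the block to its slab's free list
    }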

src/tests/unittests/PlacementAllocatedTests.cpp (new file)

@@ -0,0 +1,115 @@
// Copyright 2020 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <cstdint>
#include <cstdlib>
#include <memory>
#include "common/PlacementAllocated.h"
using namespace testing;
namespace {
enum class DestructedClass {
Foo,
Bar,
};
class MockDestructor {
public:
MOCK_METHOD2(Call, void(void*, DestructedClass));
};
std::unique_ptr<StrictMock<MockDestructor>> mockDestructor;
class PlacementAllocatedTests : public Test {
void SetUp() override {
mockDestructor = std::make_unique<StrictMock<MockDestructor>>();
}
void TearDown() override {
mockDestructor = nullptr;
}
};
struct Foo : PlacementAllocated {
virtual ~Foo() {
mockDestructor->Call(this, DestructedClass::Foo);
}
};
struct Bar : Foo {
~Bar() override {
mockDestructor->Call(this, DestructedClass::Bar);
}
};
} // namespace
// Test that deletion calls the destructor and does not free memory.
TEST_F(PlacementAllocatedTests, DeletionDoesNotFreeMemory) {
void* ptr = malloc(sizeof(Foo));
Foo* foo = new (ptr) Foo();
EXPECT_CALL(*mockDestructor, Call(foo, DestructedClass::Foo));
delete foo;
// Touch the memory, this shouldn't crash.
static_assert(sizeof(Foo) >= sizeof(uint32_t), "");
*reinterpret_cast<uint32_t*>(foo) = 42;
free(ptr);
}
// Test that destructing an instance of a derived class calls the derived, then base destructor, and
// does not free memory.
TEST_F(PlacementAllocatedTests, DeletingDerivedClassCallsBaseDestructor) {
void* ptr = malloc(sizeof(Bar));
Bar* bar = new (ptr) Bar();
{
InSequence s;
EXPECT_CALL(*mockDestructor, Call(bar, DestructedClass::Bar));
EXPECT_CALL(*mockDestructor, Call(bar, DestructedClass::Foo));
delete bar;
}
// Touch the memory, this shouldn't crash.
static_assert(sizeof(Bar) >= sizeof(uint32_t), "");
*reinterpret_cast<uint32_t*>(bar) = 42;
free(ptr);
}
// Test that deleting a derived instance through a base-class pointer calls the derived, then base destructor, and
// does not free memory.
TEST_F(PlacementAllocatedTests, DeletingBaseClassCallsDerivedDestructor) {
void* ptr = malloc(sizeof(Bar));
Foo* foo = new (ptr) Bar();
{
InSequence s;
EXPECT_CALL(*mockDestructor, Call(foo, DestructedClass::Bar));
EXPECT_CALL(*mockDestructor, Call(foo, DestructedClass::Foo));
delete foo;
}
// Touch the memory, this shouldn't crash.
static_assert(sizeof(Bar) >= sizeof(uint32_t), "");
*reinterpret_cast<uint32_t*>(foo) = 42;
free(ptr);
}

src/tests/unittests/SlabAllocatorTests.cpp (new file)

@@ -0,0 +1,180 @@
// Copyright 2020 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include <algorithm>
#include <cstdint>
#include <set>
#include <vector>
#include "common/Math.h"
#include "common/SlabAllocator.h"
namespace {
struct Foo : public PlacementAllocated {
Foo(int value) : value(value) {
}
int value;
};
struct alignas(256) AlignedFoo : public Foo {
using Foo::Foo;
};
} // namespace
// Test that a slab allocator of a single object works.
TEST(SlabAllocatorTests, Single) {
SlabAllocator<Foo> allocator(1);
Foo* obj = allocator.Allocate(4);
EXPECT_EQ(obj->value, 4);
allocator.Deallocate(obj);
}
// Allocate multiple objects and check their data is correct.
TEST(SlabAllocatorTests, AllocateSequential) {
// Check small alignment
{
SlabAllocator<Foo> allocator(5);
std::vector<Foo*> objects;
for (int i = 0; i < 10; ++i) {
auto* ptr = allocator.Allocate(i);
EXPECT_TRUE(std::find(objects.begin(), objects.end(), ptr) == objects.end());
objects.push_back(ptr);
}
for (int i = 0; i < 10; ++i) {
// Check that the value is correct and hasn't been trampled.
EXPECT_EQ(objects[i]->value, i);
// Check that the alignment is correct.
EXPECT_TRUE(IsPtrAligned(objects[i], alignof(Foo)));
}
// Deallocate all of the objects.
for (Foo* object : objects) {
allocator.Deallocate(object);
}
}
// Check large alignment
{
SlabAllocator<AlignedFoo> allocator(9);
std::vector<AlignedFoo*> objects;
for (int i = 0; i < 21; ++i) {
auto* ptr = allocator.Allocate(i);
EXPECT_TRUE(std::find(objects.begin(), objects.end(), ptr) == objects.end());
objects.push_back(ptr);
}
for (int i = 0; i < 21; ++i) {
// Check that the value is correct and hasn't been trampled.
EXPECT_EQ(objects[i]->value, i);
// Check that the alignment is correct.
EXPECT_TRUE(IsPtrAligned(objects[i], 256));
}
// Deallocate all of the objects.
for (AlignedFoo* object : objects) {
allocator.Deallocate(object);
}
}
}
// Test that when reallocating a number of objects <= pool size, all memory is reused.
TEST(SlabAllocatorTests, ReusesFreedMemory) {
SlabAllocator<Foo> allocator(17);
// Allocate a number of objects.
std::set<Foo*> objects;
for (int i = 0; i < 17; ++i) {
EXPECT_TRUE(objects.insert(allocator.Allocate(i)).second);
}
// Deallocate all of the objects.
for (Foo* object : objects) {
allocator.Deallocate(object);
}
std::set<Foo*> reallocatedObjects;
// Allocate objects again. All of the pointers should be the same.
for (int i = 0; i < 17; ++i) {
Foo* ptr = allocator.Allocate(i);
EXPECT_TRUE(reallocatedObjects.insert(ptr).second);
EXPECT_TRUE(std::find(objects.begin(), objects.end(), ptr) != objects.end());
}
// Deallocate all of the objects.
for (Foo* object : objects) {
allocator.Deallocate(object);
}
}
// Test many allocations and deallocations. Meant to catch corner cases with partially
// empty slabs.
TEST(SlabAllocatorTests, AllocateDeallocateMany) {
SlabAllocator<Foo> allocator(17);
std::set<Foo*> objects;
std::set<Foo*> set3;
std::set<Foo*> set7;
// Allocate many objects.
for (uint32_t i = 0; i < 800; ++i) {
Foo* object = allocator.Allocate(i);
EXPECT_TRUE(objects.insert(object).second);
if (i % 3 == 0) {
set3.insert(object);
} else if (i % 7 == 0) {
set7.insert(object);
}
}
// Deallocate every 3rd object.
for (Foo* object : set3) {
allocator.Deallocate(object);
objects.erase(object);
}
// Allocate many more objects
for (uint32_t i = 0; i < 800; ++i) {
Foo* object = allocator.Allocate(i);
EXPECT_TRUE(objects.insert(object).second);
if (i % 7 == 0) {
set7.insert(object);
}
}
// Deallocate every 7th object from the first and second rounds of allocation.
for (Foo* object : set7) {
allocator.Deallocate(object);
objects.erase(object);
}
// Allocate objects again
for (uint32_t i = 0; i < 800; ++i) {
Foo* object = allocator.Allocate(i);
EXPECT_TRUE(objects.insert(object).second);
}
// Deallocate the rest of the objects
for (Foo* object : objects) {
allocator.Deallocate(object);
}
}