Initial commit of all the NXT integration.

This is effectively a squashed history; the contributors were:
 - Kai Ninomiya
 - Corentin Wallez
This commit is contained in:
Corentin Wallez
2017-04-20 14:38:20 -04:00
commit f07e3bd4c9
134 changed files with 24658 additions and 0 deletions

133
src/backend/CMakeLists.txt Normal file
View File

@@ -0,0 +1,133 @@
# Copyright 2017 The NXT Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Shorthands for the backend source directories.
set(COMMON_DIR ${CMAKE_CURRENT_SOURCE_DIR}/common)
set(METAL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/metal)
set(OPENGL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/opengl)
set(TESTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/tests)

# Backend-agnostic ("common") sources shared by every API backend.
list(APPEND BACKEND_SOURCES
    ${COMMON_DIR}/BindGroup.cpp
    ${COMMON_DIR}/BindGroup.h
    ${COMMON_DIR}/BindGroupLayout.cpp
    ${COMMON_DIR}/BindGroupLayout.h
    ${COMMON_DIR}/BitSetIterator.h
    ${COMMON_DIR}/Buffer.cpp
    ${COMMON_DIR}/Buffer.h
    ${COMMON_DIR}/CommandAllocator.cpp
    ${COMMON_DIR}/CommandAllocator.h
    ${COMMON_DIR}/CommandBuffer.cpp
    ${COMMON_DIR}/CommandBuffer.h
    ${COMMON_DIR}/Device.cpp
    ${COMMON_DIR}/Device.h
    ${COMMON_DIR}/Forward.h
    ${COMMON_DIR}/InputState.cpp
    ${COMMON_DIR}/InputState.h
    ${COMMON_DIR}/Math.cpp
    ${COMMON_DIR}/Math.h
    ${COMMON_DIR}/PerStage.cpp
    ${COMMON_DIR}/PerStage.h
    ${COMMON_DIR}/Pipeline.cpp
    ${COMMON_DIR}/Pipeline.h
    ${COMMON_DIR}/PipelineLayout.cpp
    ${COMMON_DIR}/PipelineLayout.h
    ${COMMON_DIR}/Queue.cpp
    ${COMMON_DIR}/Queue.h
    ${COMMON_DIR}/RefCounted.cpp
    ${COMMON_DIR}/RefCounted.h
    ${COMMON_DIR}/Sampler.cpp
    ${COMMON_DIR}/Sampler.h
    ${COMMON_DIR}/ShaderModule.cpp
    ${COMMON_DIR}/ShaderModule.h
    ${COMMON_DIR}/Texture.cpp
    ${COMMON_DIR}/Texture.h
    ${COMMON_DIR}/ToBackend.h
)

# OpenGL Backend
# Generate() is a project helper (presumably defined in a parent CMakeLists —
# confirm) that runs the API code generator and wraps the output in a library.
Generate(
    LIB_NAME opengl_autogen
    LIB_TYPE STATIC
    PRINT_NAME "OpenGL backend autogenerated files"
    COMMAND_LINE_ARGS
        ${GENERATOR_COMMON_ARGS}
        -T opengl
)
target_link_libraries(opengl_autogen glfw glad nxtcpp)
target_include_directories(opengl_autogen PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})
target_include_directories(opengl_autogen PUBLIC ${GENERATED_DIR})
SetCXX14(opengl_autogen)
# PIC is required because these static objects are linked into the shared nxt_backend.
SetPIC(opengl_autogen)

list(APPEND BACKEND_SOURCES
    ${OPENGL_DIR}/CommandBufferGL.cpp
    ${OPENGL_DIR}/CommandBufferGL.h
    ${OPENGL_DIR}/OpenGLBackend.cpp
    ${OPENGL_DIR}/OpenGLBackend.h
    ${OPENGL_DIR}/PipelineGL.cpp
    ${OPENGL_DIR}/PipelineGL.h
    ${OPENGL_DIR}/PipelineLayoutGL.cpp
    ${OPENGL_DIR}/PipelineLayoutGL.h
    ${OPENGL_DIR}/SamplerGL.cpp
    ${OPENGL_DIR}/SamplerGL.h
    ${OPENGL_DIR}/ShaderModuleGL.cpp
    ${OPENGL_DIR}/ShaderModuleGL.h
    ${OPENGL_DIR}/TextureGL.cpp
    ${OPENGL_DIR}/TextureGL.h
)

# Metal Backend (only built on macOS)
if (APPLE)
    Generate(
        LIB_NAME metal_autogen
        LIB_TYPE STATIC
        PRINT_NAME "Metal backend autogenerated files"
        COMMAND_LINE_ARGS
            ${GENERATOR_COMMON_ARGS}
            -T metal
    )
    target_link_libraries(metal_autogen glfw glad nxtcpp "-framework QuartzCore" "-framework Metal")
    target_include_directories(metal_autogen PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})
    target_include_directories(metal_autogen PUBLIC ${GENERATED_DIR})
    SetCXX14(metal_autogen)
    SetPIC(metal_autogen)

    list(APPEND BACKEND_SOURCES
        ${METAL_DIR}/MetalBackend.mm
        ${METAL_DIR}/MetalBackend.h
    )
endif()

# The backend shared library: common sources plus whichever per-API sources
# were appended above.
add_library(nxt_backend SHARED ${BACKEND_SOURCES})
target_link_libraries(nxt_backend opengl_autogen glfw glad spirv-cross)
if (APPLE)
    target_link_libraries(nxt_backend metal_autogen)
endif()
target_include_directories(nxt_backend PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})
SetCXX14(nxt_backend)

# Unit tests for the backend-agnostic helpers.
add_executable(backend_unittests
    ${TESTS_DIR}/BitSetIteratorTests.cpp
    ${TESTS_DIR}/CommandAllocatorTests.cpp
    ${TESTS_DIR}/MathTests.cpp
    ${TESTS_DIR}/PerStageTests.cpp
    ${TESTS_DIR}/RefCountedTests.cpp
    ${TESTS_DIR}/ToBackendTests.cpp
    ${TESTS_DIR}/UnittestsMain.cpp
)
target_link_libraries(backend_unittests nxt_backend gtest)
target_include_directories(backend_unittests PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})
SetCXX14(backend_unittests)

View File

@@ -0,0 +1,213 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "BindGroup.h"
#include "BindGroupLayout.h"
#include "Buffer.h"
#include "Device.h"
#include "Texture.h"
namespace backend {
// BindGroup
// Takes ownership of the layout ref and the per-slot binding refs gathered by
// the builder.
BindGroupBase::BindGroupBase(BindGroupBuilder* builder)
    : layout(std::move(builder->layout)), usage(builder->usage), bindings(std::move(builder->bindings)) {
}

const BindGroupLayoutBase* BindGroupBase::GetLayout() const {
    return layout.Get();
}

nxt::BindGroupUsage BindGroupBase::GetUsage() const {
    return usage;
}

// Returns the binding in slot `binding` as a buffer view. Preconditions
// (asserted): the slot is in range, populated, and declared as a uniform or
// storage buffer in the layout. NOTE(review): bindings are stored type-erased
// as Ref<RefCounted>; the reinterpret_cast back to the concrete type assumes
// the RefCounted subobject shares the object's address — confirm the layout
// assumption (a static_cast would need the complete type, and Sampler.h is
// not included here).
BufferViewBase* BindGroupBase::GetBindingAsBufferView(size_t binding) {
    ASSERT(binding < kMaxBindingsPerGroup);
    ASSERT(layout->GetBindingInfo().mask[binding]);
    ASSERT(layout->GetBindingInfo().types[binding] == nxt::BindingType::UniformBuffer ||
           layout->GetBindingInfo().types[binding] == nxt::BindingType::StorageBuffer);
    return reinterpret_cast<BufferViewBase*>(bindings[binding].Get());
}

// Same contract as GetBindingAsBufferView, for sampler slots.
SamplerBase* BindGroupBase::GetBindingAsSampler(size_t binding) {
    ASSERT(binding < kMaxBindingsPerGroup);
    ASSERT(layout->GetBindingInfo().mask[binding]);
    ASSERT(layout->GetBindingInfo().types[binding] == nxt::BindingType::Sampler);
    return reinterpret_cast<SamplerBase*>(bindings[binding].Get());
}

// Same contract as GetBindingAsBufferView, for sampled-texture slots.
TextureViewBase* BindGroupBase::GetBindingAsTextureView(size_t binding) {
    ASSERT(binding < kMaxBindingsPerGroup);
    ASSERT(layout->GetBindingInfo().mask[binding]);
    ASSERT(layout->GetBindingInfo().types[binding] == nxt::BindingType::SampledTexture);
    return reinterpret_cast<TextureViewBase*>(bindings[binding].Get());
}
// BindGroupBuilder
// Bit flags recording which one-shot properties have been set on the builder.
enum BindGroupSetProperties {
    BINDGROUP_PROPERTY_USAGE = 0x1,
    BINDGROUP_PROPERTY_LAYOUT = 0x2,
};

BindGroupBuilder::BindGroupBuilder(DeviceBase* device)
    : device(device) {
}

// True once GetResult() has successfully produced a bind group.
bool BindGroupBuilder::WasConsumed() const {
    return consumed;
}

// Validates that both required properties were set and that exactly the slots
// declared by the layout were populated, then asks the device to create the
// backend bind group. Reports an error and returns nullptr on failure; the
// builder is only marked consumed on success.
BindGroupBase* BindGroupBuilder::GetResult() {
    constexpr int allProperties = BINDGROUP_PROPERTY_USAGE | BINDGROUP_PROPERTY_LAYOUT;
    if ((propertiesSet & allProperties) != allProperties) {
        device->HandleError("Bindgroup missing properties");
        return nullptr;
    }

    if (setMask != layout->GetBindingInfo().mask) {
        device->HandleError("Bindgroup missing bindings");
        return nullptr;
    }

    consumed = true;
    return device->CreateBindGroup(this);
}

// Sets the layout; may only be called once.
void BindGroupBuilder::SetLayout(BindGroupLayoutBase* layout) {
    if ((propertiesSet & BINDGROUP_PROPERTY_LAYOUT) != 0) {
        device->HandleError("Bindgroup layout property set multiple times");
        return;
    }

    this->layout = layout;
    propertiesSet |= BINDGROUP_PROPERTY_LAYOUT;
}

// Sets the usage; may only be called once.
void BindGroupBuilder::SetUsage(nxt::BindGroupUsage usage) {
    if ((propertiesSet & BINDGROUP_PROPERTY_USAGE) != 0) {
        device->HandleError("Bindgroup usage property set multiple times");
        return;
    }

    this->usage = usage;
    propertiesSet |= BINDGROUP_PROPERTY_USAGE;
}
// Binds `count` buffer views starting at slot `start`. Each targeted slot must
// be declared as a uniform or storage buffer in the layout, and the backing
// buffer must allow the matching usage bit.
void BindGroupBuilder::SetBufferViews(uint32_t start, uint32_t count, BufferViewBase* const * bufferViews) {
    if (!SetBindingsValidationBase(start, count)) {
        return;
    }

    const auto& layoutInfo = layout->GetBindingInfo();
    for (size_t i = start, j = 0; i < start + count; ++i, ++j) {
        nxt::BufferUsageBit requiredBit;
        switch (layoutInfo.types[i]) {
            case nxt::BindingType::UniformBuffer:
                requiredBit = nxt::BufferUsageBit::Uniform;
                break;
            case nxt::BindingType::StorageBuffer:
                requiredBit = nxt::BufferUsageBit::Storage;
                break;
            // Non-buffer slots are an error; returning here also guarantees
            // requiredBit is never read uninitialized.
            case nxt::BindingType::Sampler:
            case nxt::BindingType::SampledTexture:
                device->HandleError("Setting buffer for a wrong binding type");
                return;
        }

        if (!(bufferViews[j]->GetBuffer()->GetAllowedUsage() & requiredBit)) {
            device->HandleError("Buffer needs to allow the correct usage bit");
            return;
        }
    }

    SetBindingsBase(start, count, reinterpret_cast<RefCounted* const *>(bufferViews));
}
void BindGroupBuilder::SetSamplers(uint32_t start, uint32_t count, SamplerBase* const * samplers) {
if (!SetBindingsValidationBase(start, count)) {
return;
}
const auto& layoutInfo = layout->GetBindingInfo();
for (size_t i = start, j = 0; i < start + count; ++i, ++j) {
if (layoutInfo.types[i] != nxt::BindingType::Sampler) {
device->HandleError("Setting binding for a wrong layout binding type");
return;
}
}
SetBindingsBase(start, count, reinterpret_cast<RefCounted* const *>(samplers));
}
// Binds `count` texture views starting at slot `start`. Each targeted slot
// must be declared as a sampled texture in the layout, and the backing texture
// must allow the Sampled usage.
void BindGroupBuilder::SetTextureViews(uint32_t start, uint32_t count, TextureViewBase* const * textureViews) {
    if (!SetBindingsValidationBase(start, count)) {
        return;
    }

    const auto& layoutInfo = layout->GetBindingInfo();
    for (size_t i = start, j = 0; i < start + count; ++i, ++j) {
        if (layoutInfo.types[i] != nxt::BindingType::SampledTexture) {
            device->HandleError("Setting binding for a wrong layout binding type");
            return;
        }

        if (!(textureViews[j]->GetTexture()->GetAllowedUsage() & nxt::TextureUsageBit::Sampled)) {
            device->HandleError("Texture needs to allow the sampled usage bit");
            return;
        }
    }

    SetBindingsBase(start, count, reinterpret_cast<RefCounted* const *>(textureViews));
}
// Records `objects` into binding slots [start, start + count) and marks those
// slots as set. Type-specific validation has already been done by the caller.
void BindGroupBuilder::SetBindingsBase(uint32_t start, uint32_t count, RefCounted* const * objects) {
    for (size_t i = start, j = 0; i < start + count; ++i, ++j) {
        setMask.set(i);
        bindings[i] = objects[j];
    }
}

// Validation shared by all the Set*() binding calls: the range must fit in the
// group, the layout must already be set, and every targeted slot must exist in
// the layout and not have been set before. Reports an error and returns false
// on failure.
bool BindGroupBuilder::SetBindingsValidationBase(uint32_t start, uint32_t count) {
    // Sum in 64 bits: with 32-bit arithmetic a huge start/count pair could
    // wrap around and slip past the bounds check.
    if (static_cast<uint64_t>(start) + count > kMaxBindingsPerGroup) {
        device->HandleError("Setting bindings type over maximum number of bindings");
        return false;
    }

    if ((propertiesSet & BINDGROUP_PROPERTY_LAYOUT) == 0) {
        device->HandleError("Bindgroup layout must be set before views");
        return false;
    }

    const auto& layoutInfo = layout->GetBindingInfo();
    for (size_t i = start; i < start + count; ++i) {
        if (setMask[i]) {
            device->HandleError("Setting already set binding");
            return false;
        }

        if (!layoutInfo.mask[i]) {
            device->HandleError("Setting binding that isn't present in the layout");
            return false;
        }
    }

    return true;
}
}

View File

@@ -0,0 +1,95 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_BINDGROUP_H_
#define BACKEND_COMMON_BINDGROUP_H_
#include "Forward.h"
#include "RefCounted.h"
#include "nxt/nxtcpp.h"
#include <array>
#include <bitset>
#include <type_traits>
namespace backend {
// Common (backend-agnostic) representation of a bind group: an immutable set
// of buffer/sampler/texture bindings matching a BindGroupLayout.
class BindGroupBase : public RefCounted {
    public:
        BindGroupBase(BindGroupBuilder* builder);

        const BindGroupLayoutBase* GetLayout() const;
        nxt::BindGroupUsage GetUsage() const;
        // The GetBindingAs* accessors require (asserted in the .cpp) that the
        // slot is populated and has the matching type in the layout.
        BufferViewBase* GetBindingAsBufferView(size_t binding);
        SamplerBase* GetBindingAsSampler(size_t binding);
        TextureViewBase* GetBindingAsTextureView(size_t binding);

    private:
        Ref<BindGroupLayoutBase> layout;
        nxt::BindGroupUsage usage;
        // Bindings are stored type-erased; the layout's type info says what
        // each slot actually holds.
        std::array<Ref<RefCounted>, kMaxBindingsPerGroup> bindings;
};
// Builder used through the NXT API to incrementally describe a bind group.
// The templated Set* overloads accept backend-specific subclasses directly and
// forward to the *Base overloads after a compile-time base-class check.
class BindGroupBuilder : public RefCounted {
    public:
        BindGroupBuilder(DeviceBase* device);

        bool WasConsumed() const;

        // NXT API
        BindGroupBase* GetResult();
        void SetLayout(BindGroupLayoutBase* layout);
        void SetUsage(nxt::BindGroupUsage usage);

        template<typename T>
        void SetBufferViews(uint32_t start, uint32_t count, T* const* bufferViews) {
            static_assert(std::is_base_of<BufferViewBase, T>::value, "");
            SetBufferViews(start, count, reinterpret_cast<BufferViewBase* const*>(bufferViews));
        }
        void SetBufferViews(uint32_t start, uint32_t count, BufferViewBase* const * bufferViews);

        template<typename T>
        void SetSamplers(uint32_t start, uint32_t count, T* const* samplers) {
            static_assert(std::is_base_of<SamplerBase, T>::value, "");
            SetSamplers(start, count, reinterpret_cast<SamplerBase* const*>(samplers));
        }
        void SetSamplers(uint32_t start, uint32_t count, SamplerBase* const * samplers);

        template<typename T>
        void SetTextureViews(uint32_t start, uint32_t count, T* const* textureViews) {
            static_assert(std::is_base_of<TextureViewBase, T>::value, "");
            SetTextureViews(start, count, reinterpret_cast<TextureViewBase* const*>(textureViews));
        }
        void SetTextureViews(uint32_t start, uint32_t count, TextureViewBase* const * textureViews);

    private:
        friend class BindGroupBase;

        // Helpers shared by the three Set* entry points; see the .cpp.
        void SetBindingsBase(uint32_t start, uint32_t count, RefCounted* const * objects);
        bool SetBindingsValidationBase(uint32_t start, uint32_t count);

        DeviceBase* device;
        std::bitset<kMaxBindingsPerGroup> setMask;   // which slots were populated
        int propertiesSet = 0;                       // BindGroupSetProperties flags
        bool consumed = false;
        Ref<BindGroupLayoutBase> layout;
        nxt::BindGroupUsage usage;
        std::array<Ref<RefCounted>, kMaxBindingsPerGroup> bindings;
};
}
#endif // BACKEND_COMMON_BINDGROUP_H_

View File

@@ -0,0 +1,144 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "BindGroupLayout.h"
#include "Device.h"
#include <functional>
namespace backend {
namespace {
// Workaround for Chrome's stdlib having a broken std::hash for enums and bitsets
// Hashes an enum value via its underlying integral type.
template<typename T>
typename std::enable_if<std::is_enum<T>::value, size_t>::type Hash(T value) {
    using Integral = typename nxt::UnderlyingType<T>::type;
    return std::hash<Integral>()(static_cast<Integral>(value));
}

// Hashes a bitset; only valid while the whole set fits in an unsigned long
// long (enforced by the static_assert, since to_ullong would throw otherwise).
template<size_t N>
size_t Hash(const std::bitset<N>& value) {
    static_assert(N <= sizeof(unsigned long long) * 8, "");
    return std::hash<unsigned long long>()(value.to_ullong());
}
// TODO(cwallez@chromium.org): see if we can use boost's hash_combine or some equivalent
// Folds h2 into *h1 with a cheap shift/rotate mix. The rotation width is
// derived from the real size of size_t so the right shift is well-defined on
// 32-bit targets too (the previous version hard-coded 64, making
// `h2 >> (64 - 7)` undefined behavior when size_t is 32 bits wide).
void CombineHashes(size_t* h1, size_t h2) {
    constexpr size_t kSizeTBits = sizeof(size_t) * 8;
    *h1 ^= (h2 << 7) + (h2 >> (kSizeTBits - 7)) + 0x304975;
}
// Hashes a layout description: the occupancy mask plus, for each declared
// slot, its visibility and binding type.
size_t HashBindingInfo(const BindGroupLayoutBase::LayoutBindingInfo& info) {
    size_t hash = Hash(info.mask);

    for (size_t binding = 0; binding < kMaxBindingsPerGroup; ++binding) {
        if (info.mask[binding]) {
            CombineHashes(&hash, Hash(info.visibilities[binding]));
            CombineHashes(&hash, Hash(info.types[binding]));
        }
    }

    return hash;
}
// Two layout descriptions are equal when they declare the same slots and each
// declared slot matches in visibility and type; undeclared slots are ignored.
bool operator== (const BindGroupLayoutBase::LayoutBindingInfo& a, const BindGroupLayoutBase::LayoutBindingInfo& b) {
    if (a.mask != b.mask) {
        return false;
    }

    for (size_t slot = 0; slot < kMaxBindingsPerGroup; ++slot) {
        if (!a.mask[slot]) {
            continue;
        }
        if (a.visibilities[slot] != b.visibilities[slot] ||
            a.types[slot] != b.types[slot]) {
            return false;
        }
    }
    return true;
}
}
// BindGroupLayoutBase
// A BindGroupLayoutBase built with blueprint == true is a short-lived object
// used only as a key for the device's layout cache; blueprints are never
// registered in the cache and so must not unregister themselves on destruction.
BindGroupLayoutBase::BindGroupLayoutBase(BindGroupLayoutBuilder* builder, bool blueprint)
    : device(builder->device), bindingInfo(builder->bindingInfo), blueprint(blueprint) {
}

BindGroupLayoutBase::~BindGroupLayoutBase() {
    // Do not unregister from the cache if we are a blueprint: it was never
    // inserted there.
    if (!blueprint) {
        device->UncacheBindGroupLayout(this);
    }
}

const BindGroupLayoutBase::LayoutBindingInfo& BindGroupLayoutBase::GetBindingInfo() const {
    return bindingInfo;
}
// BindGroupLayoutBuilder
BindGroupLayoutBuilder::BindGroupLayoutBuilder(DeviceBase* device) : device(device) {
}

// True once GetResult() has been called.
bool BindGroupLayoutBuilder::WasConsumed() const {
    return consumed;
}

const BindGroupLayoutBase::LayoutBindingInfo& BindGroupLayoutBuilder::GetBindingInfo() const {
    return bindingInfo;
}

// Produces the layout, deduplicated through the device cache: a stack-only
// "blueprint" layout is used as the lookup key so an equivalent cached layout
// can be returned instead of a fresh allocation. The extra Reference() hands
// one ref to the caller.
BindGroupLayoutBase* BindGroupLayoutBuilder::GetResult() {
    consumed = true;
    BindGroupLayoutBase blueprint(this, true);

    auto* result = device->GetOrCreateBindGroupLayout(&blueprint, this);
    result->Reference();
    return result;
}

// Declares the slots [start, start + count) with the given visibility and
// binding type; each slot may only be declared once.
void BindGroupLayoutBuilder::SetBindingsType(nxt::ShaderStageBit visibility, nxt::BindingType bindingType, uint32_t start, uint32_t count) {
    // Sum in 64 bits: with 32-bit arithmetic a huge start/count pair could
    // wrap around and slip past the bounds check.
    if (static_cast<uint64_t>(start) + count > kMaxBindingsPerGroup) {
        device->HandleError("Setting bindings type over maximum number of bindings");
        return;
    }

    for (size_t i = start; i < start + count; i++) {
        if (bindingInfo.mask[i]) {
            device->HandleError("Setting already set binding type");
            return;
        }

        bindingInfo.mask.set(i);
        bindingInfo.visibilities[i] = visibility;
        bindingInfo.types[i] = bindingType;
    }
}
// BindGroupLayoutCacheFuncs
// Hash functor for the device's unordered_set-based layout cache.
size_t BindGroupLayoutCacheFuncs::operator() (const BindGroupLayoutBase* bgl) const {
    return HashBindingInfo(bgl->GetBindingInfo());
}

// Equality functor: two layouts are interchangeable iff their binding
// descriptions compare equal.
bool BindGroupLayoutCacheFuncs::operator() (const BindGroupLayoutBase* a, const BindGroupLayoutBase* b) const {
    return a->GetBindingInfo() == b->GetBindingInfo();
}
}

View File

@@ -0,0 +1,76 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_BINDGROUPLAYOUT_H_
#define BACKEND_COMMON_BINDGROUPLAYOUT_H_
#include "Forward.h"
#include "RefCounted.h"
#include "nxt/nxtcpp.h"
#include <array>
#include <bitset>
namespace backend {
// Describes, per binding slot, whether the slot is used (mask), which shader
// stages can see it (visibilities) and what kind of resource it holds (types).
// Instances are deduplicated through a device-level cache; a "blueprint"
// instance is a stack-only cache key that does not unregister itself.
class BindGroupLayoutBase : public RefCounted {
    public:
        BindGroupLayoutBase(BindGroupLayoutBuilder* builder, bool blueprint = false);
        ~BindGroupLayoutBase() override;

        struct LayoutBindingInfo {
            std::array<nxt::ShaderStageBit, kMaxBindingsPerGroup> visibilities;
            std::array<nxt::BindingType, kMaxBindingsPerGroup> types;
            std::bitset<kMaxBindingsPerGroup> mask;
        };
        const LayoutBindingInfo& GetBindingInfo() const;

    private:
        DeviceBase* device;
        LayoutBindingInfo bindingInfo;
        bool blueprint = false;
};
// Builder used through the NXT API to describe a bind group layout.
class BindGroupLayoutBuilder : public RefCounted {
    public:
        BindGroupLayoutBuilder(DeviceBase* device);

        bool WasConsumed() const;
        const BindGroupLayoutBase::LayoutBindingInfo& GetBindingInfo() const;

        // NXT API
        BindGroupLayoutBase* GetResult();
        void SetBindingsType(nxt::ShaderStageBit visibility, nxt::BindingType bindingType, uint32_t start, uint32_t count);

    private:
        friend class BindGroupLayoutBase;

        DeviceBase* device;
        BindGroupLayoutBase::LayoutBindingInfo bindingInfo;
        bool consumed = false;
};
// Implements the functors necessary for the unordered_set<BGL*>-based cache,
// keyed on the binding description rather than pointer identity.
struct BindGroupLayoutCacheFuncs {
    // The hash function
    size_t operator() (const BindGroupLayoutBase* bgl) const;

    // The equality predicate
    bool operator() (const BindGroupLayoutBase* a, const BindGroupLayoutBase* b) const;
};
}
#endif // BACKEND_COMMON_BINDGROUPLAYOUT_H_

View File

@@ -0,0 +1,135 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_BITSETITERATOR_H_
#define BACKEND_COMMON_BITSETITERATOR_H_
#include "Forward.h"
#include "Math.h"
#include <bitset>
#include <limits>
// This is ANGLE's BitSetIterator class with a customizable return type
// TODO(cwallez@chromium.org): it could be optimized, in particular when N <= 64
namespace backend {
// Rounds `value` up to the nearest multiple of `alignment`.
template <typename T>
T roundUp(const T value, const T alignment) {
    const T biased = value + alignment - static_cast<T>(1);
    return (biased / alignment) * alignment;
}
// Iterates over the indices of the set bits of a bitset, returning each index
// converted to T. This is ANGLE's BitSetIterator with a customizable return type.
template <size_t N, typename T>
class BitSetIterator final {
    public:
        BitSetIterator(const std::bitset<N>& bitset);
        BitSetIterator(const BitSetIterator& other);
        BitSetIterator &operator=(const BitSetIterator& other);

        // Forward iterator over set-bit indices, in increasing order.
        class Iterator final {
            public:
                Iterator(const std::bitset<N>& bits);
                Iterator& operator++();

                bool operator==(const Iterator& other) const;
                bool operator!=(const Iterator& other) const;
                // Index of the current set bit, converted to T.
                T operator*() const { return static_cast<T>(mCurrentBit); }

            private:
                unsigned long getNextBit();

                static const size_t BitsPerWord = sizeof(unsigned long) * 8;
                // Remaining bits; shifted right a word at a time as they are consumed.
                std::bitset<N> mBits;
                unsigned long mCurrentBit;
                // Number of bits already shifted out of mBits.
                unsigned long mOffset;
        };

        Iterator begin() const { return Iterator(mBits); }
        // An empty bitset produces the past-the-end iterator state.
        Iterator end() const { return Iterator(std::bitset<N>(0)); }

    private:
        // Must not be const: the defined operator= assigns to it, and a const
        // member made that assignment ill-formed whenever operator= was
        // instantiated.
        std::bitset<N> mBits;
};
template <size_t N, typename T>
BitSetIterator<N, T>::BitSetIterator(const std::bitset<N>& bitset)
    : mBits(bitset) {
}

template <size_t N, typename T>
BitSetIterator<N, T>::BitSetIterator(const BitSetIterator& other)
    : mBits(other.mBits) {
}

// NOTE(review): assigning mBits requires it to be a non-const member of
// BitSetIterator for this operator= to compile when instantiated — confirm
// the member declaration.
template <size_t N, typename T>
BitSetIterator<N, T>& BitSetIterator<N, T>::operator=(const BitSetIterator& other) {
    mBits = other.mBits;
    return *this;
}
// Positions the iterator on the first set bit, or — when no bit is set — on
// the canonical past-the-end state (offset rounded up to a whole word count).
template <size_t N, typename T>
BitSetIterator<N, T>::Iterator::Iterator(const std::bitset<N>& bits)
    : mBits(bits), mCurrentBit(0), mOffset(0) {
    if (bits.any()) {
        mCurrentBit = getNextBit();
    } else {
        mOffset = static_cast<unsigned long>(roundUp(N, BitsPerWord));
    }
}

// Clears the current bit and advances to the next set bit.
template <size_t N, typename T>
typename BitSetIterator<N, T>::Iterator& BitSetIterator<N, T>::Iterator::operator++() {
    ASSERT(mBits.any());
    // mBits has already been shifted right by mOffset, so index relative to it.
    mBits.set(mCurrentBit - mOffset, 0);
    mCurrentBit = getNextBit();
    return *this;
}

template <size_t N, typename T>
bool BitSetIterator<N, T>::Iterator::operator==(const Iterator& other) const {
    return mOffset == other.mOffset && mBits == other.mBits;
}

template <size_t N, typename T>
bool BitSetIterator<N, T>::Iterator::operator!=(const Iterator& other) const {
    return !(*this == other);
}

// Returns the absolute index of the lowest remaining set bit, skipping whole
// unsigned-long-sized words of zeros; returns 0 when no bit is set.
template <size_t N, typename T>
unsigned long BitSetIterator<N, T>::Iterator::getNextBit() {
    // Mask selecting the low word of the bitset. NOTE(review): a mutable
    // function-local static in a header template — initialization is
    // thread-safe in C++11, but this could simply be const; confirm intent.
    static std::bitset<N> wordMask(std::numeric_limits<unsigned long>::max());

    while (mOffset < N) {
        unsigned long wordBits = (mBits & wordMask).to_ulong();
        if (wordBits != 0ul) {
            // ScanForward comes from Math.h (included above).
            return ScanForward(wordBits) + mOffset;
        }

        mBits >>= BitsPerWord;
        mOffset += BitsPerWord;
    }
    return 0;
}
// Helper to avoid needing to specify the template parameter size: wraps a
// bitset so its set-bit indices can be visited with range-for as uint32_t.
template <size_t N>
BitSetIterator<N, uint32_t> IterateBitSet(const std::bitset<N>& bitset) {
    return BitSetIterator<N, uint32_t>(bitset);
}
}
#endif // BACKEND_COMMON_BITSETITERATOR_H_

View File

@@ -0,0 +1,233 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "Buffer.h"
#include "Device.h"
#include <utility>
#include <cstdio>
namespace backend {
// Buffer
// Copies the immutable description out of the builder.
BufferBase::BufferBase(BufferBuilder* builder)
    : device(builder->device),
      size(builder->size),
      allowedUsage(builder->allowedUsage),
      currentUsage(builder->currentUsage) {
}

// NXT API: creates a builder for views into this buffer.
BufferViewBuilder* BufferBase::CreateBufferViewBuilder() {
    return new BufferViewBuilder(device, this);
}

// Size of the buffer in bytes (SetSubData compares byte counts against it).
uint32_t BufferBase::GetSize() const {
    return size;
}

nxt::BufferUsageBit BufferBase::GetAllowedUsage() const {
    return allowedUsage;
}

// The buffer's current usage state.
nxt::BufferUsageBit BufferBase::GetUsage() const {
    return currentUsage;
}
// NXT API: uploads `count` uint32_t words starting at word offset `start`.
// start/count are in uint32_t units while the buffer size is in bytes.
void BufferBase::SetSubData(uint32_t start, uint32_t count, const uint32_t* data) {
    // Compute the byte extent in 64 bits: with 32-bit arithmetic a large
    // start/count could overflow, pass the bounds check and write out of range
    // (same pattern BufferViewBuilder::SetExtent already uses).
    uint64_t requiredSize = (static_cast<uint64_t>(start) + count) * sizeof(uint32_t);
    if (requiredSize > GetSize()) {
        device->HandleError("Buffer subdata out of range");
        return;
    }

    // CPU writes require the buffer to currently be in the Mapped usage.
    if (!(currentUsage & nxt::BufferUsageBit::Mapped)) {
        device->HandleError("Buffer needs the mapped usage bit");
        return;
    }

    SetSubDataImpl(start, count, data);
}
bool BufferBase::IsFrozen() const {
    return frozen;
}

// NOTE(review): this returns true when the buffer is frozen and `usage`
// shares ANY bit with the allowed usage, not when every bit of `usage` is
// allowed — confirm that is the intended contract.
bool BufferBase::HasFrozenUsage(nxt::BufferUsageBit usage) const {
    return frozen && (usage & allowedUsage);
}

// A usage is possible when every requested bit is allowed and the request is
// either read-only (any combination of the read bits) or a single usage bit.
bool BufferBase::IsUsagePossible(nxt::BufferUsageBit allowedUsage, nxt::BufferUsageBit usage) {
    const nxt::BufferUsageBit allReadBits =
        nxt::BufferUsageBit::TransferSrc |
        nxt::BufferUsageBit::Index |
        nxt::BufferUsageBit::Vertex |
        nxt::BufferUsageBit::Uniform;
    bool allowed = (usage & allowedUsage) == usage;
    bool readOnly = (usage & allReadBits) == usage;
    bool singleUse = nxt::HasZeroOrOneBits(usage);
    return allowed && (readOnly || singleUse);
}

// Frozen buffers can never transition again.
bool BufferBase::IsTransitionPossible(nxt::BufferUsageBit usage) const {
    if (frozen) {
        return false;
    }
    return IsUsagePossible(allowedUsage, usage);
}
// Unchecked transition for callers that have already validated; the
// precondition is only asserted in debug builds.
void BufferBase::TransitionUsageImpl(nxt::BufferUsageBit usage) {
    assert(IsTransitionPossible(usage));
    currentUsage = usage;
}

// NXT API: validated usage transition.
void BufferBase::TransitionUsage(nxt::BufferUsageBit usage) {
    if (!IsTransitionPossible(usage)) {
        device->HandleError("Buffer frozen or usage not allowed");
        return;
    }
    TransitionUsageImpl(usage);
}

// NXT API: permanently restricts the buffer to exactly `usage`; after this no
// further transitions are possible.
void BufferBase::FreezeUsage(nxt::BufferUsageBit usage) {
    if (!IsTransitionPossible(usage)) {
        device->HandleError("Buffer frozen or usage not allowed");
        return;
    }
    allowedUsage = usage;
    currentUsage = usage;
    frozen = true;
}
// BufferBuilder
// Bit flags recording which one-shot properties have been set on the builder.
enum BufferSetProperties {
    BUFFER_PROPERTY_ALLOWED_USAGE = 0x1,
    BUFFER_PROPERTY_INITIAL_USAGE = 0x2,
    BUFFER_PROPERTY_SIZE = 0x4,
};

BufferBuilder::BufferBuilder(DeviceBase* device) : device(device) {
}

// True once GetResult() has successfully produced a buffer.
bool BufferBuilder::WasConsumed() const {
    return consumed;
}

// Validates the description and creates the buffer. Allowed usage and size
// are required; the initial usage is optional (the member default applies)
// but must itself be a possible usage.
BufferBase* BufferBuilder::GetResult() {
    constexpr int allProperties = BUFFER_PROPERTY_ALLOWED_USAGE | BUFFER_PROPERTY_SIZE;
    if ((propertiesSet & allProperties) != allProperties) {
        device->HandleError("Buffer missing properties");
        return nullptr;
    }

    if (!BufferBase::IsUsagePossible(allowedUsage, currentUsage)) {
        device->HandleError("Initial buffer usage is not allowed");
        return nullptr;
    }

    consumed = true;
    return device->CreateBuffer(this);
}

// Sets the set of usages the buffer may ever have; may only be called once.
void BufferBuilder::SetAllowedUsage(nxt::BufferUsageBit usage) {
    if ((propertiesSet & BUFFER_PROPERTY_ALLOWED_USAGE) != 0) {
        device->HandleError("Buffer allowedUsage property set multiple times");
        return;
    }

    this->allowedUsage = usage;
    propertiesSet |= BUFFER_PROPERTY_ALLOWED_USAGE;
}

// Sets the usage the buffer starts in; may only be called once.
void BufferBuilder::SetInitialUsage(nxt::BufferUsageBit usage) {
    if ((propertiesSet & BUFFER_PROPERTY_INITIAL_USAGE) != 0) {
        device->HandleError("Buffer initialUsage property set multiple times");
        return;
    }

    this->currentUsage = usage;
    propertiesSet |= BUFFER_PROPERTY_INITIAL_USAGE;
}

// Sets the buffer size in bytes; may only be called once.
void BufferBuilder::SetSize(uint32_t size) {
    if ((propertiesSet & BUFFER_PROPERTY_SIZE) != 0) {
        device->HandleError("Buffer size property set multiple times");
        return;
    }

    this->size = size;
    propertiesSet |= BUFFER_PROPERTY_SIZE;
}
// BufferViewBase
BufferViewBase::BufferViewBase(BufferViewBuilder* builder)
: buffer(std::move(builder->buffer)), size(builder->size), offset(builder->offset) {
}
BufferBase* BufferViewBase::GetBuffer() {
return buffer.Get();
}
uint32_t BufferViewBase::GetSize() const {
return size;
}
uint32_t BufferViewBase::GetOffset() const {
return offset;
}
// BufferViewBuilder
// Bit flags recording which one-shot properties have been set on the builder.
enum BufferViewSetProperties {
    BUFFER_VIEW_PROPERTY_EXTENT = 0x1,
};

BufferViewBuilder::BufferViewBuilder(DeviceBase* device, BufferBase* buffer)
    : device(device), buffer(buffer) {
}

// True once GetResult() has successfully produced a view.
bool BufferViewBuilder::WasConsumed() const {
    return consumed;
}

// Validates that the extent was provided and creates the view.
BufferViewBase* BufferViewBuilder::GetResult() {
    constexpr int allProperties = BUFFER_VIEW_PROPERTY_EXTENT;
    if ((propertiesSet & allProperties) != allProperties) {
        device->HandleError("Buffer view missing properties");
        return nullptr;
    }

    // Mark the builder consumed on success, matching every other builder
    // (previously this flag was never set, so WasConsumed() stayed false).
    consumed = true;
    return device->CreateBufferView(this);
}

// Sets the [offset, offset + size) byte range of the view; may only be called
// once. The end is computed in 64 bits so offset + size cannot overflow.
void BufferViewBuilder::SetExtent(uint32_t offset, uint32_t size) {
    if ((propertiesSet & BUFFER_VIEW_PROPERTY_EXTENT) != 0) {
        device->HandleError("Buffer view extent property set multiple times");
        return;
    }

    uint64_t viewEnd = static_cast<uint64_t>(offset) + static_cast<uint64_t>(size);
    if (viewEnd > static_cast<uint64_t>(buffer->GetSize())) {
        device->HandleError("Buffer view end is OOB");
        return;
    }

    this->offset = offset;
    this->size = size;
    propertiesSet |= BUFFER_VIEW_PROPERTY_EXTENT;
}
}

114
src/backend/common/Buffer.h Normal file
View File

@@ -0,0 +1,114 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_BUFFER_H_
#define BACKEND_COMMON_BUFFER_H_
#include "Forward.h"
#include "RefCounted.h"
#include "nxt/nxtcpp.h"
namespace backend {
// Common representation of a GPU buffer. A buffer tracks its current usage
// and the set of usages it may ever transition to; freezing locks the usage
// down permanently.
class BufferBase : public RefCounted {
    public:
        BufferBase(BufferBuilder* builder);

        uint32_t GetSize() const;   // size in bytes
        nxt::BufferUsageBit GetAllowedUsage() const;
        nxt::BufferUsageBit GetUsage() const;   // current usage state
        static bool IsUsagePossible(nxt::BufferUsageBit allowedUsage, nxt::BufferUsageBit usage);
        bool IsTransitionPossible(nxt::BufferUsageBit usage) const;
        bool IsFrozen() const;
        bool HasFrozenUsage(nxt::BufferUsageBit usage) const;
        // Unchecked transition (asserted in debug); for pre-validated callers.
        void TransitionUsageImpl(nxt::BufferUsageBit usage);

        // NXT API
        BufferViewBuilder* CreateBufferViewBuilder();
        void SetSubData(uint32_t start, uint32_t count, const uint32_t* data);
        void TransitionUsage(nxt::BufferUsageBit usage);
        void FreezeUsage(nxt::BufferUsageBit usage);

    private:
        // Backend-specific upload of `count` uint32_t words at word offset `start`.
        virtual void SetSubDataImpl(uint32_t start, uint32_t count, const uint32_t* data) = 0;

        DeviceBase* device;
        uint32_t size;
        nxt::BufferUsageBit allowedUsage = nxt::BufferUsageBit::None;
        nxt::BufferUsageBit currentUsage = nxt::BufferUsageBit::None;
        bool frozen = false;
};
// Builder used through the NXT API to describe a buffer before creation.
class BufferBuilder : public RefCounted {
    public:
        BufferBuilder(DeviceBase* device);

        bool WasConsumed() const;

        // NXT API
        BufferBase* GetResult();
        void SetAllowedUsage(nxt::BufferUsageBit usage);
        void SetInitialUsage(nxt::BufferUsageBit usage);
        void SetSize(uint32_t size);

    private:
        friend class BufferBase;

        DeviceBase* device;
        // NOTE(review): no in-class initializer — only meaningful once
        // SetSize was called (GetResult enforces that).
        uint32_t size;
        nxt::BufferUsageBit allowedUsage = nxt::BufferUsageBit::None;
        nxt::BufferUsageBit currentUsage = nxt::BufferUsageBit::None;
        int propertiesSet = 0;   // BufferSetProperties flags
        bool consumed = false;
};
// A view over a byte range of a buffer.
class BufferViewBase : public RefCounted {
    public:
        BufferViewBase(BufferViewBuilder* builder);

        BufferBase* GetBuffer();
        uint32_t GetSize() const;
        uint32_t GetOffset() const;

    private:
        Ref<BufferBase> buffer;
        uint32_t size;
        uint32_t offset;
};

// Builder used through the NXT API to describe a buffer view.
class BufferViewBuilder : public RefCounted {
    public:
        BufferViewBuilder(DeviceBase* device, BufferBase* buffer);

        bool WasConsumed() const;

        // NXT API
        BufferViewBase* GetResult();
        void SetExtent(uint32_t offset, uint32_t size);

    private:
        friend class BufferViewBase;

        DeviceBase* device;
        Ref<BufferBase> buffer;
        uint32_t offset = 0;
        uint32_t size = 0;
        int propertiesSet = 0;   // BufferViewSetProperties flags
        bool consumed = false;
};
}
#endif // BACKEND_COMMON_BUFFER_H_

View File

@@ -0,0 +1,219 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "CommandAllocator.h"

#include "Math.h"

#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdlib>
#define ASSERT assert
namespace backend {
constexpr uint32_t EndOfBlock = UINT_MAX;//std::numeric_limits<uint32_t>::max();
constexpr uint32_t AdditionalData = UINT_MAX - 1;//std::numeric_limits<uint32_t>::max();
// TODO(cwallez@chromium.org): figure out a way to have more type safety for the iterator
// Default-constructed iterators are empty: Reset() installs the internal
// endOfBlock sentinel so the first NextCommandId terminates immediately.
CommandIterator::CommandIterator()
    : endOfBlock(EndOfBlock) {
    Reset();
}
CommandIterator::~CommandIterator() {
    // The owner must have called DataWasDestroyed() (i.e. run the commands'
    // destructors, see FreeCommands) before the backing memory is released.
    ASSERT(dataWasDestroyed);

    // An empty iterator's single "block" is the endOfBlock member itself
    // (see Reset), which must not be free()d.
    if (!IsEmpty()) {
        for (auto& block : blocks) {
            free(block.block);
        }
    }
}
// Move construction steals the other iterator's blocks (if any) and leaves it
// in the empty, "data destroyed" state so its destructor is a no-op.
CommandIterator::CommandIterator(CommandIterator&& other)
    : endOfBlock(EndOfBlock) {
    if (!other.IsEmpty()) {
        blocks = std::move(other.blocks);
        // Re-point `other` at its own sentinel so it no longer references our blocks.
        other.Reset();
    }
    other.DataWasDestroyed();
    Reset();
}
// Move assignment steals the other iterator's blocks and leaves it empty and
// marked "data destroyed".
// NOTE(review): blocks already held by *this are dropped without free() — the
// contract presumably requires assigning only into empty/consumed iterators;
// confirm with callers.
CommandIterator& CommandIterator::operator=(CommandIterator&& other) {
    // Guard against self-move: the unguarded path would self-move `blocks`
    // (leaving it unspecified) and then Reset() this iterator, corrupting it.
    if (&other == this) {
        return *this;
    }
    if (!other.IsEmpty()) {
        blocks = std::move(other.blocks);
        other.Reset();
    } else {
        blocks.clear();
    }
    other.DataWasDestroyed();
    Reset();
    return *this;
}
// Takes ownership of an allocator's blocks. AcquireBlocks() terminates the
// last block with EndOfBlock, so the acquired list is immediately iterable.
CommandIterator::CommandIterator(CommandAllocator&& allocator)
    : blocks(allocator.AcquireBlocks()), endOfBlock(EndOfBlock) {
    Reset();
}
CommandIterator& CommandIterator::operator=(CommandAllocator&& allocator) {
    // NOTE(review): any blocks already held are overwritten without being
    // free()d; presumably callers only assign into empty/consumed iterators —
    // confirm against usage (e.g. CommandBufferBuilder::MoveToIterator).
    blocks = allocator.AcquireBlocks();
    Reset();
    return *this;
}
// Rewinds iteration to the first command. Also used to put the iterator into
// the canonical "empty" state when it has no blocks.
void CommandIterator::Reset() {
    currentBlock = 0;

    if (blocks.empty()) {
        // This will cause the first NextCommandId call to try to move to the next
        // block and stop the iteration immediately, without special casing the
        // initialization: the single block is the endOfBlock member itself,
        // which holds the EndOfBlock sentinel value.
        currentPtr = reinterpret_cast<uint8_t*>(&endOfBlock);
        blocks.emplace_back();
        blocks[0].size = sizeof(endOfBlock);
        blocks[0].block = currentPtr;
    } else {
        currentPtr = Align(blocks[0].block, alignof(uint32_t));
    }
}
// Records that the commands' destructors have been run; checked by
// ~CommandIterator before freeing the raw memory.
void CommandIterator::DataWasDestroyed() {
    dataWasDestroyed = true;
}
bool CommandIterator::IsEmpty() const {
    // Empty iff the single block is the internal endOfBlock sentinel installed
    // by Reset() on an iterator with no real blocks.
    return blocks[0].block == reinterpret_cast<const uint8_t*>(&endOfBlock);
}
// Reads the next command id, skipping over EndOfBlock markers by advancing to
// the next block. Returns false (and rewinds via Reset) once all blocks are
// exhausted, so the iterator is immediately reusable.
bool CommandIterator::NextCommandId(uint32_t* commandId) {
    while (true) {
        uint8_t* idPtr = Align(currentPtr, alignof(uint32_t));
        ASSERT(idPtr + sizeof(uint32_t) <= blocks[currentBlock].block + blocks[currentBlock].size);
        uint32_t id = *reinterpret_cast<uint32_t*>(idPtr);

        if (id != EndOfBlock) {
            currentPtr = idPtr + sizeof(uint32_t);
            *commandId = id;
            return true;
        }

        // End of this block: move on, or finish if it was the last one.
        currentBlock++;
        if (currentBlock >= blocks.size()) {
            Reset();
            return false;
        }
        currentPtr = Align(blocks[currentBlock].block, alignof(uint32_t));
    }
}
// Returns a pointer to the next command's storage and advances past it.
// Must be called with the exact size/alignment used at allocation time.
void* CommandIterator::NextCommand(size_t commandSize, size_t commandAlignment) {
    uint8_t* commandPtr = Align(currentPtr, commandAlignment);
    // Bug fix: the bound check previously used sizeof(commandSize) — i.e.
    // sizeof(size_t) — instead of the actual command size, so oversized reads
    // were not caught. Check the full extent of the command.
    ASSERT(commandPtr + commandSize <= blocks[currentBlock].block + blocks[currentBlock].size);
    currentPtr = commandPtr + commandSize;
    return commandPtr;
}
// Returns the next data blob, which is stored in the stream as a pseudo-command
// tagged AdditionalData (see CommandAllocator::AllocateData).
void* CommandIterator::NextData(size_t dataSize, size_t dataAlignment) {
    uint32_t tag;
    bool hasTag = NextCommandId(&tag);
    ASSERT(hasTag);
    ASSERT(tag == AdditionalData);
    return NextCommand(dataSize, dataAlignment);
}
// Potential TODO(cwallez@chromium.org):
// - Host the size and pointer to next block in the block itself to avoid having an allocation in the vector
// - Assume T's alignof is, say 64bits, static assert it, and make commandAlignment a constant in Allocate
// - Be able to optimize allocation to one block, for command buffers expected to live long to avoid cache misses
// - Better block allocation, maybe have NXT API to say command buffer is going to have size close to another
// The allocator starts on the one-word dummyEnum range: it has exactly enough
// room for the EndOfBlock tag, so the first real Allocate() falls into the
// "not enough space" path and fetches a real block via GetNewBlock, without
// special casing initialization.
CommandAllocator::CommandAllocator()
    : currentPtr(reinterpret_cast<uint8_t*>(&dummyEnum[0])), endPtr(reinterpret_cast<uint8_t*>(&dummyEnum[1])) {
}
CommandAllocator::~CommandAllocator() {
    // Blocks must have been moved out via AcquireBlocks (the CommandIterator
    // then owns and frees them); the allocator itself never frees memory.
    ASSERT(blocks.empty());
}
// Terminates the command stream and transfers block ownership to the caller
// (CommandIterator). After this the allocator is in the moved-from state.
CommandBlocks&& CommandAllocator::AcquireBlocks() {
    ASSERT(currentPtr != nullptr && endPtr != nullptr);
    ASSERT(IsAligned(currentPtr, alignof(uint32_t)));
    // Allocate() always leaves room for one trailing uint32_t, so the final
    // EndOfBlock tag can be written unconditionally.
    ASSERT(currentPtr + sizeof(uint32_t) <= endPtr);
    *reinterpret_cast<uint32_t*>(currentPtr) = EndOfBlock;

    // Null pointers mark the allocator as moved-out (see ~CommandAllocator).
    currentPtr = nullptr;
    endPtr = nullptr;
    return std::move(blocks);
}
// Writes the (aligned) command id followed by aligned storage for the command
// itself, leaving currentPtr aligned for the next id. Returns nullptr only if
// a new block could not be allocated.
uint8_t* CommandAllocator::Allocate(uint32_t commandId, size_t commandSize, size_t commandAlignment) {
    ASSERT(currentPtr != nullptr);
    ASSERT(endPtr != nullptr);
    ASSERT(commandId != EndOfBlock);

    // It should always be possible to allocate one id, for EndOfBlock tagging,
    ASSERT(IsAligned(currentPtr, alignof(uint32_t)));
    ASSERT(currentPtr + sizeof(uint32_t) <= endPtr);
    uint32_t* idAlloc = reinterpret_cast<uint32_t*>(currentPtr);

    uint8_t* commandAlloc = Align(currentPtr + sizeof(uint32_t), commandAlignment);
    uint8_t* nextPtr = Align(commandAlloc + commandSize, alignof(uint32_t));

    // When there is not enough space, we signal the EndOfBlock, so that the iterator knows to
    // move to the next one. EndOfBlock on the last block means the end of the commands.
    if (nextPtr + sizeof(uint32_t) > endPtr) {

        // Even if we are not able to get another block, the list of commands will be well-formed
        // and iterable as this block will be that last one.
        *idAlloc = EndOfBlock;

        // Make sure we have space for current allocation, plus end of block and alignment padding
        // for the first id.
        if (!GetNewBlock(nextPtr - currentPtr + sizeof(uint32_t) + alignof(uint32_t))) {
            return nullptr;
        }
        // Retry in the fresh block (recursion depth is at most one, since the
        // new block is guaranteed large enough).
        return Allocate(commandId, commandSize, commandAlignment);
    }

    *idAlloc = commandId;
    currentPtr = nextPtr;
    return commandAlloc;
}
// Data blobs are stored as pseudo-commands tagged AdditionalData, matched by
// CommandIterator::NextData on the consuming side.
uint8_t* CommandAllocator::AllocateData(size_t commandSize, size_t commandAlignment) {
    return Allocate(AdditionalData, commandSize, commandAlignment);
}
bool CommandAllocator::GetNewBlock(size_t minimumSize) {
// Allocate blocks doubling sizes each time, to a maximum of 16k (or at least minimumSize).
lastAllocationSize = std::max(minimumSize, std::min(lastAllocationSize * 2, size_t(16384)));
uint8_t* block = reinterpret_cast<uint8_t*>(malloc(lastAllocationSize));
if (block == nullptr) {
return false;
}
blocks.push_back({lastAllocationSize, block});
currentPtr = Align(block, alignof(uint32_t));
endPtr = block + lastAllocationSize;
return true;
}
}

View File

@@ -0,0 +1,150 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_COMMAND_ALLOCATOR_H_
#define BACKEND_COMMON_COMMAND_ALLOCATOR_H_
#include <cstdint>
#include <cstddef>
#include <vector>
namespace backend {
// Allocation for command buffers should be fast. To avoid doing an allocation per command
// or to avoid copying commands when reallocing, we use a linear allocator in a growing set
// of large memory blocks. We also use this to have the format to be (u32 commandId, command),
// so that iteration over the commands is easy.
// Usage of the allocator and iterator:
// CommandAllocator allocator;
// DrawCommand* cmd = allocator.Allocate<DrawCommand>(CommandType::Draw);
// // Fill command
// // Repeat allocation and filling commands
//
// CommandIterator commands(allocator);
// CommandType type;
// void* command;
// while(commands.NextCommandId(&e)) {
// switch(e) {
// case CommandType::Draw:
// DrawCommand* draw = commands.NextCommand<DrawCommand>();
// // Do the draw
// break;
// // other cases
// }
// }
// Note that you need to extract the commands from the CommandAllocator before destroying it
// and must tell the CommandIterator when the allocated commands have been processed for
// deletion.
// These are the lists of blocks, should not be used directly, only through CommandAllocator
// and CommandIterator
struct BlockDef {
size_t size;
uint8_t* block;
};
using CommandBlocks = std::vector<BlockDef>;
class CommandAllocator;
// TODO(cwallez@chromium.org): prevent copy for both iterator and allocator
class CommandIterator {
public:
CommandIterator();
~CommandIterator();
CommandIterator(CommandIterator&& other);
CommandIterator& operator=(CommandIterator&& other);
CommandIterator(CommandAllocator&& allocator);
CommandIterator& operator=(CommandAllocator&& allocator);
template<typename E>
bool NextCommandId(E* commandId) {
return NextCommandId(reinterpret_cast<uint32_t*>(commandId));
}
template<typename T>
T* NextCommand() {
return reinterpret_cast<T*>(NextCommand(sizeof(T), alignof(T)));
}
template<typename T>
T* NextData(size_t count) {
return reinterpret_cast<T*>(NextData(sizeof(T) * count, alignof(T)));
}
// Needs to be called if iteration was stopped early.
void Reset();
void DataWasDestroyed();
private:
bool IsEmpty() const;
bool NextCommandId(uint32_t* commandId);
void* NextCommand(size_t commandSize, size_t commandAlignment);
void* NextData(size_t dataSize, size_t dataAlignment);
CommandBlocks blocks;
uint8_t* currentPtr = nullptr;
size_t currentBlock = 0;
// Used to avoid a special case for empty iterators.
uint32_t endOfBlock;
bool dataWasDestroyed = false;
};
class CommandAllocator {
public:
CommandAllocator();
~CommandAllocator();
template<typename T, typename E>
T* Allocate(E commandId) {
static_assert(sizeof(E) == sizeof(uint32_t), "");
static_assert(alignof(E) == alignof(uint32_t), "");
return reinterpret_cast<T*>(Allocate(static_cast<uint32_t>(commandId), sizeof(T), alignof(T)));
}
template<typename T>
T* AllocateData(size_t count) {
return reinterpret_cast<T*>(AllocateData(sizeof(T) * count, alignof(T)));
}
private:
friend CommandIterator;
CommandBlocks&& AcquireBlocks();
uint8_t* Allocate(uint32_t commandId, size_t commandSize, size_t commandAlignment);
uint8_t* AllocateData(size_t dataSize, size_t dataAlignment);
bool GetNewBlock(size_t minimumSize);
CommandBlocks blocks;
size_t lastAllocationSize = 2048;
// Pointers to the current range of allocation in the block. Guaranteed to allow
// for at least one uint32_t is not nullptr, so that the special EndOfBlock command id
// can always be written.
// Nullptr iff the blocks were moved out.
uint8_t* currentPtr = nullptr;
uint8_t* endPtr = nullptr;
// Data used for the block range at initialization so that the first call to Allocate
// sees there is not enough space and calls GetNewBlock. This avoids having to special
// case the initialization in Allocate.
uint32_t dummyEnum[1] = {0};
};
}
#endif // BACKEND_COMMON_COMMAND_ALLOCATOR_H_

View File

@@ -0,0 +1,623 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "CommandBuffer.h"

#include "BindGroup.h"
#include "BindGroupLayout.h"
#include "Buffer.h"
#include "Commands.h"
#include "Device.h"
#include "InputState.h"
#include "Pipeline.h"
#include "PipelineLayout.h"
#include "Texture.h"

#include <bitset>
#include <cstring>
#include <map>
namespace backend {
CommandBufferBase::CommandBufferBase(CommandBufferBuilder* builder)
: device(builder->device),
buffersTransitioned(std::move(builder->buffersTransitioned)),
texturesTransitioned(std::move(builder->texturesTransitioned)) {
}
bool CommandBufferBase::ValidateResourceUsagesImmediate() {
for (auto buffer : buffersTransitioned) {
if (buffer->IsFrozen()) {
device->HandleError("Command buffer: cannot transition buffer with frozen usage");
return false;
}
}
for (auto texture : texturesTransitioned) {
if (texture->IsFrozen()) {
device->HandleError("Command buffer: cannot transition texture with frozen usage");
return false;
}
}
return true;
}
// Walks the command stream and runs every command's destructor (consuming any
// trailing AdditionalData blobs in the process) so the iterator's raw memory
// can be freed. The NextCommand/NextData calls here must exactly mirror the
// allocation order in CommandBufferBuilder, otherwise the stream desynchronizes.
void FreeCommands(CommandIterator* commands) {
    Command type;
    while(commands->NextCommandId(&type)) {
        switch (type) {
            case Command::CopyBufferToTexture:
                {
                    CopyBufferToTextureCmd* copy = commands->NextCommand<CopyBufferToTextureCmd>();
                    copy->~CopyBufferToTextureCmd();
                }
                break;
            case Command::Dispatch:
                {
                    DispatchCmd* dispatch = commands->NextCommand<DispatchCmd>();
                    dispatch->~DispatchCmd();
                }
                break;
            case Command::DrawArrays:
                {
                    DrawArraysCmd* draw = commands->NextCommand<DrawArraysCmd>();
                    draw->~DrawArraysCmd();
                }
                break;
            case Command::DrawElements:
                {
                    DrawElementsCmd* draw = commands->NextCommand<DrawElementsCmd>();
                    draw->~DrawElementsCmd();
                }
                break;
            case Command::SetPipeline:
                {
                    SetPipelineCmd* cmd = commands->NextCommand<SetPipelineCmd>();
                    cmd->~SetPipelineCmd();
                }
                break;
            case Command::SetPushConstants:
                {
                    SetPushConstantsCmd* cmd = commands->NextCommand<SetPushConstantsCmd>();
                    // Skip the trailing blob of `count` constant values (trivially
                    // destructible, but must be consumed to stay in sync).
                    commands->NextData<uint32_t>(cmd->count);
                    cmd->~SetPushConstantsCmd();
                }
                break;
            case Command::SetBindGroup:
                {
                    SetBindGroupCmd* cmd = commands->NextCommand<SetBindGroupCmd>();
                    cmd->~SetBindGroupCmd();
                }
                break;
            case Command::SetIndexBuffer:
                {
                    SetIndexBufferCmd* cmd = commands->NextCommand<SetIndexBufferCmd>();
                    cmd->~SetIndexBufferCmd();
                }
                break;
            case Command::SetVertexBuffers:
                {
                    SetVertexBuffersCmd* cmd = commands->NextCommand<SetVertexBuffersCmd>();
                    // The buffer Refs were placement-constructed in the data blob:
                    // release each reference explicitly before discarding it.
                    auto buffers = commands->NextData<Ref<BufferBase>>(cmd->count);
                    for (size_t i = 0; i < cmd->count; ++i) {
                        (&buffers[i])->~Ref<BufferBase>();
                    }
                    // Skip the matching blob of offsets.
                    commands->NextData<uint32_t>(cmd->count);
                    cmd->~SetVertexBuffersCmd();
                }
                break;
            case Command::TransitionBufferUsage:
                {
                    TransitionBufferUsageCmd* cmd = commands->NextCommand<TransitionBufferUsageCmd>();
                    cmd->~TransitionBufferUsageCmd();
                }
                break;
            case Command::TransitionTextureUsage:
                {
                    TransitionTextureUsageCmd* cmd = commands->NextCommand<TransitionTextureUsageCmd>();
                    cmd->~TransitionTextureUsageCmd();
                }
                break;
        }
    }
    // Tell the iterator its contents were destroyed so its destructor may free
    // the raw blocks.
    commands->DataWasDestroyed();
}
// Builder that records commands into `allocator` until GetResult is called.
CommandBufferBuilder::CommandBufferBuilder(DeviceBase* device) : device(device) {
}
CommandBufferBuilder::~CommandBufferBuilder() {
    // If no command buffer was built from this builder, the recorded commands
    // are still owned here and their destructors must be run before the
    // iterator frees its memory.
    if (!consumed) {
        MoveToIterator();
        FreeCommands(&iterator);
    }
}
// Whether GetResult has already consumed this builder.
bool CommandBufferBuilder::WasConsumed() const {
    return consumed;
}
// Pieces of state that must be established before a draw/dispatch is valid;
// tracked lazily in ValidateGetResult.
enum ValidationAspect {
    VALIDATION_ASPECT_RENDER_PIPELINE,
    VALIDATION_ASPECT_COMPUTE_PIPELINE,
    VALIDATION_ASPECT_BINDGROUPS,
    VALIDATION_ASPECT_VERTEX_BUFFERS,
    VALIDATION_ASPECT_INDEX_BUFFER,

    VALIDATION_ASPECT_COUNT,
};
// One bit per aspect above.
using ValidationAspects = std::bitset<VALIDATION_ASPECT_COUNT>;
// Replays the recorded command stream once, simulating the usage state machine
// of every referenced buffer/texture and the per-draw/dispatch state (pipeline,
// bind groups, vertex/index buffers). Returns false and reports through
// DeviceBase::HandleError on the first invalid command. Leaves `iterator`
// rewound (NextCommandId Resets on exhaustion) for later execution.
bool CommandBufferBuilder::ValidateGetResult() {
    MoveToIterator();

    ValidationAspects aspects;
    std::bitset<kMaxBindGroups> bindgroupsSet;
    std::bitset<kMaxVertexInputs> inputsSet;
    PipelineBase* lastPipeline = nullptr;

    // A usage bit is "guaranteed" for a resource if it is frozen into the
    // resource, or if the most recent transition recorded in this command
    // buffer established it.
    std::map<BufferBase*, nxt::BufferUsageBit> mostRecentBufferUsages;
    auto bufferHasGuaranteedUsageBit = [&](BufferBase* buffer, nxt::BufferUsageBit usage) -> bool {
        assert(usage != nxt::BufferUsageBit::None && nxt::HasZeroOrOneBits(usage));
        if (buffer->HasFrozenUsage(usage)) {
            return true;
        }
        auto it = mostRecentBufferUsages.find(buffer);
        return it != mostRecentBufferUsages.end() && (it->second & usage);
    };

    std::map<TextureBase*, nxt::TextureUsageBit> mostRecentTextureUsages;
    auto textureHasGuaranteedUsageBit = [&](TextureBase* texture, nxt::TextureUsageBit usage) -> bool {
        assert(usage != nxt::TextureUsageBit::None && nxt::HasZeroOrOneBits(usage));
        if (texture->HasFrozenUsage(usage)) {
            return true;
        }
        auto it = mostRecentTextureUsages.find(texture);
        return it != mostRecentTextureUsages.end() && (it->second & usage);
    };

    // Every resource referenced by a bind group must have its corresponding
    // usage guaranteed at the time the group is set.
    auto validateBindGroupUsages = [&](BindGroupBase* group) -> bool {
        const auto& layoutInfo = group->GetLayout()->GetBindingInfo();
        for (size_t i = 0; i < kMaxBindingsPerGroup; ++i) {
            if (!layoutInfo.mask[i]) {
                continue;
            }

            nxt::BindingType type = layoutInfo.types[i];
            switch (type) {
                case nxt::BindingType::UniformBuffer:
                case nxt::BindingType::StorageBuffer:
                    {
                        nxt::BufferUsageBit requiredUsage;
                        switch (type) {
                            case nxt::BindingType::UniformBuffer:
                                requiredUsage = nxt::BufferUsageBit::Uniform;
                                break;
                            case nxt::BindingType::StorageBuffer:
                                requiredUsage = nxt::BufferUsageBit::Storage;
                                break;
                            default:
                                assert(false);
                                return false;
                        }

                        auto buffer = group->GetBindingAsBufferView(i)->GetBuffer();
                        if (!bufferHasGuaranteedUsageBit(buffer, requiredUsage)) {
                            device->HandleError("Can't guarantee buffer usage needed by bind group");
                            return false;
                        }
                    }
                    break;
                case nxt::BindingType::SampledTexture:
                    {
                        auto requiredUsage = nxt::TextureUsageBit::Sampled;
                        auto texture = group->GetBindingAsTextureView(i)->GetTexture();
                        if (!textureHasGuaranteedUsageBit(texture, requiredUsage)) {
                            device->HandleError("Can't guarantee texture usage needed by bind group");
                            return false;
                        }
                    }
                    break;
                case nxt::BindingType::Sampler:
                    // Samplers reference no transitionable resource.
                    continue;
            }
        }
        return true;
    };

    Command type;
    while(iterator.NextCommandId(&type)) {
        switch (type) {
            case Command::CopyBufferToTexture:
                {
                    CopyBufferToTextureCmd* copy = iterator.NextCommand<CopyBufferToTextureCmd>();
                    BufferBase* buffer = copy->buffer.Get();
                    TextureBase* texture = copy->texture.Get();
                    // Widen to 64 bits so the extent/size arithmetic below
                    // cannot wrap for in-range 32-bit inputs.
                    uint64_t width = copy->width;
                    uint64_t height = copy->height;
                    uint64_t depth = copy->depth;
                    uint64_t x = copy->x;
                    uint64_t y = copy->y;
                    uint64_t z = copy->z;
                    uint32_t level = copy->level;

                    if (!bufferHasGuaranteedUsageBit(buffer, nxt::BufferUsageBit::TransferSrc)) {
                        device->HandleError("Buffer needs the transfer source usage bit");
                        return false;
                    }

                    if (!textureHasGuaranteedUsageBit(texture, nxt::TextureUsageBit::TransferDst)) {
                        device->HandleError("Texture needs the transfer destination usage bit");
                        return false;
                    }

                    if (width == 0 || height == 0 || depth == 0) {
                        device->HandleError("Empty copy");
                        return false;
                    }

                    // TODO(cwallez@chromium.org): check for overflows
                    uint64_t pixelSize = TextureFormatPixelSize(texture->GetFormat());
                    uint64_t dataSize = width * height * depth * pixelSize;

                    // TODO(cwallez@chromium.org): handle buffer offset when it is in the command.
                    if (dataSize > static_cast<uint64_t>(buffer->GetSize())) {
                        device->HandleError("Copy would read after end of the buffer");
                        return false;
                    }

                    // NOTE(review): bounds are checked against the base-level
                    // texture dimensions even for level > 0 — confirm intended.
                    if (x + width > static_cast<uint64_t>(texture->GetWidth()) ||
                        y + height > static_cast<uint64_t>(texture->GetHeight()) ||
                        z + depth > static_cast<uint64_t>(texture->GetDepth()) ||
                        level > texture->GetNumMipLevels()) {
                        device->HandleError("Copy would write outside of the texture");
                        return false;
                    }
                }
                break;
            case Command::Dispatch:
                {
                    DispatchCmd* cmd = iterator.NextCommand<DispatchCmd>();

                    // NOTE(review): the required aspects include VERTEX_BUFFERS and
                    // the lazy check dereferences lastPipeline, which is null if no
                    // SetPipeline preceded this Dispatch — confirm/guard upstream.
                    constexpr ValidationAspects requiredDispatchAspects =
                        1 << VALIDATION_ASPECT_COMPUTE_PIPELINE |
                        1 << VALIDATION_ASPECT_BINDGROUPS |
                        1 << VALIDATION_ASPECT_VERTEX_BUFFERS;

                    if ((requiredDispatchAspects & ~aspects).any()) {
                        // Compute the lazily computed aspects
                        if (bindgroupsSet.all()) {
                            aspects.set(VALIDATION_ASPECT_BINDGROUPS);
                        }

                        auto requiredInputs = lastPipeline->GetInputState()->GetInputsSetMask();
                        if ((inputsSet & ~requiredInputs).none()) {
                            aspects.set(VALIDATION_ASPECT_VERTEX_BUFFERS);
                        }

                        // Check again if anything is missing
                        if ((requiredDispatchAspects & ~aspects).any()) {
                            device->HandleError("Some dispatch state is missing");
                            return false;
                        }
                    }
                }
                break;
            case Command::DrawArrays:
            case Command::DrawElements:
                {
                    constexpr ValidationAspects requiredDrawAspects =
                        1 << VALIDATION_ASPECT_RENDER_PIPELINE |
                        1 << VALIDATION_ASPECT_BINDGROUPS |
                        1 << VALIDATION_ASPECT_VERTEX_BUFFERS;

                    if ((requiredDrawAspects & ~aspects).any()) {
                        // Compute the lazily computed aspects
                        if (bindgroupsSet.all()) {
                            aspects.set(VALIDATION_ASPECT_BINDGROUPS);
                        }

                        auto requiredInputs = lastPipeline->GetInputState()->GetInputsSetMask();
                        if ((inputsSet & ~requiredInputs).none()) {
                            aspects.set(VALIDATION_ASPECT_VERTEX_BUFFERS);
                        }

                        // Check again if anything is missing
                        if ((requiredDrawAspects & ~aspects).any()) {
                            device->HandleError("Some draw state is missing");
                            return false;
                        }
                    }

                    // The returned pointers are unused, but NextCommand must still
                    // run to advance the iterator past the command payload.
                    if (type == Command::DrawArrays) {
                        DrawArraysCmd* draw = iterator.NextCommand<DrawArraysCmd>();
                    } else {
                        ASSERT(type == Command::DrawElements);
                        DrawElementsCmd* draw = iterator.NextCommand<DrawElementsCmd>();

                        if (!aspects[VALIDATION_ASPECT_INDEX_BUFFER]) {
                            device->HandleError("Draw elements requires an index buffer");
                            return false;
                        }
                    }
                }
                break;
            case Command::SetPipeline:
                {
                    SetPipelineCmd* cmd = iterator.NextCommand<SetPipelineCmd>();
                    PipelineBase* pipeline = cmd->pipeline.Get();
                    PipelineLayoutBase* layout = pipeline->GetLayout();

                    if (pipeline->IsCompute()) {
                        aspects.set(VALIDATION_ASPECT_COMPUTE_PIPELINE);
                        aspects.reset(VALIDATION_ASPECT_RENDER_PIPELINE);
                    } else {
                        aspects.set(VALIDATION_ASPECT_RENDER_PIPELINE);
                        aspects.reset(VALIDATION_ASPECT_COMPUTE_PIPELINE);
                    }
                    aspects.reset(VALIDATION_ASPECT_BINDGROUPS);
                    aspects.reset(VALIDATION_ASPECT_VERTEX_BUFFERS);

                    // Slots the layout does not use count as already set.
                    bindgroupsSet = ~layout->GetBindGroupsLayoutMask();

                    // Only bindgroups that were not the same layout in the last pipeline need to be set again.
                    if (lastPipeline) {
                        PipelineLayoutBase* lastLayout = lastPipeline->GetLayout();
                        for (uint32_t i = 0; i < kMaxBindGroups; ++i) {
                            if (lastLayout->GetBindGroupLayout(i) == layout->GetBindGroupLayout(i)) {
                                bindgroupsSet |= uint64_t(1) << i;
                            }
                        }
                    }

                    lastPipeline = pipeline;
                }
                break;
            case Command::SetPushConstants:
                {
                    SetPushConstantsCmd* cmd = iterator.NextCommand<SetPushConstantsCmd>();
                    // Consume the trailing blob of values to stay in sync.
                    iterator.NextData<uint32_t>(cmd->count);
                    if (cmd->count + cmd->offset > kMaxPushConstants) {
                        device->HandleError("Setting pushconstants past the limit");
                        return false;
                    }
                }
                break;
            case Command::SetBindGroup:
                {
                    SetBindGroupCmd* cmd = iterator.NextCommand<SetBindGroupCmd>();
                    uint32_t index = cmd->index;

                    // NOTE(review): dereferences lastPipeline — null if SetBindGroup
                    // precedes the first SetPipeline; confirm/guard upstream.
                    if (cmd->group->GetLayout() != lastPipeline->GetLayout()->GetBindGroupLayout(index)) {
                        device->HandleError("Bind group layout mismatch");
                        return false;
                    }
                    if (!validateBindGroupUsages(cmd->group.Get())) {
                        return false;
                    }
                    bindgroupsSet |= uint64_t(1) << index;
                }
                break;
            case Command::SetIndexBuffer:
                {
                    SetIndexBufferCmd* cmd = iterator.NextCommand<SetIndexBufferCmd>();
                    auto buffer = cmd->buffer;
                    auto usage = nxt::BufferUsageBit::Index;
                    if (!bufferHasGuaranteedUsageBit(buffer.Get(), usage)) {
                        device->HandleError("Buffer needs the index usage bit to be guaranteed");
                        return false;
                    }

                    aspects.set(VALIDATION_ASPECT_INDEX_BUFFER);
                }
                break;
            case Command::SetVertexBuffers:
                {
                    SetVertexBuffersCmd* cmd = iterator.NextCommand<SetVertexBuffersCmd>();
                    auto buffers = iterator.NextData<Ref<BufferBase>>(cmd->count);
                    iterator.NextData<uint32_t>(cmd->count);

                    for (uint32_t i = 0; i < cmd->count; ++i) {
                        auto buffer = buffers[i];
                        auto usage = nxt::BufferUsageBit::Vertex;
                        if (!bufferHasGuaranteedUsageBit(buffer.Get(), usage)) {
                            device->HandleError("Buffer needs vertex usage bit to be guaranteed");
                            return false;
                        }
                        inputsSet.set(cmd->startSlot + i);
                    }
                }
                break;
            case Command::TransitionBufferUsage:
                {
                    TransitionBufferUsageCmd* cmd = iterator.NextCommand<TransitionBufferUsageCmd>();
                    auto buffer = cmd->buffer.Get();
                    auto usage = cmd->usage;

                    if (!cmd->buffer->IsTransitionPossible(cmd->usage)) {
                        device->HandleError("Buffer frozen or usage not allowed");
                        return false;
                    }

                    // Record the transition for later "guaranteed usage" checks and
                    // for submit-time frozen-usage validation.
                    mostRecentBufferUsages[buffer] = usage;

                    buffersTransitioned.insert(buffer);
                }
                break;
            case Command::TransitionTextureUsage:
                {
                    TransitionTextureUsageCmd* cmd = iterator.NextCommand<TransitionTextureUsageCmd>();
                    auto texture = cmd->texture.Get();
                    auto usage = cmd->usage;

                    if (!cmd->texture->IsTransitionPossible(cmd->usage)) {
                        device->HandleError("Texture frozen or usage not allowed");
                        return false;
                    }

                    mostRecentTextureUsages[texture] = usage;

                    texturesTransitioned.insert(texture);
                }
                break;
        }
    }

    return true;
}
// Hands the recorded command stream to the backend command buffer; the
// returned iterator becomes the owner of the command memory.
CommandIterator CommandBufferBuilder::AcquireCommands() {
    return std::move(iterator);
}
// Finalizes recording and asks the device to create the backend command buffer
// from this builder. Marks the builder consumed so its destructor does not
// free the commands a second time.
CommandBufferBase* CommandBufferBuilder::GetResult() {
    MoveToIterator();
    consumed = true;
    return device->CreateCommandBuffer(this);
}
// Records a buffer -> texture copy of a (width, height, depth) region at
// (x, y, z) of mip `level`. Validation happens later in ValidateGetResult.
void CommandBufferBuilder::CopyBufferToTexture(BufferBase* buffer, TextureBase* texture, uint32_t x, uint32_t y, uint32_t z,
                                               uint32_t width, uint32_t height, uint32_t depth, uint32_t level) {
    CopyBufferToTextureCmd* cmd = new(allocator.Allocate<CopyBufferToTextureCmd>(Command::CopyBufferToTexture)) CopyBufferToTextureCmd;
    cmd->buffer = buffer;
    cmd->texture = texture;
    cmd->x = x;
    cmd->y = y;
    cmd->z = z;
    cmd->width = width;
    cmd->height = height;
    cmd->depth = depth;
    cmd->level = level;
}
void CommandBufferBuilder::Dispatch(uint32_t x, uint32_t y, uint32_t z) {
DispatchCmd* dispatch = allocator.Allocate<DispatchCmd>(Command::Dispatch);
new(dispatch) DispatchCmd;
dispatch->x = x;
dispatch->y = y;
dispatch->z = z;
}
void CommandBufferBuilder::DrawArrays(uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance) {
DrawArraysCmd* draw = allocator.Allocate<DrawArraysCmd>(Command::DrawArrays);
new(draw) DrawArraysCmd;
draw->vertexCount = vertexCount;
draw->instanceCount = instanceCount;
draw->firstVertex = firstVertex;
draw->firstInstance = firstInstance;
}
void CommandBufferBuilder::DrawElements(uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, uint32_t firstInstance) {
DrawElementsCmd* draw = allocator.Allocate<DrawElementsCmd>(Command::DrawElements);
new(draw) DrawElementsCmd;
draw->indexCount = indexCount;
draw->instanceCount = instanceCount;
draw->firstIndex = firstIndex;
draw->firstInstance = firstInstance;
}
// Records a pipeline change (compute or render).
void CommandBufferBuilder::SetPipeline(PipelineBase* pipeline) {
    SetPipelineCmd* cmd = new(allocator.Allocate<SetPipelineCmd>(Command::SetPipeline)) SetPipelineCmd;
    cmd->pipeline = pipeline;
}
// Records an update of `count` push-constant words starting at `offset` for
// the given shader stages. The values are copied into the command stream.
void CommandBufferBuilder::SetPushConstants(nxt::ShaderStageBit stage, uint32_t offset, uint32_t count, const void* data) {
    // Reject ranges that would write past the push-constant storage.
    if (offset + count > kMaxPushConstants) {
        device->HandleError("Setting too many push constants");
        return;
    }

    SetPushConstantsCmd* cmd = new(allocator.Allocate<SetPushConstantsCmd>(Command::SetPushConstants)) SetPushConstantsCmd;
    cmd->stage = stage;
    cmd->offset = offset;
    cmd->count = count;

    // The constant values follow the command as an AdditionalData blob.
    uint32_t* values = allocator.AllocateData<uint32_t>(count);
    memcpy(values, data, count * sizeof(uint32_t));
}
void CommandBufferBuilder::SetBindGroup(uint32_t groupIndex, BindGroupBase* group) {
if (groupIndex >= kMaxBindGroups) {
device->HandleError("Setting bind group over the max");
return;
}
SetBindGroupCmd* cmd = allocator.Allocate<SetBindGroupCmd>(Command::SetBindGroup);
new(cmd) SetBindGroupCmd;
cmd->index = groupIndex;
cmd->group = group;
}
// Records the index buffer used by subsequent DrawElements calls.
// TODO(kainino@chromium.org): validation
void CommandBufferBuilder::SetIndexBuffer(BufferBase* buffer, uint32_t offset, nxt::IndexFormat format) {
    SetIndexBufferCmd* cmd = new(allocator.Allocate<SetIndexBufferCmd>(Command::SetIndexBuffer)) SetIndexBufferCmd;
    cmd->buffer = buffer;
    cmd->offset = offset;
    cmd->format = format;
}
void CommandBufferBuilder::SetVertexBuffers(uint32_t startSlot, uint32_t count, BufferBase* const* buffers, uint32_t const* offsets){
// TODO(kainino@chromium.org): validation
SetVertexBuffersCmd* cmd = allocator.Allocate<SetVertexBuffersCmd>(Command::SetVertexBuffers);
new(cmd) SetVertexBuffersCmd;
cmd->startSlot = startSlot;
cmd->count = count;
Ref<BufferBase>* cmdBuffers = allocator.AllocateData<Ref<BufferBase>>(count);
for (size_t i = 0; i < count; ++i) {
new(&cmdBuffers[i]) Ref<BufferBase>(buffers[i]);
}
uint32_t* cmdOffsets = allocator.AllocateData<uint32_t>(count);
memcpy(cmdOffsets, offsets, count * sizeof(uint32_t));
}
// Records a buffer usage transition; validated in ValidateGetResult.
void CommandBufferBuilder::TransitionBufferUsage(BufferBase* buffer, nxt::BufferUsageBit usage) {
    TransitionBufferUsageCmd* cmd = new(allocator.Allocate<TransitionBufferUsageCmd>(Command::TransitionBufferUsage)) TransitionBufferUsageCmd;
    cmd->buffer = buffer;
    cmd->usage = usage;
}
// Records a texture usage transition; validated in ValidateGetResult.
void CommandBufferBuilder::TransitionTextureUsage(TextureBase* texture, nxt::TextureUsageBit usage) {
    TransitionTextureUsageCmd* cmd = new(allocator.Allocate<TransitionTextureUsageCmd>(Command::TransitionTextureUsage)) TransitionTextureUsageCmd;
    cmd->texture = texture;
    cmd->usage = usage;
}
// Moves the recorded commands out of the allocator into `iterator` exactly
// once; subsequent calls are no-ops.
void CommandBufferBuilder::MoveToIterator() {
    if (movedToIterator) {
        return;
    }
    iterator = std::move(allocator);
    movedToIterator = true;
}
}

View File

@@ -0,0 +1,98 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_COMMANDBUFFERGL_H_
#define BACKEND_COMMON_COMMANDBUFFERGL_H_
#include "nxt/nxtcpp.h"

#include "CommandAllocator.h"
#include "RefCounted.h"

#include <set>
#include <type_traits>
#include <utility>
namespace backend {
class BindGroupBase;
class BufferBase;
class DeviceBase;
class PipelineBase;
class TextureBase;
class CommandBufferBuilder;
// Base class for backend command buffers. Keeps the sets of resources whose
// usage the recorded commands transition, for validation at submit time.
class CommandBufferBase : public RefCounted {
    public:
        CommandBufferBase(CommandBufferBuilder* builder);
        // False (after reporting via the device) if any transitioned resource
        // has frozen usage.
        bool ValidateResourceUsagesImmediate();

    private:
        DeviceBase* device;
        // Raw pointers stay valid: the resources are kept alive by the bind
        // groups/commands this command buffer references.
        std::set<BufferBase*> buffersTransitioned;
        std::set<TextureBase*> texturesTransitioned;
};
// Records API commands into a CommandAllocator and validates/hands them off to
// the backend command buffer via GetResult / AcquireCommands.
class CommandBufferBuilder : public RefCounted {
    public:
        CommandBufferBuilder(DeviceBase* device);
        ~CommandBufferBuilder();

        bool WasConsumed() const;
        // Replays the recorded stream, validating state and resource usages.
        bool ValidateGetResult();
        // Transfers ownership of the recorded commands to the caller.
        CommandIterator AcquireCommands();

        // NXT API
        CommandBufferBase* GetResult();

        void CopyBufferToTexture(BufferBase* buffer, TextureBase* texture, uint32_t x, uint32_t y, uint32_t z,
                                 uint32_t width, uint32_t height, uint32_t depth, uint32_t level);
        void Dispatch(uint32_t x, uint32_t y, uint32_t z);
        void DrawArrays(uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance);
        // Consistency fix: first parameter renamed from `vertexCount` to
        // `indexCount` to match the definition in CommandBuffer.cpp
        // (declaration-only rename; callers are unaffected).
        void DrawElements(uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, uint32_t firstInstance);
        void SetPushConstants(nxt::ShaderStageBit stage, uint32_t offset, uint32_t count, const void* data);
        void SetPipeline(PipelineBase* pipeline);
        void SetBindGroup(uint32_t groupIndex, BindGroupBase* group);
        void SetIndexBuffer(BufferBase* buffer, uint32_t offset, nxt::IndexFormat format);

        // Typed convenience overload forwarding to the BufferBase* version.
        template<typename T>
        void SetVertexBuffers(uint32_t startSlot, uint32_t count, T* const* buffers, uint32_t const* offsets) {
            static_assert(std::is_base_of<BufferBase, T>::value, "");
            SetVertexBuffers(startSlot, count, reinterpret_cast<BufferBase* const*>(buffers), offsets);
        }
        void SetVertexBuffers(uint32_t startSlot, uint32_t count, BufferBase* const* buffers, uint32_t const* offsets);
        void TransitionBufferUsage(BufferBase* buffer, nxt::BufferUsageBit usage);
        void TransitionTextureUsage(TextureBase* texture, nxt::TextureUsageBit usage);

    private:
        friend class CommandBufferBase;

        // Moves the commands from `allocator` into `iterator` exactly once.
        void MoveToIterator();

        DeviceBase* device;
        CommandAllocator allocator;
        CommandIterator iterator;
        bool consumed = false;
        bool movedToIterator = false;
        // These pointers will remain valid since they are referenced by
        // the bind groups which are referenced by this command buffer.
        std::set<BufferBase*> buffersTransitioned;
        std::set<TextureBase*> texturesTransitioned;
};
}
#endif // BACKEND_COMMON_COMMANDBUFFERGL_H_

View File

@@ -0,0 +1,114 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_COMMANDS_H_
#define BACKEND_COMMON_COMMANDS_H_
#include "Texture.h"
#include "nxt/nxtcpp.h"
namespace backend {
// Definition of the commands that are present in the CommandIterator given by the
// CommandBufferBuilder. There are not defined in CommandBuffer.h to break some header
// dependencies: Ref<Object> needs Object to be defined.
// Discriminant tag written before each command's payload struct in the
// CommandIterator stream produced by CommandBufferBuilder.
enum class Command {
CopyBufferToTexture,
Dispatch,
DrawArrays,
DrawElements,
SetPipeline,
SetPushConstants,
SetBindGroup,
SetIndexBuffer,
SetVertexBuffers,
TransitionBufferUsage,
TransitionTextureUsage,
};
// Payload for Command::CopyBufferToTexture: copies `buffer` into the region
// starting at (x, y, z) of extent (width, height, depth) at mip `level`.
struct CopyBufferToTextureCmd {
Ref<BufferBase> buffer;
Ref<TextureBase> texture;
uint32_t x, y, z;
uint32_t width, height, depth;
uint32_t level;
};
// Payload for Command::Dispatch: compute workgroup counts per dimension.
struct DispatchCmd {
uint32_t x;
uint32_t y;
uint32_t z;
};
// Payload for Command::DrawArrays: non-indexed draw parameters.
struct DrawArraysCmd {
uint32_t vertexCount;
uint32_t instanceCount;
uint32_t firstVertex;
uint32_t firstInstance;
};
// Payload for Command::DrawElements: indexed draw parameters.
// NOTE(review): CommandBufferBuilder::DrawElements names its first parameter
// "vertexCount" while this struct calls it "indexCount" — same value, but the
// naming should be unified.
struct DrawElementsCmd {
uint32_t indexCount;
uint32_t instanceCount;
uint32_t firstIndex;
uint32_t firstInstance;
};
// Payload for Command::SetPipeline: pipeline bound for subsequent commands.
struct SetPipelineCmd {
Ref<PipelineBase> pipeline;
};
// Payload for Command::SetPushConstants: updates `count` constants starting
// at `offset` for `stage`. No data pointer is stored here, so the constant
// values are presumably serialized inline after this struct in the command
// stream — TODO confirm against CommandBufferBuilder's implementation.
struct SetPushConstantsCmd {
nxt::ShaderStageBit stage;
uint32_t offset;
uint32_t count;
};
// Payload for Command::SetBindGroup: binds `group` at bind group `index`.
struct SetBindGroupCmd {
uint32_t index;
Ref<BindGroupBase> group;
};
// Payload for Command::SetIndexBuffer: index buffer, byte offset and format.
struct SetIndexBufferCmd {
Ref<BufferBase> buffer;
uint32_t offset;
nxt::IndexFormat format;
};
// Payload for Command::SetVertexBuffers: binds `count` vertex buffers
// starting at `startSlot`. The buffer refs and offsets themselves are
// presumably serialized inline after this struct — TODO confirm.
struct SetVertexBuffersCmd {
uint32_t startSlot;
uint32_t count;
};
// Payload for Command::TransitionBufferUsage: changes `buffer` to `usage`.
struct TransitionBufferUsageCmd {
Ref<BufferBase> buffer;
nxt::BufferUsageBit usage;
};
// Payload for Command::TransitionTextureUsage: transitions mip levels
// [startLevel, startLevel + levelCount) of `texture` to `usage`.
// NOTE(review): the builder's TransitionTextureUsage() takes no level range,
// so these fields are presumably filled with the full mip range — confirm.
struct TransitionTextureUsageCmd {
Ref<TextureBase> texture;
uint32_t startLevel;
uint32_t levelCount;
nxt::TextureUsageBit usage;
};
// This needs to be called before the CommandIterator is freed so that the Ref<> present in
// the commands have a chance to run their destructor and remove internal references.
void FreeCommands(CommandIterator* commands);
}
#endif // BACKEND_COMMON_COMMANDS_H_

View File

@@ -0,0 +1,126 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "Device.h"
#include "BindGroup.h"
#include "BindGroupLayout.h"
#include "Buffer.h"
#include "CommandBuffer.h"
#include "InputState.h"
#include "Pipeline.h"
#include "PipelineLayout.h"
#include "Queue.h"
#include "Sampler.h"
#include "ShaderModule.h"
#include "Texture.h"
#include <unordered_set>
namespace backend {
// C-API entry point: forwards an error-callback registration to the
// DeviceBase hidden behind the opaque nxtDevice handle.
void RegisterSynchronousErrorCallback(nxtDevice device, ErrorCallback callback, void* userData) {
    reinterpret_cast<DeviceBase*>(device)->RegisterErrorCallback(callback, userData);
}
// DeviceBase::Caches
// The caches are unordered_sets of pointers with special hash and compare functions
// to compare the value of the objects, instead of the pointers.
// Keys are pointers, but hashing/equality (BindGroupLayoutCacheFuncs) compare
// the pointed-to values, so equal layouts dedupe to a single cached object.
using BindGroupLayoutCache = std::unordered_set<BindGroupLayoutBase*, BindGroupLayoutCacheFuncs, BindGroupLayoutCacheFuncs>;
// Holds one deduplication cache per cacheable object type. Defined here (not
// in Device.h) to keep the unordered_set includes out of the header.
struct DeviceBase::Caches {
BindGroupLayoutCache bindGroupLayouts;
};
// DeviceBase
// Caches is heap-allocated because it is only forward-declared in Device.h.
DeviceBase::DeviceBase() {
caches = new DeviceBase::Caches();
}
// Frees the cache container only; the cached objects are presumably removed
// individually via UncacheBindGroupLayout when their refcount drops — confirm.
DeviceBase::~DeviceBase() {
delete caches;
}
// Reports a validation/runtime error to the registered callback; a no-op
// when no callback has been registered.
void DeviceBase::HandleError(const char* message) {
    if (errorCallback == nullptr) {
        return;
    }
    errorCallback(message, errorUserData);
}
// Stores the callback (and its user data) later invoked by HandleError.
// Replaces any previously registered callback.
void DeviceBase::RegisterErrorCallback(ErrorCallback callback, void* userData) {
    errorCallback = callback;
    errorUserData = userData;
}
// Returns a cached layout equal to `blueprint` if one exists; otherwise asks
// the backend to create one from `builder` and caches it. The cache compares
// values (not pointers), so equal layouts share a single object.
BindGroupLayoutBase* DeviceBase::GetOrCreateBindGroupLayout(const BindGroupLayoutBase* blueprint, BindGroupLayoutBuilder* builder) {
// The blueprint is only used to search in the cache and is not modified. However cached
// objects can be modified, and unordered_set cannot search for a const pointer in a non
// const pointer set. That's why we do a const_cast here, but the blueprint won't be
// modified.
auto iter = caches->bindGroupLayouts.find(const_cast<BindGroupLayoutBase*>(blueprint));
if (iter != caches->bindGroupLayouts.end()) {
return *iter;
}
BindGroupLayoutBase* backendObj = CreateBindGroupLayout(builder);
caches->bindGroupLayouts.insert(backendObj);
return backendObj;
}
// Removes `obj` from the dedup cache so a future lookup won't return it.
void DeviceBase::UncacheBindGroupLayout(BindGroupLayoutBase* obj) {
caches->bindGroupLayouts.erase(obj);
}
// NXT API: allocates a new bind group builder; lifetime is refcount-managed.
BindGroupBuilder* DeviceBase::CreateBindGroupBuilder() {
return new BindGroupBuilder(this);
}
// NXT API: allocates a new bind group layout builder; refcount-managed.
BindGroupLayoutBuilder* DeviceBase::CreateBindGroupLayoutBuilder() {
return new BindGroupLayoutBuilder(this);
}
// NXT API: allocates a new buffer builder; refcount-managed.
BufferBuilder* DeviceBase::CreateBufferBuilder() {
return new BufferBuilder(this);
}
// NXT API: allocates a new command buffer builder; refcount-managed.
CommandBufferBuilder* DeviceBase::CreateCommandBufferBuilder() {
return new CommandBufferBuilder(this);
}
// NXT API: allocates a new input state builder; refcount-managed.
InputStateBuilder* DeviceBase::CreateInputStateBuilder() {
return new InputStateBuilder(this);
}
// NXT API: allocates a new pipeline builder; refcount-managed.
PipelineBuilder* DeviceBase::CreatePipelineBuilder() {
return new PipelineBuilder(this);
}
// NXT API: allocates a new pipeline layout builder; refcount-managed.
PipelineLayoutBuilder* DeviceBase::CreatePipelineLayoutBuilder() {
return new PipelineLayoutBuilder(this);
}
// NXT API: allocates a new queue builder; refcount-managed.
QueueBuilder* DeviceBase::CreateQueueBuilder() {
return new QueueBuilder(this);
}
// NXT API: allocates a new sampler builder; refcount-managed.
SamplerBuilder* DeviceBase::CreateSamplerBuilder() {
return new SamplerBuilder(this);
}
// NXT API: allocates a new shader module builder; refcount-managed.
ShaderModuleBuilder* DeviceBase::CreateShaderModuleBuilder() {
return new ShaderModuleBuilder(this);
}
// NXT API: allocates a new texture builder; refcount-managed.
TextureBuilder* DeviceBase::CreateTextureBuilder() {
return new TextureBuilder(this);
}
// NXT API: intended to copy `count` bind groups starting at `start` from
// `source` to `target`. Currently a stub — all parameters are unused.
void DeviceBase::CopyBindGroups(uint32_t start, uint32_t count, BindGroupBase* source, BindGroupBase* target) {
// TODO(cwallez@chromium.org): update state tracking then call the backend
}
}

View File

@@ -0,0 +1,94 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_DEVICEBASE_H_
#define BACKEND_COMMON_DEVICEBASE_H_
#include "common/Forward.h"
#include "common/RefCounted.h"
#include "nxt/nxtcpp.h"
namespace backend {
using ErrorCallback = void (*)(const char* errorMessage, void* userData);
// Frontend device object shared by all backends. Owns the object dedup
// caches and the error callback, and exposes pure-virtual Create* hooks that
// each backend implements to produce its concrete object types.
class DeviceBase {
public:
DeviceBase();
~DeviceBase();
// Invokes the registered error callback (if any) with `message`.
void HandleError(const char* message);
void RegisterErrorCallback(ErrorCallback callback, void* userData);
// Backend factory hooks: called with a fully-populated builder to create
// the backend-specific object.
virtual BindGroupBase* CreateBindGroup(BindGroupBuilder* builder) = 0;
virtual BindGroupLayoutBase* CreateBindGroupLayout(BindGroupLayoutBuilder* builder) = 0;
virtual BufferBase* CreateBuffer(BufferBuilder* builder) = 0;
virtual BufferViewBase* CreateBufferView(BufferViewBuilder* builder) = 0;
virtual CommandBufferBase* CreateCommandBuffer(CommandBufferBuilder* builder) = 0;
virtual InputStateBase* CreateInputState(InputStateBuilder* builder) = 0;
virtual PipelineBase* CreatePipeline(PipelineBuilder* builder) = 0;
virtual PipelineLayoutBase* CreatePipelineLayout(PipelineLayoutBuilder* builder) = 0;
virtual QueueBase* CreateQueue(QueueBuilder* builder) = 0;
virtual SamplerBase* CreateSampler(SamplerBuilder* builder) = 0;
virtual ShaderModuleBase* CreateShaderModule(ShaderModuleBuilder* builder) = 0;
virtual TextureBase* CreateTexture(TextureBuilder* builder) = 0;
virtual TextureViewBase* CreateTextureView(TextureViewBuilder* builder) = 0;
// Many NXT objects are completely immutable once created which means that if two
// builders are given the same arguments, they can return the same object. Reusing
// objects will help make comparisons between objects by a single pointer comparison.
//
// Technically no object is immutable as they have a reference count, and an
// application with reference-counting issues could "see" that objects are reused.
// This is solved by automatic-reference counting, and also the fact that when using
// the client-server wire every creation will get a different proxy object, with a
// different reference count.
//
// When trying to create an object, we give both the builder and an example of what
// the built object will be, the "blueprint". The blueprint is just a FooBase object
// instead of a backend Foo object. If the blueprint doesn't match an object in the
// cache, then the builder is used to make a new object.
BindGroupLayoutBase* GetOrCreateBindGroupLayout(const BindGroupLayoutBase* blueprint, BindGroupLayoutBuilder* builder);
void UncacheBindGroupLayout(BindGroupLayoutBase* obj);
// NXT API
BindGroupBuilder* CreateBindGroupBuilder();
BindGroupLayoutBuilder* CreateBindGroupLayoutBuilder();
BufferBuilder* CreateBufferBuilder();
// NOTE(review): declared here but no definition appears in Device.cpp —
// confirm CreateBufferViewBuilder is implemented elsewhere.
BufferViewBuilder* CreateBufferViewBuilder();
CommandBufferBuilder* CreateCommandBufferBuilder();
InputStateBuilder* CreateInputStateBuilder();
PipelineBuilder* CreatePipelineBuilder();
PipelineLayoutBuilder* CreatePipelineLayoutBuilder();
QueueBuilder* CreateQueueBuilder();
SamplerBuilder* CreateSamplerBuilder();
ShaderModuleBuilder* CreateShaderModuleBuilder();
TextureBuilder* CreateTextureBuilder();
void CopyBindGroups(uint32_t start, uint32_t count, BindGroupBase* source, BindGroupBase* target);
private:
// The object caches aren't exposed in the header as they would require a lot of
// additional includes.
struct Caches;
Caches* caches = nullptr;
ErrorCallback errorCallback = nullptr;
void* errorUserData = nullptr;
};
}
#endif // BACKEND_COMMON_DEVICEBASE_H_

View File

@@ -0,0 +1,71 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_FORWARD_H_
#define BACKEND_COMMON_FORWARD_H_
#include <cassert>
#include <cstdint>
#define ASSERT assert
namespace backend {
class BindGroupBase;
class BindGroupBuilder;
class BindGroupLayoutBase;
class BindGroupLayoutBuilder;
class BufferBase;
class BufferBuilder;
class BufferViewBase;
class BufferViewBuilder;
class CommandBufferBase;
class CommandBufferBuilder;
class InputStateBase;
class InputStateBuilder;
class PipelineBase;
class PipelineBuilder;
class PipelineLayoutBase;
class PipelineLayoutBuilder;
class QueueBase;
class QueueBuilder;
class SamplerBase;
class SamplerBuilder;
class ShaderModuleBase;
class ShaderModuleBuilder;
class TextureBase;
class TextureBuilder;
class TextureViewBase;
class TextureViewBuilder;
class DeviceBase;
template<typename T>
class Ref;
template<typename T>
class PerStage;
// TODO(cwallez@chromium.org): where should constants live?
static constexpr uint32_t kMaxPushConstants = 32u;
static constexpr uint32_t kMaxBindGroups = 4u;
static constexpr uint32_t kMaxBindingsPerGroup = 16u; // TODO(cwallez@chromium.org): investigate bindgroup limits
static constexpr uint32_t kMaxVertexAttributes = 16u;
static constexpr uint32_t kMaxVertexInputs = 16u;
static constexpr uint32_t kNumStages = 3;
enum PushConstantType : uint8_t;
}
#endif // BACKEND_COMMON_FORWARD_H_

View File

@@ -0,0 +1,139 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "InputState.h"
#include "Device.h"
namespace backend {
// InputState helpers
// Returns the size in bytes of one index of the given format.
size_t IndexFormatSize(nxt::IndexFormat format) {
    switch (format) {
        case nxt::IndexFormat::Uint16:
            return sizeof(uint16_t);
        case nxt::IndexFormat::Uint32:
            return sizeof(uint32_t);
    }
    // All enum values are handled above; without this tail the function could
    // fall off the end (undefined behavior) for an out-of-range enum value.
    assert(false);
    return 0;
}
// Returns the number of components (1-4) of the given vertex format.
uint32_t VertexFormatNumComponents(nxt::VertexFormat format) {
    switch (format) {
        case nxt::VertexFormat::FloatR32G32B32A32:
            return 4;
        case nxt::VertexFormat::FloatR32G32B32:
            return 3;
        case nxt::VertexFormat::FloatR32G32:
            return 2;
    }
    // All enum values are handled above; guard against falling off the end
    // (undefined behavior) for an out-of-range enum value.
    assert(false);
    return 0;
}
// Returns the size in bytes of one attribute of the given vertex format.
// All current formats are 32-bit-float based, so size = components * 4.
size_t VertexFormatSize(nxt::VertexFormat format) {
    switch (format) {
        case nxt::VertexFormat::FloatR32G32B32A32:
        case nxt::VertexFormat::FloatR32G32B32:
        case nxt::VertexFormat::FloatR32G32:
            return VertexFormatNumComponents(format) * sizeof(float);
    }
    // All enum values are handled above; guard against falling off the end
    // (undefined behavior) for an out-of-range enum value.
    assert(false);
    return 0;
}
// InputStateBase
// Snapshots the builder's vertex-fetch configuration. Uses the member
// initializer list (copy-construction) instead of default-construct-then-
// assign; the resulting state is identical.
InputStateBase::InputStateBase(InputStateBuilder* builder)
    : attributesSetMask(builder->attributesSetMask),
      attributeInfos(builder->attributeInfos),
      inputsSetMask(builder->inputsSetMask),
      inputInfos(builder->inputInfos) {
}
// Returns the bitset of attribute locations configured in this input state.
const std::bitset<kMaxVertexAttributes>& InputStateBase::GetAttributesSetMask() const {
return attributesSetMask;
}
// Returns the info for attribute `location`, which must be set (asserted).
const InputStateBase::AttributeInfo& InputStateBase::GetAttribute(uint32_t location) const {
ASSERT(attributesSetMask[location]);
return attributeInfos[location];
}
// Returns the bitset of vertex input slots configured in this input state.
const std::bitset<kMaxVertexInputs>& InputStateBase::GetInputsSetMask() const {
return inputsSetMask;
}
// Returns the info for input `slot`, which must be set (asserted).
const InputStateBase::InputInfo& InputStateBase::GetInput(uint32_t slot) const {
ASSERT(inputsSetMask[slot]);
return inputInfos[slot];
}
// InputStateBuilder
// The builder keeps a raw (non-owning) pointer to the device for error
// reporting and final object creation.
InputStateBuilder::InputStateBuilder(DeviceBase* device) : device(device) {
}
// True once GetResult() has been called; a builder is single-use.
bool InputStateBuilder::WasConsumed() const {
return consumed;
}
// NXT API: validates that every set attribute references a configured input
// slot, then asks the device to build the backend InputState. Returns
// nullptr (after reporting an error) when validation fails.
InputStateBase* InputStateBuilder::GetResult() {
for (uint32_t location = 0; location < kMaxVertexAttributes; ++location) {
if (attributesSetMask[location] &&
!inputsSetMask[attributeInfos[location].bindingSlot]) {
device->HandleError("Attribute uses unset input");
return nullptr;
}
}
consumed = true;
return device->CreateInputState(this);
}
// NXT API: configures attribute `shaderLocation` to read `format` at byte
// `offset` from vertex input `bindingSlot`. Out-of-range or already-set
// locations are rejected through the device error callback.
void InputStateBuilder::SetAttribute(uint32_t shaderLocation,
uint32_t bindingSlot, nxt::VertexFormat format, uint32_t offset) {
if (shaderLocation >= kMaxVertexAttributes) {
device->HandleError("Setting attribute out of bounds");
return;
}
if (bindingSlot >= kMaxVertexInputs) {
device->HandleError("Binding slot out of bounds");
return;
}
if (attributesSetMask[shaderLocation]) {
device->HandleError("Setting already set attribute");
return;
}
attributesSetMask.set(shaderLocation);
auto& info = attributeInfos[shaderLocation];
info.bindingSlot = bindingSlot;
info.format = format;
info.offset = offset;
}
// NXT API: configures vertex input `bindingSlot` with the given byte stride
// and step mode. Out-of-range or already-set slots are rejected through the
// device error callback.
void InputStateBuilder::SetInput(uint32_t bindingSlot, uint32_t stride,
nxt::InputStepMode stepMode) {
if (bindingSlot >= kMaxVertexInputs) {
device->HandleError("Setting input out of bounds");
return;
}
if (inputsSetMask[bindingSlot]) {
device->HandleError("Setting already set input");
return;
}
inputsSetMask.set(bindingSlot);
auto& info = inputInfos[bindingSlot];
info.stride = stride;
info.stepMode = stepMode;
}
}

View File

@@ -0,0 +1,85 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_INPUTSTATE_H_
#define BACKEND_COMMON_INPUTSTATE_H_
#include "Forward.h"
#include "RefCounted.h"
#include "nxt/nxtcpp.h"
#include <array>
#include <bitset>
namespace backend {
size_t IndexFormatSize(nxt::IndexFormat format);
uint32_t VertexFormatNumComponents(nxt::VertexFormat format);
size_t VertexFormatSize(nxt::VertexFormat format);
// Immutable description of the vertex fetch configuration: which attribute
// locations are used (format + byte offset) and which vertex buffer inputs
// (stride + step mode) they pull from.
class InputStateBase : public RefCounted {
public:
InputStateBase(InputStateBuilder* builder);
struct AttributeInfo {
uint32_t bindingSlot;
nxt::VertexFormat format;
uint32_t offset;
};
struct InputInfo {
uint32_t stride;
nxt::InputStepMode stepMode;
};
const std::bitset<kMaxVertexAttributes>& GetAttributesSetMask() const;
// `location` must be set in the attributes mask (asserted).
const AttributeInfo& GetAttribute(uint32_t location) const;
const std::bitset<kMaxVertexInputs>& GetInputsSetMask() const;
// `slot` must be set in the inputs mask (asserted).
const InputInfo& GetInput(uint32_t slot) const;
private:
std::bitset<kMaxVertexAttributes> attributesSetMask;
std::array<AttributeInfo, kMaxVertexAttributes> attributeInfos;
std::bitset<kMaxVertexInputs> inputsSetMask;
std::array<InputStateBase::InputInfo, kMaxVertexInputs> inputInfos;
};
// Single-use builder that accumulates attribute/input configuration and
// validates it in GetResult() before creating the backend InputState.
class InputStateBuilder : public RefCounted {
public:
InputStateBuilder(DeviceBase* device);
bool WasConsumed() const;
// NXT API
InputStateBase* GetResult();
void SetAttribute(uint32_t shaderLocation, uint32_t bindingSlot,
nxt::VertexFormat format, uint32_t offset);
void SetInput(uint32_t bindingSlot, uint32_t stride,
nxt::InputStepMode stepMode);
private:
friend class InputStateBase;
DeviceBase* device;
std::bitset<kMaxVertexAttributes> attributesSetMask;
std::array<InputStateBase::AttributeInfo, kMaxVertexAttributes> attributeInfos;
std::bitset<kMaxVertexInputs> inputsSetMask;
std::array<InputStateBase::InputInfo, kMaxVertexInputs> inputInfos;
bool consumed = false;
};
}
#endif // BACKEND_COMMON_INPUTSTATE_H_

View File

@@ -0,0 +1,52 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "Math.h"
#include "Forward.h"
namespace backend {
// Returns the index of the least-significant set bit of |bits|.
// |bits| must be non-zero.
unsigned long ScanForward(unsigned long bits) {
    assert(bits != 0);
    // TODO(cwallez@chromium.org): handle non-posix platforms
    // (e.g. _BitScanForward on MSVC).
    unsigned long firstSetBit = static_cast<unsigned long>(__builtin_ctzl(bits));
    return firstSetBit;
}
// Returns floor(log2(value)), i.e. the index of the highest set bit.
// |value| must be non-zero.
uint32_t Log2(uint32_t value) {
    assert(value != 0);
    uint32_t highestBitIndex = 31 - static_cast<uint32_t>(__builtin_clz(value));
    return highestBitIndex;
}
// True iff |n| (non-zero) is a power of two: a power of two shares no set
// bits with its predecessor.
bool IsPowerOfTwo(size_t n) {
    assert(n != 0);
    return !(n & (n - 1));
}
// True iff |ptr| is aligned on |alignment|, which must be a non-zero power
// of two.
bool IsAligned(const void* ptr, size_t alignment) {
    assert(alignment != 0 && (alignment & (alignment - 1)) == 0);
    // uintptr_t (not intptr_t): bit masking on a signed type is
    // implementation-defined for addresses with the sign bit set.
    return (reinterpret_cast<uintptr_t>(ptr) & (alignment - 1)) == 0;
}
// Rounds |ptr| up to the next multiple of |alignment|, which must be a
// non-zero power of two (the mask trick below is only valid then).
void* AlignVoidPtr(void* ptr, size_t alignment) {
    assert(alignment != 0 && (alignment & (alignment - 1)) == 0);
    // uintptr_t (not intptr_t): arithmetic and masking on a signed type is
    // implementation-defined for addresses with the sign bit set.
    uintptr_t aligned = (reinterpret_cast<uintptr_t>(ptr) + (alignment - 1)) &
                        ~static_cast<uintptr_t>(alignment - 1);
    return reinterpret_cast<void*>(aligned);
}
}

43
src/backend/common/Math.h Normal file
View File

@@ -0,0 +1,43 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_MATH_H_
#define BACKEND_COMMON_MATH_H_
#include <cstddef>
#include <cstdint>
namespace backend {
// The following are not valid for 0
unsigned long ScanForward(unsigned long bits);
uint32_t Log2(uint32_t value);
bool IsPowerOfTwo(size_t n);
bool IsAligned(const void* ptr, size_t alignment);
void* AlignVoidPtr(void* ptr, size_t alignment);
// Rounds `ptr` up to the next multiple of `alignment` (a power of two),
// preserving the pointee type.
template<typename T>
T* Align(T* ptr, size_t alignment) {
return reinterpret_cast<T*>(AlignVoidPtr(ptr, alignment));
}
// Const overload: the const_cast is safe because AlignVoidPtr only computes
// a new address and never writes through the pointer.
template<typename T>
const T* Align(const T* ptr, size_t alignment) {
return reinterpret_cast<const T*>(AlignVoidPtr(const_cast<T*>(ptr), alignment));
}
}
#endif // BACKEND_COMMON_MATH_H_

View File

@@ -0,0 +1,29 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "PerStage.h"
namespace backend {
// Returns an iterator yielding each individual ShaderStage whose bit is set
// in `stages`.
BitSetIterator<kNumStages, nxt::ShaderStage> IterateStages(nxt::ShaderStageBit stages) {
std::bitset<kNumStages> bits(static_cast<uint32_t>(stages));
return BitSetIterator<kNumStages, nxt::ShaderStage>(bits);
}
// Converts a single stage enum value into its one-bit ShaderStageBit mask.
nxt::ShaderStageBit StageBit(nxt::ShaderStage stage) {
ASSERT(static_cast<uint32_t>(stage) < kNumStages);
return static_cast<nxt::ShaderStageBit>(1 << static_cast<uint32_t>(stage));
}
}

View File

@@ -0,0 +1,68 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_PERSTAGE_H_
#define BACKEND_COMMON_PERSTAGE_H_
#include "BitSetIterator.h"
#include "nxt/nxtcpp.h"
#include <array>
namespace backend {
static_assert(static_cast<uint32_t>(nxt::ShaderStage::Vertex) < kNumStages, "");
static_assert(static_cast<uint32_t>(nxt::ShaderStage::Fragment) < kNumStages, "");
static_assert(static_cast<uint32_t>(nxt::ShaderStage::Compute) < kNumStages, "");
static_assert(static_cast<uint32_t>(nxt::ShaderStageBit::Vertex) == (1 << static_cast<uint32_t>(nxt::ShaderStage::Vertex)), "");
static_assert(static_cast<uint32_t>(nxt::ShaderStageBit::Fragment) == (1 << static_cast<uint32_t>(nxt::ShaderStage::Fragment)), "");
static_assert(static_cast<uint32_t>(nxt::ShaderStageBit::Compute) == (1 << static_cast<uint32_t>(nxt::ShaderStage::Compute)), "");
BitSetIterator<kNumStages, nxt::ShaderStage> IterateStages(nxt::ShaderStageBit stages);
nxt::ShaderStageBit StageBit(nxt::ShaderStage stage);
static constexpr nxt::ShaderStageBit kAllStages = static_cast<nxt::ShaderStageBit>((1 << kNumStages) - 1);
// Fixed-size storage of one T per shader stage, indexable either by a
// nxt::ShaderStage enum value or by a nxt::ShaderStageBit with exactly one
// bit set.
template<typename T>
class PerStage {
    public:
        T& operator[](nxt::ShaderStage stage) {
            ASSERT(static_cast<uint32_t>(stage) < kNumStages);
            return data[static_cast<uint32_t>(stage)];
        }
        const T& operator[](nxt::ShaderStage stage) const {
            ASSERT(static_cast<uint32_t>(stage) < kNumStages);
            return data[static_cast<uint32_t>(stage)];
        }
        T& operator[](nxt::ShaderStageBit stageBit) {
            uint32_t bit = static_cast<uint32_t>(stageBit);
            // A valid single stage bit is strictly less than (1 << kNumStages);
            // the previous `<=` bound also accepted (1 << kNumStages) itself,
            // for which Log2(bit) == kNumStages would index past `data`.
            ASSERT(bit != 0 && IsPowerOfTwo(bit) && bit < (1 << kNumStages));
            return data[Log2(bit)];
        }
        const T& operator[](nxt::ShaderStageBit stageBit) const {
            uint32_t bit = static_cast<uint32_t>(stageBit);
            ASSERT(bit != 0 && IsPowerOfTwo(bit) && bit < (1 << kNumStages));
            return data[Log2(bit)];
        }
    private:
        std::array<T, kNumStages> data;
};
}
#endif // BACKEND_COMMON_PERSTAGE_H_

View File

@@ -0,0 +1,149 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "Pipeline.h"
#include "Device.h"
#include "InputState.h"
#include "PipelineLayout.h"
#include "ShaderModule.h"
namespace backend {
// PipelineBase
// Validates the builder configuration and snapshots it. On any validation
// failure the error is reported through the device and construction returns
// early, leaving the object partially initialized (callers are expected to
// discard it after the error).
PipelineBase::PipelineBase(PipelineBuilder* builder)
: device(builder->device), stageMask(builder->stageMask), layout(std::move(builder->layout)),
inputState(std::move(builder->inputState)) {
// Only complete render (vertex+fragment) or compute pipelines are allowed.
if (stageMask != (nxt::ShaderStageBit::Vertex | nxt::ShaderStageBit::Fragment) &&
stageMask != nxt::ShaderStageBit::Compute) {
device->HandleError("Wrong combination of stage for pipeline");
return;
}
// Expands the module's per-variable push constant reflection into one type
// entry per occupied slot; sizes[i] appears to be the number of slots
// variable i occupies — TODO confirm against ShaderModule's reflection.
auto FillPushConstants = [](const ShaderModuleBase* module, PushConstantInfo* info) {
const auto& moduleInfo = module->GetPushConstants();
info->mask = moduleInfo.mask;
for (uint32_t i = 0; i < moduleInfo.names.size(); i++) {
unsigned int size = moduleInfo.sizes[i];
if (size == 0) {
continue;
}
for (uint32_t offset = 0; offset < size; offset++) {
info->types[i + offset] = moduleInfo.types[i];
}
i += size - 1;
}
};
// Each stage module must be compatible with the pipeline layout.
for (auto stageBit : IterateStages(builder->stageMask)) {
if (!builder->stages[stageBit].module->IsCompatibleWithPipelineLayout(layout.Get())) {
device->HandleError("Stage not compatible with layout");
return;
}
FillPushConstants(builder->stages[stageBit].module.Get(), &pushConstants[stageBit]);
}
// Render pipelines may only use vertex attributes the input state provides.
if (!IsCompute()) {
if ((builder->stages[nxt::ShaderStage::Vertex].module->GetUsedVertexAttributes() & ~inputState->GetAttributesSetMask()).any()) {
device->HandleError("Pipeline vertex stage uses inputs not in the input state");
return;
}
}
}
// Returns the push constant layout reflected for `stage`.
const PipelineBase::PushConstantInfo& PipelineBase::GetPushConstants(nxt::ShaderStage stage) const {
return pushConstants[stage];
}
// Returns the set of shader stages this pipeline contains.
nxt::ShaderStageBit PipelineBase::GetStageMask() const {
return stageMask;
}
// Returns the pipeline layout (non-owning; the pipeline keeps a Ref).
PipelineLayoutBase* PipelineBase::GetLayout() {
return layout.Get();
}
// Returns the input state (non-owning; the pipeline keeps a Ref).
InputStateBase* PipelineBase::GetInputState() {
return inputState.Get();
}
// True when this is a compute (not render) pipeline.
bool PipelineBase::IsCompute() const {
return stageMask == nxt::ShaderStageBit::Compute;
}
// PipelineBuilder
// Starts with no stages set; stages are added through SetStage().
PipelineBuilder::PipelineBuilder(DeviceBase* device)
: device(device), stageMask(static_cast<nxt::ShaderStageBit>(0)) {
}
// True once GetResult() has been called; a builder is single-use.
bool PipelineBuilder::WasConsumed() const {
return consumed;
}
// Returns module/entry point for `stage`, which must have been set (asserted).
const PipelineBuilder::StageInfo& PipelineBuilder::GetStageInfo(nxt::ShaderStage stage) const {
ASSERT(stageMask & StageBit(stage));
return stages[stage];
}
// NXT API: finalizes the builder; missing layout/input state are replaced
// with default-constructed ones.
// NOTE(review): the temporary builders returned by CreatePipelineLayoutBuilder()
// and CreateInputStateBuilder() are RefCounted and never released here —
// confirm whether this leaks a reference.
PipelineBase* PipelineBuilder::GetResult() {
// TODO(cwallez@chromium.org): the layout should be required, and put the default objects in the device
if (!layout) {
layout = device->CreatePipelineLayoutBuilder()->GetResult();
}
if (!inputState) {
inputState = device->CreateInputStateBuilder()->GetResult();
}
consumed = true;
return device->CreatePipeline(this);
}
// NXT API: sets the pipeline layout (the Ref member takes a reference).
void PipelineBuilder::SetLayout(PipelineLayoutBase* layout) {
this->layout = layout;
}
// NXT API: attaches `module` as the shader for `stage`. Rejects non-"main"
// entry points, a module whose execution model doesn't match `stage`, and
// setting the same stage twice — all reported via the device error callback.
void PipelineBuilder::SetStage(nxt::ShaderStage stage, ShaderModuleBase* module, const char* entryPoint) {
if (entryPoint != std::string("main")) {
device->HandleError("Currently the entry point has to be main()");
return;
}
if (stage != module->GetExecutionModel()) {
device->HandleError("Setting module with wrong execution model");
return;
}
nxt::ShaderStageBit bit = StageBit(stage);
if (stageMask & bit) {
device->HandleError("Setting already set stage");
return;
}
stageMask |= bit;
stages[stage].module = module;
stages[stage].entryPoint = entryPoint;
}
// NXT API: sets the vertex input state (the Ref member takes a reference).
void PipelineBuilder::SetInputState(InputStateBase* inputState) {
this->inputState = inputState;
}
}

View File

@@ -0,0 +1,92 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_PIPELINE_H_
#define BACKEND_COMMON_PIPELINE_H_
#include "Forward.h"
#include "PerStage.h"
#include "RefCounted.h"
#include "nxt/nxtcpp.h"
#include <array>
#include <bitset>
namespace backend {
// Scalar type occupying a push constant slot, as reflected from the shader.
enum PushConstantType : uint8_t {
Int,
UInt,
Float,
};
// Frontend pipeline object: validated, immutable snapshot of the stages,
// layout, input state and reflected push constants from a PipelineBuilder.
class PipelineBase : public RefCounted {
public:
PipelineBase(PipelineBuilder* builder);
// Per-stage push constant reflection: which slots are used and their types.
struct PushConstantInfo {
std::bitset<kMaxPushConstants> mask;
std::array<PushConstantType, kMaxPushConstants> types;
};
const PushConstantInfo& GetPushConstants(nxt::ShaderStage stage) const;
nxt::ShaderStageBit GetStageMask() const;
PipelineLayoutBase* GetLayout();
InputStateBase* GetInputState();
// TODO(cwallez@chromium.org): split compute and render pipelines
bool IsCompute() const;
private:
DeviceBase* device;
nxt::ShaderStageBit stageMask;
Ref<PipelineLayoutBase> layout;
PerStage<PushConstantInfo> pushConstants;
Ref<InputStateBase> inputState;
};
// Single-use builder accumulating stages, layout and input state; the
// PipelineBase constructor performs validation when GetResult() is called.
class PipelineBuilder : public RefCounted {
public:
PipelineBuilder(DeviceBase* device);
bool WasConsumed() const;
// Shader module plus its entry point name for one stage.
struct StageInfo {
std::string entryPoint;
Ref<ShaderModuleBase> module;
};
const StageInfo& GetStageInfo(nxt::ShaderStage stage) const;
// NXT API
PipelineBase* GetResult();
void SetLayout(PipelineLayoutBase* layout);
void SetStage(nxt::ShaderStage stage, ShaderModuleBase* module, const char* entryPoint);
void SetInputState(InputStateBase* inputState);
private:
friend class PipelineBase;
DeviceBase* device;
Ref<PipelineLayoutBase> layout;
nxt::ShaderStageBit stageMask;
PerStage<StageInfo> stages;
Ref<InputStateBase> inputState;
bool consumed = false;
};
}
#endif // BACKEND_COMMON_PIPELINE_H_

View File

@@ -0,0 +1,73 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "PipelineLayout.h"
#include "BindGroupLayout.h"
#include "Device.h"
namespace backend {
// PipelineLayoutBase
// Steals the builder's bind group layout array (moved, not copied) along with
// the mask of explicitly-set groups.
PipelineLayoutBase::PipelineLayoutBase(PipelineLayoutBuilder* builder)
: bindGroupLayouts(std::move(builder->bindGroupLayouts)), mask(builder->mask) {
}
// Returns the layout of bind group |group|. Every slot is non-null because the
// builder fills unset groups with an empty layout in GetResult().
const BindGroupLayoutBase* PipelineLayoutBase::GetBindGroupLayout(size_t group) const {
ASSERT(group < kMaxBindGroups);
return bindGroupLayouts[group].Get();
}
// Mask of the bind groups that were explicitly set on the builder.
const std::bitset<kMaxBindGroups> PipelineLayoutBase::GetBindGroupsLayoutMask() const {
return mask;
}
// PipelineLayoutBuilder
PipelineLayoutBuilder::PipelineLayoutBuilder(DeviceBase* device) : device(device) {
}
// True once GetResult() has consumed this builder.
bool PipelineLayoutBuilder::WasConsumed() const {
return consumed;
}
// Consumes the builder and creates the backend pipeline layout. Unset bind
// group slots are filled with freshly created empty layouts.
PipelineLayoutBase* PipelineLayoutBuilder::GetResult() {
// TODO(cwallez@chromium.org): this is a hack, have the null bind group layout somewhere in the device
// once we have a cache of BGL
for (size_t group = 0; group < kMaxBindGroups; ++group) {
if (!bindGroupLayouts[group]) {
// NOTE(review): the temporary builder returned by CreateBindGroupLayoutBuilder()
// is RefCounted but never Released here — looks like a leak; verify ownership rules.
bindGroupLayouts[group] = device->CreateBindGroupLayoutBuilder()->GetResult();
}
}
consumed = true;
return device->CreatePipelineLayout(this);
}
// Records |layout| for bind group slot |groupIndex|. Each slot may only be set
// once; out-of-range indices and duplicates are reported as device errors.
void PipelineLayoutBuilder::SetBindGroupLayout(uint32_t groupIndex, BindGroupLayoutBase* layout) {
// Range check must come first: mask[groupIndex] below is only valid in range.
if (groupIndex >= kMaxBindGroups) {
device->HandleError("groupIndex is over the maximum allowed");
return;
}
if (mask[groupIndex]) {
device->HandleError("Bind group layout already specified");
return;
}
bindGroupLayouts[groupIndex] = layout;
mask.set(groupIndex);
}
}

View File

@@ -0,0 +1,63 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_PIPELINELAYOUT_H_
#define BACKEND_COMMON_PIPELINELAYOUT_H_
#include "Forward.h"
#include "RefCounted.h"
#include "nxt/nxtcpp.h"
#include <array>
#include <bitset>
namespace backend {
using BindGroupLayoutArray = std::array<Ref<BindGroupLayoutBase>, kMaxBindGroups>;
// Frontend representation of a pipeline layout: a fixed-size array of bind
// group layouts plus a mask of which groups were explicitly provided.
class PipelineLayoutBase : public RefCounted {
public:
PipelineLayoutBase(PipelineLayoutBuilder* builder);
const BindGroupLayoutBase* GetBindGroupLayout(size_t group) const;
const std::bitset<kMaxBindGroups> GetBindGroupsLayoutMask() const;
protected:
BindGroupLayoutArray bindGroupLayouts;
std::bitset<kMaxBindGroups> mask;
};
// Builder for PipelineLayoutBase; consumed by GetResult().
class PipelineLayoutBuilder : public RefCounted {
public:
PipelineLayoutBuilder(DeviceBase* device);
bool WasConsumed() const;
// NXT API
PipelineLayoutBase* GetResult();
void SetBindGroupLayout(uint32_t groupIndex, BindGroupLayoutBase* layout);
private:
// PipelineLayoutBase's constructor moves bindGroupLayouts/mask directly.
friend class PipelineLayoutBase;
DeviceBase* device;
BindGroupLayoutArray bindGroupLayouts;
std::bitset<kMaxBindGroups> mask;
bool consumed = false;
};
}
#endif // BACKEND_COMMON_PIPELINELAYOUT_H_

View File

@@ -0,0 +1,42 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "Queue.h"
#include "Device.h"
#include "CommandBuffer.h"
namespace backend {
// QueueBase
// Checks that the resources referenced by |command| are in states that allow
// it to be executed right now.
bool QueueBase::ValidateSubmitCommand(CommandBufferBase* command) {
return command->ValidateResourceUsagesImmediate();
}
// QueueBuilder
QueueBuilder::QueueBuilder(DeviceBase* device) : device(device) {
}
// True once GetResult() has consumed this builder.
bool QueueBuilder::WasConsumed() const {
return consumed;
}
// Consumes the builder and creates the backend queue.
QueueBase* QueueBuilder::GetResult() {
consumed = true;
return device->CreateQueue(this);
}
}

View File

@@ -0,0 +1,59 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_QUEUE_H_
#define BACKEND_COMMON_QUEUE_H_
#include "Forward.h"
#include "RefCounted.h"
#include "nxt/nxtcpp.h"
namespace backend {
// Frontend queue object; backends derive from this and call ValidateSubmit
// with their concrete command buffer type before executing a submission.
class QueueBase : public RefCounted {
private:
bool ValidateSubmitCommand(CommandBufferBase* command);
public:
// Validates each of |numCommands| buffers; returns false on the first
// failure. T must derive from CommandBufferBase (checked at compile time).
template<typename T>
bool ValidateSubmit(uint32_t numCommands, T* const * commands) {
static_assert(std::is_base_of<CommandBufferBase, T>::value, "invalid command buffer type");
for (uint32_t i = 0; i < numCommands; ++i) {
if (!ValidateSubmitCommand(commands[i])) {
return false;
}
}
return true;
}
};
// Builder for QueueBase; consumed by GetResult().
class QueueBuilder : public RefCounted {
public:
QueueBuilder(DeviceBase* device);
bool WasConsumed() const;
// NXT API
QueueBase* GetResult();
private:
DeviceBase* device;
bool consumed = false;
};
}
#endif // BACKEND_COMMON_QUEUE_H_

View File

@@ -0,0 +1,66 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "RefCounted.h"
#include <cassert>
#define ASSERT assert
namespace backend {
RefCounted::RefCounted() {
}
RefCounted::~RefCounted() {
}
// Adds an internal (backend-held) reference. Must only be called on an object
// that is still alive (internalRefs != 0). Not thread-safe: counters are plain
// uint32_t with no synchronization.
void RefCounted::ReferenceInternal() {
ASSERT(internalRefs != 0);
// TODO(cwallez@chromium.org): what to do on overflow?
internalRefs ++;
}
// Drops an internal reference; deletes the object when the last one goes away.
// External references keep an internal reference alive (see Release()), so
// internalRefs reaching zero implies externalRefs is already zero.
void RefCounted::ReleaseInternal() {
ASSERT(internalRefs != 0);
internalRefs --;
if (internalRefs == 0) {
ASSERT(externalRefs == 0);
// TODO(cwallez@chromium.org): would this work with custom allocators?
delete this;
}
}
// Current count of API-side (external) references.
uint32_t RefCounted::GetExternalRefs() const {
return externalRefs;
}
// Current count of backend-side (internal) references.
uint32_t RefCounted::GetInternalRefs() const {
return internalRefs;
}
// Adds an external reference (NXT API). Requires at least one external
// reference to still exist.
void RefCounted::Reference() {
ASSERT(externalRefs != 0);
// TODO(cwallez@chromium.org): what to do on overflow?
externalRefs ++;
}
// Drops an external reference. When the last external reference disappears,
// the internal reference held on behalf of the API is released too, which may
// delete the object.
void RefCounted::Release() {
ASSERT(externalRefs != 0);
externalRefs --;
if (externalRefs == 0) {
ReleaseInternal();
}
}
}

View File

@@ -0,0 +1,126 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_REFCOUNTED_H_
#define BACKEND_COMMON_REFCOUNTED_H_
#include <cstdint>
namespace backend {
// Base class for objects with a dual reference count: "external" references
// held by the NXT API and "internal" references held by the backend (via
// Ref<T>). Both start at 1 on construction. The object deletes itself when the
// internal count reaches zero. Not thread-safe (plain uint32_t counters).
class RefCounted {
public:
RefCounted();
virtual ~RefCounted();
void ReferenceInternal();
void ReleaseInternal();
uint32_t GetExternalRefs() const;
uint32_t GetInternalRefs() const;
// NXT API
void Reference();
void Release();
protected:
uint32_t externalRefs = 1;
uint32_t internalRefs = 1;
};
// Smart pointer managing an internal reference on a RefCounted object:
// copying adds a reference, moving transfers it, destruction releases it.
// Not thread-safe (RefCounted's counters are unsynchronized).
template<typename T>
class Ref {
    public:
        Ref() {}
        // Takes a new internal reference on |p| (may be nullptr).
        Ref(T* p): pointee(p) {
            Reference();
        }
        // Copy construction shares the pointee and adds a reference.
        // Takes const& (the original took Ref<T>&, which rejected copies from
        // const Refs and from temporaries).
        Ref(const Ref<T>& other): pointee(other.pointee) {
            Reference();
        }
        Ref<T>& operator=(const Ref<T>& other) {
            if (&other == this) return *this;
            // Reference the new pointee before releasing the old one so an
            // aliasing assignment cannot drop the last reference prematurely.
            other.Reference();
            Release();
            pointee = other.pointee;
            return *this;
        }
        // Moves steal the pointee without touching the refcount.
        Ref(Ref<T>&& other) {
            pointee = other.pointee;
            other.pointee = nullptr;
        }
        Ref<T>& operator=(Ref<T>&& other) {
            if (&other == this) return *this;
            Release();
            pointee = other.pointee;
            other.pointee = nullptr;
            return *this;
        }
        ~Ref() {
            Release();
            pointee = nullptr;
        }
        // True when a pointee is held. Const so it works on const Refs
        // (the original was non-const for no reason).
        operator bool() const {
            return pointee != nullptr;
        }
        const T& operator*() const {
            return *pointee;
        }
        T& operator*() {
            return *pointee;
        }
        const T* operator->() const {
            return pointee;
        }
        T* operator->() {
            return pointee;
        }
        const T* Get() const {
            return pointee;
        }
        T* Get() {
            return pointee;
        }
    private:
        // const because copy-assignment must be able to reference through a
        // const Ref; the refcount lives in the pointee, not in this object.
        void Reference() const {
            if (pointee != nullptr) {
                pointee->ReferenceInternal();
            }
        }
        void Release() const {
            if (pointee != nullptr) {
                pointee->ReleaseInternal();
            }
        }
        //static_assert(std::is_base_of<RefCounted, T>::value, "");
        T* pointee = nullptr;
};
}
#endif // BACKEND_COMMON_REFCOUNTED_H_

View File

@@ -0,0 +1,67 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "Sampler.h"
#include "Device.h"
namespace backend {
// SamplerBase
// The base sampler currently stores no state; backends read the filter modes
// straight from the builder.
SamplerBase::SamplerBase(SamplerBuilder* builder) {
}
// SamplerBuilder
// Bit flags recording which builder properties were already set.
enum SamplerSetProperties {
SAMPLER_PROPERTY_FILTER = 0x1,
};
SamplerBuilder::SamplerBuilder(DeviceBase* device)
:device(device) {
}
// Filter used when the sampled texel is magnified.
nxt::FilterMode SamplerBuilder::GetMagFilter() const {
return magFilter;
}
// Filter used when the sampled texel is minified.
nxt::FilterMode SamplerBuilder::GetMinFilter() const {
return minFilter;
}
// Filter used between mip levels.
nxt::FilterMode SamplerBuilder::GetMipMapFilter() const {
return mipMapFilter;
}
// True once GetResult() has consumed this builder.
bool SamplerBuilder::WasConsumed() const {
return consumed;
}
// Consumes the builder and creates the backend sampler. All properties are
// optional; unset filters default to Nearest.
SamplerBase* SamplerBuilder::GetResult() {
consumed = true;
return device->CreateSampler(this);
}
// Sets all three filter modes at once; may only be called a single time.
void SamplerBuilder::SetFilterMode(nxt::FilterMode magFilter, nxt::FilterMode minFilter, nxt::FilterMode mipMapFilter) {
if ((propertiesSet & SAMPLER_PROPERTY_FILTER) != 0) {
device->HandleError("Sampler filter property set multiple times");
return;
}
this->magFilter = magFilter;
this->minFilter = minFilter;
this->mipMapFilter = mipMapFilter;
propertiesSet |= SAMPLER_PROPERTY_FILTER;
}
}

View File

@@ -0,0 +1,58 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_SAMPLER_H_
#define BACKEND_COMMON_SAMPLER_H_
#include "Forward.h"
#include "RefCounted.h"
#include "nxt/nxtcpp.h"
namespace backend {
// Frontend sampler object; currently stateless (backends keep their own copy
// of the builder's filter modes).
class SamplerBase : public RefCounted {
public:
SamplerBase(SamplerBuilder* builder);
};
// Builder for SamplerBase. Filter modes default to Nearest and may be set at
// most once via SetFilterMode.
class SamplerBuilder : public RefCounted {
public:
SamplerBuilder(DeviceBase* device);
nxt::FilterMode GetMagFilter() const;
nxt::FilterMode GetMinFilter() const;
nxt::FilterMode GetMipMapFilter() const;
bool WasConsumed() const;
// NXT API
SamplerBase* GetResult();
void SetFilterMode(nxt::FilterMode magFilter, nxt::FilterMode minFilter, nxt::FilterMode mipMapFilter);
private:
friend class SamplerBase;
DeviceBase* device;
// Bitmask of SamplerSetProperties flags (see Sampler.cpp).
int propertiesSet = 0;
bool consumed = false;
nxt::FilterMode magFilter = nxt::FilterMode::Nearest;
nxt::FilterMode minFilter = nxt::FilterMode::Nearest;
nxt::FilterMode mipMapFilter = nxt::FilterMode::Nearest;
};
}
#endif // BACKEND_COMMON_SAMPLER_H_

View File

@@ -0,0 +1,217 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ShaderModule.h"
#include "BindGroupLayout.h"
#include "Device.h"
#include "Pipeline.h"
#include "PipelineLayout.h"
#include <spirv-cross/spirv_cross.hpp>
namespace backend {
// Reflection data (push constants, bindings, attributes) is filled in later by
// ExtractSpirvInfo, which backends call with their spirv-cross compiler.
ShaderModuleBase::ShaderModuleBase(ShaderModuleBuilder* builder)
: device(builder->device) {
}
// Uses spirv-cross reflection to populate this module's metadata: execution
// model (shader stage), push constant layout, resource bindings per
// (set, binding), and — for vertex shaders — the set of used attribute
// locations. Out-of-range bindings/locations are reported as device errors.
void ShaderModuleBase::ExtractSpirvInfo(const spirv_cross::Compiler& compiler) {
const auto& resources = compiler.get_shader_resources();
switch (compiler.get_execution_model()) {
case spv::ExecutionModelVertex:
executionModel = nxt::ShaderStage::Vertex;
break;
case spv::ExecutionModelFragment:
executionModel = nxt::ShaderStage::Fragment;
break;
case spv::ExecutionModelGLCompute:
executionModel = nxt::ShaderStage::Compute;
break;
default:
ASSERT(false);
break;
}
// Extract push constants
pushConstants.mask.reset();
pushConstants.sizes.fill(0);
pushConstants.types.fill(PushConstantType::Int);
if (resources.push_constant_buffers.size() > 0) {
// Only the first push constant block is reflected; SPIR-V allows at most
// one push constant interface block per entry point.
auto interfaceBlock = resources.push_constant_buffers[0];
const auto& blockType = compiler.get_type(interfaceBlock.type_id);
ASSERT(blockType.basetype == spirv_cross::SPIRType::Struct);
for (uint32_t i = 0; i < blockType.member_types.size(); i++) {
ASSERT(compiler.get_member_decoration_mask(blockType.self, i) & 1ull << spv::DecorationOffset);
uint32_t offset = compiler.get_member_decoration(blockType.self, i, spv::DecorationOffset);
// Push constant slots are indexed in 4-byte units.
ASSERT(offset % 4 == 0);
offset /= 4;
ASSERT(offset < kMaxPushConstants);
auto memberType = compiler.get_type(blockType.member_types[i]);
PushConstantType constantType;
if (memberType.basetype == spirv_cross::SPIRType::Int) {
constantType = PushConstantType::Int;
} else if (memberType.basetype == spirv_cross::SPIRType::UInt) {
constantType = PushConstantType::UInt;
} else {
ASSERT(memberType.basetype == spirv_cross::SPIRType::Float);
constantType = PushConstantType::Float;
}
pushConstants.mask.set(offset);
pushConstants.names[offset] = interfaceBlock.name + "." + compiler.get_member_name(blockType.self, i);
pushConstants.sizes[offset] = memberType.vecsize * memberType.columns;
pushConstants.types[offset] = constantType;
}
}
// Fill in bindingInfo with the SPIRV bindings
auto ExtractResourcesBinding = [this](const std::vector<spirv_cross::Resource>& resources,
const spirv_cross::Compiler& compiler, nxt::BindingType type) {
// Every resource must carry both Binding and DescriptorSet decorations.
constexpr uint64_t requiredBindingDecorationMask = (1ull << spv::DecorationBinding) | (1ull << spv::DecorationDescriptorSet);
for (const auto& resource : resources) {
ASSERT((compiler.get_decoration_mask(resource.id) & requiredBindingDecorationMask) == requiredBindingDecorationMask);
uint32_t binding = compiler.get_decoration(resource.id, spv::DecorationBinding);
uint32_t set = compiler.get_decoration(resource.id, spv::DecorationDescriptorSet);
if (binding >= kMaxBindingsPerGroup || set >= kMaxBindGroups) {
device->HandleError("Binding over limits in the SPIRV");
continue;
}
auto& info = bindingInfo[set][binding];
info.used = true;
info.id = resource.id;
info.base_type_id = resource.base_type_id;
info.type = type;
}
};
ExtractResourcesBinding(resources.uniform_buffers, compiler, nxt::BindingType::UniformBuffer);
ExtractResourcesBinding(resources.separate_images, compiler, nxt::BindingType::SampledTexture);
ExtractResourcesBinding(resources.separate_samplers, compiler, nxt::BindingType::Sampler);
ExtractResourcesBinding(resources.storage_buffers, compiler, nxt::BindingType::StorageBuffer);
// Extract the vertex attributes
if (executionModel == nxt::ShaderStage::Vertex) {
for (const auto& attrib : resources.stage_inputs) {
ASSERT(compiler.get_decoration_mask(attrib.id) & (1ull << spv::DecorationLocation));
uint32_t location = compiler.get_decoration(attrib.id, spv::DecorationLocation);
if (location >= kMaxVertexAttributes) {
device->HandleError("Attribute location over limits in the SPIRV");
return;
}
usedVertexAttributes.set(location);
}
// Without a location qualifier on vertex outputs, spirv_cross::CompilerMSL gives them all
// the location 0, causing a compile error.
for (const auto& attrib : resources.stage_outputs) {
if (!(compiler.get_decoration_mask(attrib.id) & (1ull << spv::DecorationLocation))) {
device->HandleError("Need location qualifier on vertex output");
return;
}
}
}
if (executionModel == nxt::ShaderStage::Fragment) {
// Without a location qualifier on vertex inputs, spirv_cross::CompilerMSL gives them all
// the location 0, causing a compile error.
for (const auto& attrib : resources.stage_inputs) {
if (!(compiler.get_decoration_mask(attrib.id) & (1ull << spv::DecorationLocation))) {
device->HandleError("Need location qualifier on fragment input");
return;
}
}
}
}
// Accessors for the reflection data gathered by ExtractSpirvInfo; only valid
// after that call has been made.
const ShaderModuleBase::PushConstantInfo& ShaderModuleBase::GetPushConstants() const {
return pushConstants;
}
const ShaderModuleBase::ModuleBindingInfo& ShaderModuleBase::GetBindingInfo() const {
return bindingInfo;
}
const std::bitset<kMaxVertexAttributes>& ShaderModuleBase::GetUsedVertexAttributes() const {
return usedVertexAttributes;
}
nxt::ShaderStage ShaderModuleBase::GetExecutionModel() const {
return executionModel;
}
// A module is compatible with a pipeline layout when every bind group it uses
// is compatible with the corresponding bind group layout.
bool ShaderModuleBase::IsCompatibleWithPipelineLayout(const PipelineLayoutBase* layout) {
for (size_t group = 0; group < kMaxBindGroups; ++group) {
if (!IsCompatibleWithBindGroupLayout(group, layout->GetBindGroupLayout(group))) {
return false;
}
}
return true;
}
// Each binding the module uses must have the same type in the layout and be
// visible to this module's shader stage. Bindings the module does not use are
// ignored.
bool ShaderModuleBase::IsCompatibleWithBindGroupLayout(size_t group, const BindGroupLayoutBase* layout) {
const auto& layoutInfo = layout->GetBindingInfo();
for (size_t i = 0; i < kMaxBindingsPerGroup; ++i) {
const auto& moduleInfo = bindingInfo[group][i];
if (!moduleInfo.used) {
continue;
}
if (moduleInfo.type != layoutInfo.types[i]) {
return false;
}
if ((layoutInfo.visibilities[i] & StageBit(executionModel)) == 0) {
return false;
}
}
return true;
}
ShaderModuleBuilder::ShaderModuleBuilder(DeviceBase* device) : device(device) {}
// True once GetResult() has consumed this builder.
bool ShaderModuleBuilder::WasConsumed() const {
return consumed;
}
// Moves the SPIRV words out of the builder; the builder's vector is left
// empty, so this must only be called once (by the backend creating the module).
std::vector<uint32_t> ShaderModuleBuilder::AcquireSpirv() {
return std::move(spirv);
}
// Consumes the builder and creates the backend shader module. Requires
// SetSource to have been called with a non-empty program first.
ShaderModuleBase* ShaderModuleBuilder::GetResult() {
if (spirv.size() == 0) {
device->HandleError("Shader module needs to have the source set");
return nullptr;
}
consumed = true;
return device->CreateShaderModule(this);
}
// Copies the SPIRV words in [code, code + codeSize) into this builder,
// replacing any previously set source.
void ShaderModuleBuilder::SetSource(uint32_t codeSize, const uint32_t* code) {
    spirv = std::vector<uint32_t>(code, code + codeSize);
}
}

View File

@@ -0,0 +1,95 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_SHADERMODULE_H_
#define BACKEND_COMMON_SHADERMODULE_H_
#include "Forward.h"
#include "RefCounted.h"
#include "nxt/nxtcpp.h"
#include <array>
#include <bitset>
#include <vector>
namespace spirv_cross {
class Compiler;
}
namespace backend {
// Frontend shader module. Holds reflection metadata extracted from the SPIRV
// (via spirv-cross) that the pipeline uses for validation.
class ShaderModuleBase : public RefCounted {
public:
ShaderModuleBase(ShaderModuleBuilder* builder);
void ExtractSpirvInfo(const spirv_cross::Compiler& compiler);
// Per-slot push constant reflection, indexed in 4-byte units.
struct PushConstantInfo {
std::bitset<kMaxPushConstants> mask;
std::array<std::string, kMaxPushConstants> names;
std::array<int, kMaxPushConstants> sizes;
std::array<PushConstantType, kMaxPushConstants> types;
};
struct BindingInfo {
// The SPIRV ID of the resource.
uint32_t id;
uint32_t base_type_id;
nxt::BindingType type;
bool used = false;
};
// Binding reflection indexed by [bind group][binding within group].
using ModuleBindingInfo = std::array<std::array<BindingInfo, kMaxBindingsPerGroup>, kMaxBindGroups>;
const PushConstantInfo& GetPushConstants() const;
const ModuleBindingInfo& GetBindingInfo() const;
const std::bitset<kMaxVertexAttributes>& GetUsedVertexAttributes() const;
nxt::ShaderStage GetExecutionModel() const;
bool IsCompatibleWithPipelineLayout(const PipelineLayoutBase* layout);
private:
bool IsCompatibleWithBindGroupLayout(size_t group, const BindGroupLayoutBase* layout);
DeviceBase* device;
PushConstantInfo pushConstants = {};
ModuleBindingInfo bindingInfo;
std::bitset<kMaxVertexAttributes> usedVertexAttributes;
nxt::ShaderStage executionModel;
};
// Builder for ShaderModuleBase; the SPIRV source is set once and moved out by
// the backend with AcquireSpirv.
class ShaderModuleBuilder : public RefCounted {
public:
ShaderModuleBuilder(DeviceBase* device);
bool WasConsumed() const;
std::vector<uint32_t> AcquireSpirv();
// NXT API
ShaderModuleBase* GetResult();
void SetSource(uint32_t codeSize, const uint32_t* code);
private:
friend class ShaderModuleBase;
DeviceBase* device;
std::vector<uint32_t> spirv;
bool consumed = false;
};
}
#endif // BACKEND_COMMON_SHADERMODULE_H_

View File

@@ -0,0 +1,239 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "Texture.h"
#include "Device.h"
namespace backend {
// Returns the size in bytes of one pixel of |format|.
size_t TextureFormatPixelSize(nxt::TextureFormat format) {
    switch (format) {
        case nxt::TextureFormat::R8G8B8A8Unorm:
            return 4;
    }
    // Flowing off the end of a value-returning function is undefined behavior,
    // so return something for formats not covered above (none should reach
    // here; all enum values are handled in the switch).
    return 0;
}
// TextureBase
// Copies all immutable texture properties out of the builder; the usage state
// machine starts in the builder's initial usage (None unless SetInitialUsage
// was called).
TextureBase::TextureBase(TextureBuilder* builder)
: device(builder->device), dimension(builder->dimension), format(builder->format), width(builder->width),
height(builder->height), depth(builder->depth), numMipLevels(builder->numMipLevels),
allowedUsage(builder->allowedUsage), currentUsage(builder->currentUsage) {
}
// Trivial accessors for the immutable texture properties.
nxt::TextureDimension TextureBase::GetDimension() const {
return dimension;
}
nxt::TextureFormat TextureBase::GetFormat() const {
return format;
}
uint32_t TextureBase::GetWidth() const {
return width;
}
uint32_t TextureBase::GetHeight() const {
return height;
}
uint32_t TextureBase::GetDepth() const {
return depth;
}
uint32_t TextureBase::GetNumMipLevels() const {
return numMipLevels;
}
nxt::TextureUsageBit TextureBase::GetAllowedUsage() const {
return allowedUsage;
}
// Usage the texture is currently transitioned to.
nxt::TextureUsageBit TextureBase::GetUsage() const {
return currentUsage;
}
// NXT API: creates a builder for views of this texture. Caller is responsible
// for the returned builder's lifetime.
TextureViewBuilder* TextureBase::CreateTextureViewBuilder() {
return new TextureViewBuilder(device, this);
}
// True once FreezeUsage has permanently fixed the usage.
bool TextureBase::IsFrozen() const {
return frozen;
}
// True when the texture is frozen in a usage that includes |usage|.
bool TextureBase::HasFrozenUsage(nxt::TextureUsageBit usage) const {
return frozen && (usage & allowedUsage);
}
// A usage is possible when every requested bit is inside |allowedUsage| and at
// most one usage bit is set at a time.
bool TextureBase::IsUsagePossible(nxt::TextureUsageBit allowedUsage, nxt::TextureUsageBit usage) {
    return ((usage & allowedUsage) == usage) && nxt::HasZeroOrOneBits(usage);
}
// Frozen textures can never transition again; otherwise defer to the static
// usage check against this texture's allowed usages.
bool TextureBase::IsTransitionPossible(nxt::TextureUsageBit usage) const {
    return !frozen && IsUsagePossible(allowedUsage, usage);
}
// Unvalidated transition used internally by backends; the caller must have
// already checked IsTransitionPossible (asserted here).
void TextureBase::TransitionUsageImpl(nxt::TextureUsageBit usage) {
assert(IsTransitionPossible(usage));
currentUsage = usage;
}
// NXT API: validated transition; reports a device error instead of asserting.
void TextureBase::TransitionUsage(nxt::TextureUsageBit usage) {
if (!IsTransitionPossible(usage)) {
device->HandleError("Texture frozen or usage not allowed");
return;
}
TransitionUsageImpl(usage);
}
// NXT API: transitions to |usage| and permanently locks the texture there —
// allowedUsage is narrowed to exactly |usage| and further transitions fail.
void TextureBase::FreezeUsage(nxt::TextureUsageBit usage) {
if (!IsTransitionPossible(usage)) {
device->HandleError("Texture frozen or usage not allowed");
return;
}
allowedUsage = usage;
currentUsage = usage;
frozen = true;
}
// TextureBuilder
// Bit flags recording which builder properties were already set.
enum TextureSetProperties {
TEXTURE_PROPERTY_DIMENSION = 0x1,
TEXTURE_PROPERTY_EXTENT = 0x2,
TEXTURE_PROPERTY_FORMAT = 0x4,
TEXTURE_PROPERTY_MIP_LEVELS = 0x8,
TEXTURE_PROPERTY_ALLOWED_USAGE = 0x10,
TEXTURE_PROPERTY_INITIAL_USAGE = 0x20,
};
TextureBuilder::TextureBuilder(DeviceBase* device)
: device(device) {
}
// True once GetResult() has consumed this builder.
bool TextureBuilder::WasConsumed() const {
return consumed;
}
// Consumes the builder and creates the backend texture. All properties except
// the initial usage are required (initial usage defaults to None); the initial
// usage must be a subset of the allowed usage with at most one bit set.
TextureBase* TextureBuilder::GetResult() {
constexpr int allProperties = TEXTURE_PROPERTY_DIMENSION | TEXTURE_PROPERTY_EXTENT |
TEXTURE_PROPERTY_FORMAT | TEXTURE_PROPERTY_MIP_LEVELS | TEXTURE_PROPERTY_ALLOWED_USAGE;
if ((propertiesSet & allProperties) != allProperties) {
device->HandleError("Texture missing properties");
return nullptr;
}
if (!TextureBase::IsUsagePossible(allowedUsage, currentUsage)) {
device->HandleError("Initial texture usage is not allowed");
return nullptr;
}
// TODO(cwallez@chromium.org): check stuff based on the dimension
consumed = true;
return device->CreateTexture(this);
}
// NXT API setters. Each property may be set at most once; duplicates are
// reported as device errors via the propertiesSet bit flags.
void TextureBuilder::SetDimension(nxt::TextureDimension dimension) {
if ((propertiesSet & TEXTURE_PROPERTY_DIMENSION) != 0) {
device->HandleError("Texture dimension property set multiple times");
return;
}
propertiesSet |= TEXTURE_PROPERTY_DIMENSION;
this->dimension = dimension;
}
// Extent in texels; all three components must be non-zero.
void TextureBuilder::SetExtent(uint32_t width, uint32_t height, uint32_t depth) {
if ((propertiesSet & TEXTURE_PROPERTY_EXTENT) != 0) {
device->HandleError("Texture extent property set multiple times");
return;
}
if (width == 0 || height == 0 || depth == 0) {
device->HandleError("Cannot create an empty texture");
return;
}
propertiesSet |= TEXTURE_PROPERTY_EXTENT;
this->width = width;
this->height = height;
this->depth = depth;
}
void TextureBuilder::SetFormat(nxt::TextureFormat format) {
if ((propertiesSet & TEXTURE_PROPERTY_FORMAT) != 0) {
device->HandleError("Texture format property set multiple times");
return;
}
propertiesSet |= TEXTURE_PROPERTY_FORMAT;
this->format = format;
}
void TextureBuilder::SetMipLevels(uint32_t numMipLevels) {
if ((propertiesSet & TEXTURE_PROPERTY_MIP_LEVELS) != 0) {
device->HandleError("Texture mip levels property set multiple times");
return;
}
propertiesSet |= TEXTURE_PROPERTY_MIP_LEVELS;
this->numMipLevels = numMipLevels;
}
void TextureBuilder::SetAllowedUsage(nxt::TextureUsageBit usage) {
if ((propertiesSet & TEXTURE_PROPERTY_ALLOWED_USAGE) != 0) {
device->HandleError("Texture allowed usage property set multiple times");
return;
}
propertiesSet |= TEXTURE_PROPERTY_ALLOWED_USAGE;
this->allowedUsage = usage;
}
// Initial usage is optional (defaults to None); validated against the allowed
// usage in GetResult.
void TextureBuilder::SetInitialUsage(nxt::TextureUsageBit usage) {
if ((propertiesSet & TEXTURE_PROPERTY_INITIAL_USAGE) != 0) {
device->HandleError("Texture initial usage property set multiple times");
return;
}
propertiesSet |= TEXTURE_PROPERTY_INITIAL_USAGE;
this->currentUsage = usage;
}
// TextureViewBase
// A view keeps an owning Ref on its texture so the texture outlives the view.
TextureViewBase::TextureViewBase(TextureViewBuilder* builder)
: texture(builder->texture) {
}
// Non-owning pointer to the texture this view was created from.
TextureBase* TextureViewBase::GetTexture() {
return texture.Get();
}
// TextureViewBuilder
// Created via TextureBase::CreateTextureViewBuilder for a specific texture.
TextureViewBuilder::TextureViewBuilder(DeviceBase* device, TextureBase* texture)
: device(device), texture(texture) {
}
bool TextureViewBuilder::WasConsumed() const {
return false;
}
// Consumes the builder and creates the backend texture view.
TextureViewBase* TextureViewBuilder::GetResult() {
consumed = true;
return device->CreateTextureView(this);
}
}

View File

@@ -0,0 +1,121 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_TEXTURE_H_
#define BACKEND_COMMON_TEXTURE_H_
#include "Forward.h"
#include "RefCounted.h"
#include "nxt/nxtcpp.h"
namespace backend {
size_t TextureFormatPixelSize(nxt::TextureFormat format);
// Frontend texture. Carries immutable creation properties plus a usage state
// machine: the texture is always in exactly one (or zero) current usage, may
// only transition to usages inside allowedUsage, and can be permanently frozen
// into a single usage.
class TextureBase : public RefCounted {
public:
TextureBase(TextureBuilder* builder);
nxt::TextureDimension GetDimension() const;
nxt::TextureFormat GetFormat() const;
uint32_t GetWidth() const;
uint32_t GetHeight() const;
uint32_t GetDepth() const;
uint32_t GetNumMipLevels() const;
nxt::TextureUsageBit GetAllowedUsage() const;
nxt::TextureUsageBit GetUsage() const;
bool IsFrozen() const;
bool HasFrozenUsage(nxt::TextureUsageBit usage) const;
static bool IsUsagePossible(nxt::TextureUsageBit allowedUsage, nxt::TextureUsageBit usage);
bool IsTransitionPossible(nxt::TextureUsageBit usage) const;
// Unvalidated transition for backend-internal use (asserts instead of
// reporting a device error).
void TransitionUsageImpl(nxt::TextureUsageBit usage);
// NXT API
TextureViewBuilder* CreateTextureViewBuilder();
void TransitionUsage(nxt::TextureUsageBit usage);
void FreezeUsage(nxt::TextureUsageBit usage);
private:
DeviceBase* device;
nxt::TextureDimension dimension;
nxt::TextureFormat format;
uint32_t width, height, depth;
uint32_t numMipLevels;
nxt::TextureUsageBit allowedUsage = nxt::TextureUsageBit::None;
nxt::TextureUsageBit currentUsage = nxt::TextureUsageBit::None;
bool frozen = false;
};
// Builder for TextureBase. Dimension, extent, format, mip levels and allowed
// usage are required before GetResult; the initial usage is optional.
class TextureBuilder : public RefCounted {
public:
TextureBuilder(DeviceBase* device);
bool WasConsumed() const;
// NXT API
TextureBase* GetResult();
void SetDimension(nxt::TextureDimension dimension);
void SetExtent(uint32_t width, uint32_t height, uint32_t depth);
void SetFormat(nxt::TextureFormat format);
void SetMipLevels(uint32_t numMipLevels);
void SetAllowedUsage(nxt::TextureUsageBit usage);
void SetInitialUsage(nxt::TextureUsageBit usage);
private:
friend class TextureBase;
DeviceBase* device;
// Bitmask of TextureSetProperties flags (see Texture.cpp).
int propertiesSet = 0;
bool consumed = false;
nxt::TextureDimension dimension;
uint32_t width, height, depth;
nxt::TextureFormat format;
uint32_t numMipLevels;
nxt::TextureUsageBit allowedUsage = nxt::TextureUsageBit::None;
nxt::TextureUsageBit currentUsage = nxt::TextureUsageBit::None;
};
// Frontend texture view; holds an owning reference on its source texture.
class TextureViewBase : public RefCounted {
public:
TextureViewBase(TextureViewBuilder* builder);
TextureBase* GetTexture();
private:
Ref<TextureBase> texture;
};
// Builder for TextureViewBase, created by TextureBase::CreateTextureViewBuilder.
class TextureViewBuilder : public RefCounted {
public:
TextureViewBuilder(DeviceBase* device, TextureBase* texture);
bool WasConsumed() const;
// NXT API
TextureViewBase* GetResult();
private:
friend class TextureViewBase;
DeviceBase* device;
bool consumed = false;
Ref<TextureBase> texture;
};
}
#endif // BACKEND_COMMON_TEXTURE_H_

View File

@@ -0,0 +1,120 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_TOBACKEND_H_
#define BACKEND_COMMON_TOBACKEND_H_
#include "Forward.h"
namespace backend {
// ToBackendTraits implements the mapping from base type to member type of BackendTraits
// Each backend provides a BackendTraits struct naming its concrete types
// (e.g. BufferType); the specializations below pick the right member for a
// given common base type.
template<typename T, typename BackendTraits>
struct ToBackendTraits;
template<typename BackendTraits>
struct ToBackendTraits<BindGroupBase, BackendTraits> {
using BackendType = typename BackendTraits::BindGroupType;
};
template<typename BackendTraits>
struct ToBackendTraits<BindGroupLayoutBase, BackendTraits> {
using BackendType = typename BackendTraits::BindGroupLayoutType;
};
template<typename BackendTraits>
struct ToBackendTraits<BufferBase, BackendTraits> {
using BackendType = typename BackendTraits::BufferType;
};
template<typename BackendTraits>
struct ToBackendTraits<BufferViewBase, BackendTraits> {
using BackendType = typename BackendTraits::BufferViewType;
};
template<typename BackendTraits>
struct ToBackendTraits<CommandBufferBase, BackendTraits> {
using BackendType = typename BackendTraits::CommandBufferType;
};
template<typename BackendTraits>
struct ToBackendTraits<InputStateBase, BackendTraits> {
using BackendType = typename BackendTraits::InputStateType;
};
template<typename BackendTraits>
struct ToBackendTraits<PipelineBase, BackendTraits> {
using BackendType = typename BackendTraits::PipelineType;
};
template<typename BackendTraits>
struct ToBackendTraits<PipelineLayoutBase, BackendTraits> {
using BackendType = typename BackendTraits::PipelineLayoutType;
};
template<typename BackendTraits>
struct ToBackendTraits<QueueBase, BackendTraits> {
using BackendType = typename BackendTraits::QueueType;
};
template<typename BackendTraits>
struct ToBackendTraits<SamplerBase, BackendTraits> {
using BackendType = typename BackendTraits::SamplerType;
};
template<typename BackendTraits>
struct ToBackendTraits<ShaderModuleBase, BackendTraits> {
using BackendType = typename BackendTraits::ShaderModuleType;
};
template<typename BackendTraits>
struct ToBackendTraits<TextureBase, BackendTraits> {
using BackendType = typename BackendTraits::TextureType;
};
template<typename BackendTraits>
struct ToBackendTraits<TextureViewBase, BackendTraits> {
using BackendType = typename BackendTraits::TextureViewType;
};
// ToBackendBase implements conversion to the given BackendTraits
// To use it in a backend, use the following:
// template<typename T>
// auto ToBackend(T&& common) -> decltype(ToBackendBase<MyBackendTraits>(common)) {
// return ToBackendBase<MyBackendTraits>(common);
// }
// NOTE(review): the reinterpret_casts below are only safe if the backend
// type derives from the corresponding base type such that the pointers are
// interconvertible — confirm this invariant holds for every backend.
template<typename BackendTraits, typename T>
Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>& ToBackendBase(Ref<T>& common) {
return reinterpret_cast<Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>&>(common);
}
template<typename BackendTraits, typename T>
const Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>& ToBackendBase(const Ref<T>& common) {
return reinterpret_cast<const Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>&>(common);
}
template<typename BackendTraits, typename T>
typename ToBackendTraits<T, BackendTraits>::BackendType* ToBackendBase(T* common) {
return reinterpret_cast<typename ToBackendTraits<T, BackendTraits>::BackendType*>(common);
}
template<typename BackendTraits, typename T>
const typename ToBackendTraits<T, BackendTraits>::BackendType* ToBackendBase(const T* common) {
return reinterpret_cast<const typename ToBackendTraits<T, BackendTraits>::BackendType*>(common);
}
}
#endif // BACKEND_COMMON_TOBACKEND_H_

View File

@@ -0,0 +1,18 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "MetalBackend.h"
#include "common/Device.h"
#include "common/CommandBuffer.h"

View File

@@ -0,0 +1,282 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_METAL_METALBACKEND_H_
#define BACKEND_METAL_METALBACKEND_H_
#include "nxt/nxtcpp.h"
#include <map>
#include <mutex>
#include <unordered_set>
#include "common/Buffer.h"
#include "common/BindGroup.h"
#include "common/BindGroupLayout.h"
#include "common/Device.h"
#include "common/CommandBuffer.h"
#include "common/InputState.h"
#include "common/Pipeline.h"
#include "common/PipelineLayout.h"
#include "common/Queue.h"
#include "common/Sampler.h"
#include "common/ShaderModule.h"
#include "common/Texture.h"
#include "common/ToBackend.h"
#include <type_traits>
#import <Metal/Metal.h>
#import <QuartzCore/CAMetalLayer.h>
namespace spirv_cross {
class CompilerMSL;
}
namespace backend {
namespace metal {
class BindGroup;
class BindGroupLayout;
class Buffer;
class BufferView;
class CommandBuffer;
class InputState;
class Pipeline;
class PipelineLayout;
class Queue;
class Sampler;
class ShaderModule;
class Texture;
class TextureView;
// Maps each common base type to its Metal implementation, for consumption by
// the generic ToBackendBase machinery (common/ToBackend.h).
struct MetalBackendTraits {
using BindGroupType = BindGroup;
using BindGroupLayoutType = BindGroupLayout;
using BufferType = Buffer;
using BufferViewType = BufferView;
using CommandBufferType = CommandBuffer;
using InputStateType = InputState;
using PipelineType = Pipeline;
using PipelineLayoutType = PipelineLayout;
using QueueType = Queue;
using SamplerType = Sampler;
using ShaderModuleType = ShaderModule;
using TextureType = Texture;
using TextureViewType = TextureView;
};
// Converts a common-type pointer or Ref to the corresponding Metal backend
// type (e.g. BufferBase* -> metal::Buffer*).
template<typename T>
auto ToBackend(T&& common) -> decltype(ToBackendBase<MetalBackendTraits>(common)) {
return ToBackendBase<MetalBackendTraits>(common);
}
// Metal implementation of DeviceBase: owns the MTLDevice and the per-frame
// drawable/color/depth textures, and acts as a factory for every other
// backend object.
class Device : public DeviceBase {
public:
Device(id<MTLDevice> mtlDevice);
~Device();
BindGroupBase* CreateBindGroup(BindGroupBuilder* builder) override;
BindGroupLayoutBase* CreateBindGroupLayout(BindGroupLayoutBuilder* builder) override;
BufferBase* CreateBuffer(BufferBuilder* builder) override;
BufferViewBase* CreateBufferView(BufferViewBuilder* builder) override;
CommandBufferBase* CreateCommandBuffer(CommandBufferBuilder* builder) override;
InputStateBase* CreateInputState(InputStateBuilder* builder) override;
PipelineBase* CreatePipeline(PipelineBuilder* builder) override;
PipelineLayoutBase* CreatePipelineLayout(PipelineLayoutBuilder* builder) override;
QueueBase* CreateQueue(QueueBuilder* builder) override;
SamplerBase* CreateSampler(SamplerBuilder* builder) override;
ShaderModuleBase* CreateShaderModule(ShaderModuleBuilder* builder) override;
TextureBase* CreateTexture(TextureBuilder* builder) override;
TextureViewBase* CreateTextureView(TextureViewBuilder* builder) override;
// Installs the next CAMetalLayer drawable and (re)creates the matching
// depth texture; also clears both attachments.
void SetNextDrawable(id<CAMetalDrawable> drawable);
void Present();
id<MTLDevice> GetMTLDevice();
id<MTLTexture> GetCurrentTexture();
id<MTLTexture> GetCurrentDepthTexture();
// NXT API
void Reference();
void Release();
private:
id<MTLDevice> mtlDevice = nil;
id<MTLCommandQueue> commandQueue = nil;
id<CAMetalDrawable> currentDrawable = nil;
id<MTLTexture> currentTexture = nil;
id<MTLTexture> currentDepthTexture = nil;
};
// Metal bind group; no Metal-side state, bindings are applied at encode time.
class BindGroup : public BindGroupBase {
public:
BindGroup(Device* device, BindGroupBuilder* builder);
private:
Device* device;
};
// Metal bind group layout; pure metadata, no Metal objects.
class BindGroupLayout : public BindGroupLayoutBase {
public:
BindGroupLayout(Device* device, BindGroupLayoutBuilder* builder);
private:
Device* device;
};
// Wraps an MTLBuffer. The mutex serializes CPU writes against in-flight GPU
// reads (see Queue::Submit).
class Buffer : public BufferBase {
public:
Buffer(Device* device, BufferBuilder* builder);
~Buffer();
id<MTLBuffer> GetMTLBuffer();
std::mutex& GetMutex();
private:
void SetSubDataImpl(uint32_t start, uint32_t count, const uint32_t* data) override;
Device* device;
std::mutex mutex;
id<MTLBuffer> mtlBuffer = nil;
};
// Metal buffer view; offset/size live in the base class.
class BufferView : public BufferViewBase {
public:
BufferView(Device* device, BufferViewBuilder* builder);
private:
Device* device;
};
// Metal command buffer: replays the recorded command list into an
// MTLCommandBuffer at submit time, collecting the mutexes of every buffer
// the GPU will touch.
class CommandBuffer : public CommandBufferBase {
public:
CommandBuffer(Device* device, CommandBufferBuilder* builder);
~CommandBuffer();
void FillCommands(id<MTLCommandBuffer> commandBuffer, std::unordered_set<std::mutex*>* mutexes);
private:
Device* device;
CommandIterator commands;
};
// Translates the NXT input state into an MTLVertexDescriptor, built once in
// the constructor.
class InputState : public InputStateBase {
public:
InputState(Device* device, InputStateBuilder* builder);
~InputState();
MTLVertexDescriptor* GetMTLVertexDescriptor();
private:
Device* device;
MTLVertexDescriptor* mtlVertexDescriptor = nil;
};
// Metal pipeline: holds either a render pipeline state (+ depth-stencil
// state) or a compute pipeline state, depending on IsCompute().
class Pipeline : public PipelineBase {
public:
Pipeline(Device* device, PipelineBuilder* builder);
~Pipeline();
void Encode(id<MTLRenderCommandEncoder> encoder);
void Encode(id<MTLComputeCommandEncoder> encoder);
MTLSize GetLocalWorkGroupSize() const;
private:
Device* device;
id<MTLRenderPipelineState> mtlRenderPipelineState = nil;
id<MTLDepthStencilState> mtlDepthStencilState = nil;
id<MTLComputePipelineState> mtlComputePipelineState = nil;
MTLSize localWorkgroupSize;
};
// Assigns each (group, binding) pair a flat per-stage Metal resource index
// (buffers, samplers and textures are numbered independently per stage).
class PipelineLayout : public PipelineLayoutBase {
public:
PipelineLayout(Device* device, PipelineLayoutBuilder* builder);
using BindingIndexInfo = std::array<std::array<uint32_t, kMaxBindingsPerGroup>, kMaxBindGroups>;
const BindingIndexInfo& GetBindingIndexInfo(nxt::ShaderStage stage) const;
private:
Device* device;
PerStage<BindingIndexInfo> indexInfo;
};
// Metal queue: owns its own MTLCommandQueue and submits command buffers.
class Queue : public QueueBase {
public:
Queue(Device* device, QueueBuilder* builder);
~Queue();
id<MTLCommandQueue> GetMTLCommandQueue();
// NXT API
void Submit(uint32_t numCommands, CommandBuffer* const * commands);
private:
Device* device;
id<MTLCommandQueue> commandQueue = nil;
};
// Wraps an MTLSamplerState built from the sampler builder.
class Sampler : public SamplerBase {
public:
Sampler(Device* device, SamplerBuilder* builder);
~Sampler();
id<MTLSamplerState> GetMTLSamplerState();
private:
Device* device;
id<MTLSamplerState> mtlSamplerState = nil;
};
// Compiles SPIR-V to MSL via spirv-cross and holds the resulting MTLLibrary;
// the compiler is kept around for entry-point queries.
class ShaderModule : public ShaderModuleBase {
public:
ShaderModule(Device* device, ShaderModuleBuilder* builder);
~ShaderModule();
id<MTLFunction> GetFunction(const char* functionName) const;
MTLSize GetLocalWorkGroupSize(const std::string& entryPoint) const;
private:
Device* device;
id<MTLLibrary> mtlLibrary = nil;
spirv_cross::CompilerMSL* compiler = nullptr;
};
// Wraps an MTLTexture.
class Texture : public TextureBase {
public:
Texture(Device* device, TextureBuilder* builder);
~Texture();
id<MTLTexture> GetMTLTexture();
private:
Device* device;
id<MTLTexture> mtlTexture = nil;
};
// Metal texture view; the viewed texture lives in the base class.
class TextureView : public TextureViewBase {
public:
TextureView(Device* device, TextureViewBuilder* builder);
private:
Device* device;
};
}
}
#endif // BACKEND_METAL_METALBACKEND_H_

View File

@@ -0,0 +1,968 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "MetalBackend.h"
#include <spirv-cross/spirv_msl.hpp>
#include <sstream>
#include "common/Commands.h"
namespace backend {
namespace metal {
nxtProcTable GetNonValidatingProcs();
nxtProcTable GetValidatingProcs();
// Creates the Metal backend device and returns the proc table that routes
// the NXT C API into this backend (validating variant).
void Init(id<MTLDevice> metalDevice, nxtProcTable* procs, nxtDevice* device) {
*device = nullptr;
*procs = GetValidatingProcs();
*device = reinterpret_cast<nxtDevice>(new Device(metalDevice));
}
// C-API entry point: hands the next CAMetalLayer drawable to the device.
void SetNextDrawable(nxtDevice device, id<CAMetalDrawable> drawable) {
Device* backendDevice = reinterpret_cast<Device*>(device);
backendDevice->SetNextDrawable(drawable);
}
// C-API entry point: presents the device's current drawable.
void Present(nxtDevice device) {
Device* backendDevice = reinterpret_cast<Device*>(device);
backendDevice->Present();
}
// Device
// Retains the caller-provided MTLDevice (manual retain/release) and creates
// the internal command queue used for clears and presentation.
Device::Device(id<MTLDevice> mtlDevice) : mtlDevice(mtlDevice) {
[mtlDevice retain];
commandQueue = [mtlDevice newCommandQueue];
}
// Releases every Metal object this device retained (manual retain/release):
// the MTLDevice and command queue from the constructor, and the drawable /
// color / depth textures from SetNextDrawable().
Device::~Device() {
    [mtlDevice release];
    mtlDevice = nil;
    [commandQueue release];
    commandQueue = nil;
    // Fix: currentDrawable is retained in SetNextDrawable() but was never
    // released here, leaking the last drawable of the device's lifetime.
    [currentDrawable release];
    currentDrawable = nil;
    [currentTexture release];
    currentTexture = nil;
    [currentDepthTexture release];
    currentDepthTexture = nil;
}
// Factory methods: each returns a newly allocated Metal backend object; the
// caller (the common frontend) takes ownership via refcounting.
BindGroupBase* Device::CreateBindGroup(BindGroupBuilder* builder) {
return new BindGroup(this, builder);
}
BindGroupLayoutBase* Device::CreateBindGroupLayout(BindGroupLayoutBuilder* builder) {
return new BindGroupLayout(this, builder);
}
BufferBase* Device::CreateBuffer(BufferBuilder* builder) {
return new Buffer(this, builder);
}
BufferViewBase* Device::CreateBufferView(BufferViewBuilder* builder) {
return new BufferView(this, builder);
}
CommandBufferBase* Device::CreateCommandBuffer(CommandBufferBuilder* builder) {
return new CommandBuffer(this, builder);
}
InputStateBase* Device::CreateInputState(InputStateBuilder* builder) {
return new InputState(this, builder);
}
PipelineBase* Device::CreatePipeline(PipelineBuilder* builder) {
return new Pipeline(this, builder);
}
PipelineLayoutBase* Device::CreatePipelineLayout(PipelineLayoutBuilder* builder) {
return new PipelineLayout(this, builder);
}
QueueBase* Device::CreateQueue(QueueBuilder* builder) {
return new Queue(this, builder);
}
SamplerBase* Device::CreateSampler(SamplerBuilder* builder) {
return new Sampler(this, builder);
}
ShaderModuleBase* Device::CreateShaderModule(ShaderModuleBuilder* builder) {
return new ShaderModule(this, builder);
}
TextureBase* Device::CreateTexture(TextureBuilder* builder) {
return new Texture(this, builder);
}
TextureViewBase* Device::CreateTextureView(TextureViewBuilder* builder) {
return new TextureView(this, builder);
}
// Swaps in the next swapchain drawable. Retains the drawable and its color
// texture, lazily (re)creates a matching depth texture when the size
// changes, then clears both attachments with an empty render pass so the
// frame starts from a known state.
void Device::SetNextDrawable(id<CAMetalDrawable> drawable) {
// Release the previous frame's objects before retaining the new ones.
[currentDrawable release];
currentDrawable = drawable;
[currentDrawable retain];
[currentTexture release];
currentTexture = drawable.texture;
[currentTexture retain];
// Recreate the depth texture only when it is missing or its size no
// longer matches the color attachment.
if (currentDepthTexture == nil ||
currentTexture.width != currentDepthTexture.width ||
currentTexture.height != currentDepthTexture.height) {
if (currentDepthTexture != nil) {
[currentDepthTexture release];
}
MTLTextureDescriptor* depthDescriptor = [MTLTextureDescriptor
texture2DDescriptorWithPixelFormat:MTLPixelFormatDepth32Float
width:currentTexture.width
height:currentTexture.height
mipmapped:NO];
depthDescriptor.textureType = MTLTextureType2D;
depthDescriptor.usage = MTLTextureUsageRenderTarget;
depthDescriptor.storageMode = MTLStorageModePrivate;
currentDepthTexture = [mtlDevice newTextureWithDescriptor:depthDescriptor];
}
// Clear color to opaque black and depth to 1.0 via an empty render pass.
MTLRenderPassDescriptor* passDescriptor = [MTLRenderPassDescriptor renderPassDescriptor];
passDescriptor.colorAttachments[0].texture = currentTexture;
passDescriptor.colorAttachments[0].loadAction = MTLLoadActionClear;
passDescriptor.colorAttachments[0].storeAction = MTLStoreActionStore;
passDescriptor.colorAttachments[0].clearColor = MTLClearColorMake(0.0, 0.0, 0.0, 1.0);
passDescriptor.depthAttachment.texture = currentDepthTexture;
passDescriptor.depthAttachment.loadAction = MTLLoadActionClear;
passDescriptor.depthAttachment.storeAction = MTLStoreActionStore;
passDescriptor.depthAttachment.clearDepth = 1.0;
id<MTLCommandBuffer> commandBuffer = [commandQueue commandBuffer];
id<MTLRenderCommandEncoder> commandEncoder = [commandBuffer
renderCommandEncoderWithDescriptor:passDescriptor];
[commandEncoder endEncoding];
[commandBuffer commit];
}
// Schedules presentation of the current drawable on the device's own queue.
void Device::Present() {
id<MTLCommandBuffer> commandBuffer = [commandQueue commandBuffer];
[commandBuffer presentDrawable: currentDrawable];
[commandBuffer commit];
}
id<MTLDevice> Device::GetMTLDevice() {
return mtlDevice;
}
id<MTLTexture> Device::GetCurrentTexture() {
return currentTexture;
}
id<MTLTexture> Device::GetCurrentDepthTexture() {
return currentDepthTexture;
}
// NXT API refcounting hooks; the device's lifetime is managed externally,
// so these are intentionally no-ops.
void Device::Reference() {
}
void Device::Release() {
}
// Bind Group
// No Metal-side state to create; bindings are applied at encode time.
BindGroup::BindGroup(Device* device, BindGroupBuilder* builder)
: BindGroupBase(builder), device(device) {
}
// Bind Group Layout
BindGroupLayout::BindGroupLayout(Device* device, BindGroupLayoutBuilder* builder)
: BindGroupLayoutBase(builder), device(device) {
}
// Buffer
// Allocates the MTLBuffer in managed storage so CPU writes can be flushed
// to the GPU with didModifyRange.
Buffer::Buffer(Device* device, BufferBuilder* builder)
: BufferBase(builder), device(device) {
mtlBuffer = [device->GetMTLDevice() newBufferWithLength:GetSize()
options:MTLResourceStorageModeManaged];
}
Buffer::~Buffer() {
// Take the mutex so destruction waits for any in-flight GPU use
// (Queue::Submit holds buffer mutexes until the GPU completes).
std::lock_guard<std::mutex> lock(mutex);
[mtlBuffer release];
mtlBuffer = nil;
}
id<MTLBuffer> Buffer::GetMTLBuffer() {
return mtlBuffer;
}
std::mutex& Buffer::GetMutex() {
return mutex;
}
// Copies `count` uint32 words into the buffer starting at word `start`,
// then flushes the modified range to the GPU.
// NOTE(review): didModifyRange is called after the lock is dropped —
// confirm this cannot race with a concurrent Submit of this buffer.
void Buffer::SetSubDataImpl(uint32_t start, uint32_t count, const uint32_t* data) {
uint32_t* dest = reinterpret_cast<uint32_t*>([mtlBuffer contents]);
{
std::lock_guard<std::mutex> lock(mutex);
memcpy(&dest[start], data, count * sizeof(uint32_t));
}
[mtlBuffer didModifyRange:NSMakeRange(start * sizeof(uint32_t), count * sizeof(uint32_t))];
}
// BufferView
// No Metal-side state; offset/size live in the base class.
BufferView::BufferView(Device* device, BufferViewBuilder* builder)
: BufferViewBase(builder), device(device) {
}
// CommandBuffer
// Maps the NXT index format to the Metal index type. The switch is
// exhaustive over the enum, so no default/return is needed after it.
static MTLIndexType IndexFormatType(nxt::IndexFormat format) {
switch (format) {
case nxt::IndexFormat::Uint16:
return MTLIndexTypeUInt16;
case nxt::IndexFormat::Uint32:
return MTLIndexTypeUInt32;
}
}
// Takes ownership of the recorded command list from the builder; the list
// is replayed in FillCommands and freed on destruction.
CommandBuffer::CommandBuffer(Device* device, CommandBufferBuilder* builder)
: CommandBufferBase(builder), device(device), commands(builder->AcquireCommands()) {
}
CommandBuffer::~CommandBuffer() {
FreeCommands(&commands);
}
namespace {
// Tracks which Metal encoder (blit/compute/render) is currently open on the
// command buffer. Metal allows only one encoder at a time, so switching
// kinds must end the previous encoder first; at most one member is non-nil.
struct CurrentEncoders {
Device* device;
id<MTLBlitCommandEncoder> blit = nil;
id<MTLComputeCommandEncoder> compute = nil;
id<MTLRenderCommandEncoder> render = nil;
// Ends whichever encoder is open and resets all three to nil.
void FinishEncoders() {
if (blit != nil) {
[blit endEncoding];
blit = nil;
}
if (compute != nil) {
[compute endEncoding];
compute = nil;
}
if (render != nil) {
[render endEncoding];
render = nil;
}
}
// Each Ensure* opens the requested encoder kind if it isn't already the
// active one, closing any other encoder first.
void EnsureBlit(id<MTLCommandBuffer> commandBuffer) {
if (blit == nil) {
FinishEncoders();
blit = [commandBuffer blitCommandEncoder];
}
}
void EnsureCompute(id<MTLCommandBuffer> commandBuffer) {
if (compute == nil) {
FinishEncoders();
compute = [commandBuffer computeCommandEncoder];
// TODO(cwallez@chromium.org): does any state need to be reset?
}
}
void EnsureRender(id<MTLCommandBuffer> commandBuffer) {
if (render == nil) {
FinishEncoders();
// TODO(cwallez@chromium.org): this should be created from a renderpass subpass
// Load the existing attachment contents since the frame was already
// cleared in Device::SetNextDrawable.
MTLRenderPassDescriptor* descriptor = [MTLRenderPassDescriptor renderPassDescriptor];
descriptor.colorAttachments[0].texture = device->GetCurrentTexture();
descriptor.colorAttachments[0].loadAction = MTLLoadActionLoad;
descriptor.colorAttachments[0].storeAction = MTLStoreActionStore;
descriptor.depthAttachment.texture = device->GetCurrentDepthTexture();
descriptor.depthAttachment.loadAction = MTLLoadActionLoad;
descriptor.depthAttachment.storeAction = MTLStoreActionStore;
render = [commandBuffer renderCommandEncoderWithDescriptor:descriptor];
// TODO(cwallez@chromium.org): does any state need to be reset?
}
}
};
}
// Replays the recorded NXT command list into the given MTLCommandBuffer,
// opening/closing blit, compute and render encoders as needed. Every buffer
// the GPU will read is added to `mutexes` so Queue::Submit can hold its lock
// until the GPU finishes.
void CommandBuffer::FillCommands(id<MTLCommandBuffer> commandBuffer, std::unordered_set<std::mutex*>* mutexes) {
Command type;
// State tracked across commands: the bound pipeline and index-buffer
// binding (Metal passes the index buffer per draw call, not as state).
Pipeline* lastPipeline = nullptr;
id<MTLBuffer> indexBuffer = nil;
uint32_t indexBufferOffset = 0;
MTLIndexType indexType = MTLIndexTypeUInt32;
CurrentEncoders encoders;
encoders.device = device;
while (commands.NextCommandId(&type)) {
switch (type) {
case Command::CopyBufferToTexture:
{
CopyBufferToTextureCmd* copy = commands.NextCommand<CopyBufferToTextureCmd>();
Buffer* buffer = ToBackend(copy->buffer.Get());
Texture* texture = ToBackend(copy->texture.Get());
// TODO(kainino@chromium.org): this has to be in a Blit encoder, not a Render encoder, so ordering is lost here
unsigned rowSize = copy->width * TextureFormatPixelSize(texture->GetFormat());
MTLOrigin origin;
origin.x = copy->x;
origin.y = copy->y;
origin.z = copy->z;
MTLSize size;
size.width = copy->width;
size.height = copy->height;
size.depth = copy->depth;
encoders.EnsureBlit(commandBuffer);
[encoders.blit
copyFromBuffer:buffer->GetMTLBuffer()
sourceOffset:0
sourceBytesPerRow:rowSize
sourceBytesPerImage:(rowSize * copy->height)
sourceSize:size
toTexture:texture->GetMTLTexture()
destinationSlice:0
destinationLevel:copy->level
destinationOrigin:origin];
}
break;
case Command::Dispatch:
{
DispatchCmd* dispatch = commands.NextCommand<DispatchCmd>();
encoders.EnsureCompute(commandBuffer);
ASSERT(lastPipeline->IsCompute());
// The local workgroup size comes from the shader module and must be
// passed explicitly in Metal.
[encoders.compute dispatchThreadgroups:MTLSizeMake(dispatch->x, dispatch->y, dispatch->z)
threadsPerThreadgroup: lastPipeline->GetLocalWorkGroupSize()];
}
break;
case Command::DrawArrays:
{
DrawArraysCmd* draw = commands.NextCommand<DrawArraysCmd>();
encoders.EnsureRender(commandBuffer);
[encoders.render
drawPrimitives:MTLPrimitiveTypeTriangle
vertexStart:draw->firstVertex
vertexCount:draw->vertexCount
instanceCount:draw->instanceCount
baseInstance:draw->firstInstance];
}
break;
case Command::DrawElements:
{
DrawElementsCmd* draw = commands.NextCommand<DrawElementsCmd>();
encoders.EnsureRender(commandBuffer);
// Uses the index buffer bound by the last SetIndexBuffer command.
[encoders.render
drawIndexedPrimitives:MTLPrimitiveTypeTriangle
indexCount:draw->indexCount
indexType:indexType
indexBuffer:indexBuffer
indexBufferOffset:indexBufferOffset
instanceCount:draw->instanceCount
baseVertex:0
baseInstance:draw->firstInstance];
}
break;
case Command::SetPipeline:
{
SetPipelineCmd* cmd = commands.NextCommand<SetPipelineCmd>();
lastPipeline = ToBackend(cmd->pipeline).Get();
if (lastPipeline->IsCompute()) {
encoders.EnsureCompute(commandBuffer);
lastPipeline->Encode(encoders.compute);
} else {
encoders.EnsureRender(commandBuffer);
lastPipeline->Encode(encoders.render);
}
}
break;
case Command::SetPushConstants:
{
SetPushConstantsCmd* cmd = commands.NextCommand<SetPushConstantsCmd>();
uint32_t* valuesUInt = commands.NextData<uint32_t>(cmd->count);
int32_t* valuesInt = reinterpret_cast<int32_t*>(valuesUInt);
float* valuesFloat = reinterpret_cast<float*>(valuesUInt);
// TODO(kainino@chromium.org): implement SetPushConstants
}
break;
case Command::SetBindGroup:
{
SetBindGroupCmd* cmd = commands.NextCommand<SetBindGroupCmd>();
BindGroup* group = ToBackend(cmd->group.Get());
uint32_t groupIndex = cmd->index;
const auto& layout = group->GetLayout()->GetBindingInfo();
if (lastPipeline->IsCompute()) {
encoders.EnsureCompute(commandBuffer);
} else {
encoders.EnsureRender(commandBuffer);
}
// TODO(kainino@chromium.org): Maintain buffers and offsets arrays in BindGroup so that we
// only have to do one setVertexBuffers and one setFragmentBuffers call here.
// Apply each active binding to every stage that can see it, at the
// flat index computed by the PipelineLayout.
for (size_t binding = 0; binding < layout.mask.size(); ++binding) {
if (!layout.mask[binding]) {
continue;
}
auto stage = layout.visibilities[binding];
bool vertStage = stage & nxt::ShaderStageBit::Vertex;
bool fragStage = stage & nxt::ShaderStageBit::Fragment;
bool computeStage = stage & nxt::ShaderStageBit::Compute;
uint32_t vertIndex = 0;
uint32_t fragIndex = 0;
uint32_t computeIndex = 0;
if (vertStage) {
vertIndex = ToBackend(lastPipeline->GetLayout())->
GetBindingIndexInfo(nxt::ShaderStage::Vertex)[groupIndex][binding];
}
if (fragStage) {
fragIndex = ToBackend(lastPipeline->GetLayout())->
GetBindingIndexInfo(nxt::ShaderStage::Fragment)[groupIndex][binding];
}
if (computeStage) {
computeIndex = ToBackend(lastPipeline->GetLayout())->
GetBindingIndexInfo(nxt::ShaderStage::Compute)[groupIndex][binding];
}
switch (layout.types[binding]) {
case nxt::BindingType::UniformBuffer:
case nxt::BindingType::StorageBuffer:
{
BufferView* view = ToBackend(group->GetBindingAsBufferView(binding));
auto b = ToBackend(view->GetBuffer());
// Record the buffer for CPU/GPU write serialization in Submit.
mutexes->insert(&b->GetMutex());
const id<MTLBuffer> buffer = b->GetMTLBuffer();
const NSUInteger offset = view->GetOffset();
if (vertStage) {
[encoders.render
setVertexBuffers:&buffer
offsets:&offset
withRange:NSMakeRange(vertIndex, 1)];
}
if (fragStage) {
[encoders.render
setFragmentBuffers:&buffer
offsets:&offset
withRange:NSMakeRange(fragIndex, 1)];
}
if (computeStage) {
[encoders.compute
setBuffers:&buffer
offsets:&offset
withRange:NSMakeRange(computeIndex, 1)];
}
}
break;
case nxt::BindingType::Sampler:
{
auto sampler = ToBackend(group->GetBindingAsSampler(binding));
if (vertStage) {
[encoders.render
setVertexSamplerState:sampler->GetMTLSamplerState()
atIndex:vertIndex];
}
if (fragStage) {
[encoders.render
setFragmentSamplerState:sampler->GetMTLSamplerState()
atIndex:fragIndex];
}
if (computeStage) {
[encoders.compute
setSamplerState:sampler->GetMTLSamplerState()
atIndex:computeIndex];
}
}
break;
case nxt::BindingType::SampledTexture:
{
auto texture = ToBackend(group->GetBindingAsTextureView(binding)->GetTexture());
if (vertStage) {
[encoders.render
setVertexTexture:texture->GetMTLTexture()
atIndex:vertIndex];
}
if (fragStage) {
[encoders.render
setFragmentTexture:texture->GetMTLTexture()
atIndex:fragIndex];
}
if (computeStage) {
[encoders.compute
setTexture:texture->GetMTLTexture()
atIndex:computeIndex];
}
}
break;
}
}
}
break;
case Command::SetIndexBuffer:
{
SetIndexBufferCmd* cmd = commands.NextCommand<SetIndexBufferCmd>();
auto b = ToBackend(cmd->buffer.Get());
mutexes->insert(&b->GetMutex());
// Stash the binding; Metal consumes it per drawIndexedPrimitives call.
indexBuffer = b->GetMTLBuffer();
indexBufferOffset = cmd->offset;
indexType = IndexFormatType(cmd->format);
}
break;
case Command::SetVertexBuffers:
{
SetVertexBuffersCmd* cmd = commands.NextCommand<SetVertexBuffersCmd>();
auto buffers = commands.NextData<Ref<BufferBase>>(cmd->count);
auto offsets = commands.NextData<uint32_t>(cmd->count);
auto inputState = lastPipeline->GetInputState();
std::array<id<MTLBuffer>, kMaxVertexInputs> mtlBuffers;
std::array<NSUInteger, kMaxVertexInputs> mtlOffsets;
// Perhaps an "array of vertex buffers(+offsets?)" should be
// a NXT API primitive to avoid reconstructing this array?
for (uint32_t i = 0; i < cmd->count; ++i) {
Buffer* buffer = ToBackend(buffers[i].Get());
mutexes->insert(&buffer->GetMutex());
mtlBuffers[i] = buffer->GetMTLBuffer();
mtlOffsets[i] = offsets[i];
}
encoders.EnsureRender(commandBuffer);
// Vertex buffers are placed after the bind-group buffer range, hence
// the kMaxBindingsPerGroup offset (matches InputState's bufferIndex).
[encoders.render
setVertexBuffers:mtlBuffers.data()
offsets:mtlOffsets.data()
withRange:NSMakeRange(kMaxBindingsPerGroup + cmd->startSlot, cmd->count)];
}
break;
case Command::TransitionBufferUsage:
{
TransitionBufferUsageCmd* cmd = commands.NextCommand<TransitionBufferUsageCmd>();
cmd->buffer->TransitionUsageImpl(cmd->usage);
}
break;
case Command::TransitionTextureUsage:
{
TransitionTextureUsageCmd* cmd = commands.NextCommand<TransitionTextureUsageCmd>();
cmd->texture->TransitionUsageImpl(cmd->usage);
}
break;
// NOTE(review): stray null statement below, and the switch has no
// default case — unhandled Command values fall through silently.
;
}
}
encoders.FinishEncoders();
}
// InputState
// Maps the NXT vertex format to the Metal vertex format. Exhaustive switch,
// so no default/return after it.
static MTLVertexFormat VertexFormatType(nxt::VertexFormat format) {
switch (format) {
case nxt::VertexFormat::FloatR32G32B32A32:
return MTLVertexFormatFloat4;
case nxt::VertexFormat::FloatR32G32B32:
return MTLVertexFormatFloat3;
case nxt::VertexFormat::FloatR32G32:
return MTLVertexFormatFloat2;
}
}
// Maps the NXT input step mode to the Metal step function.
static MTLVertexStepFunction InputStepModeFunction(nxt::InputStepMode mode) {
switch (mode) {
case nxt::InputStepMode::Vertex:
return MTLVertexStepFunctionPerVertex;
case nxt::InputStepMode::Instance:
return MTLVertexStepFunctionPerInstance;
}
}
// Builds the MTLVertexDescriptor once up-front from the NXT input state.
// Vertex buffer slots are offset by kMaxBindingsPerGroup so they don't
// collide with bind-group buffer indices (see SetVertexBuffers encoding).
InputState::InputState(Device* device, InputStateBuilder* builder)
: InputStateBase(builder), device(device) {
mtlVertexDescriptor = [MTLVertexDescriptor new];
const auto& attributesSetMask = GetAttributesSetMask();
for (size_t i = 0; i < attributesSetMask.size(); ++i) {
if (!attributesSetMask[i]) {
continue;
}
const AttributeInfo& info = GetAttribute(i);
auto attribDesc = [MTLVertexAttributeDescriptor new];
attribDesc.format = VertexFormatType(info.format);
attribDesc.offset = info.offset;
attribDesc.bufferIndex = kMaxBindingsPerGroup + info.bindingSlot;
mtlVertexDescriptor.attributes[i] = attribDesc;
// The descriptor array retains attribDesc; drop our +1 from `new`.
[attribDesc release];
}
const auto& inputsSetMask = GetInputsSetMask();
for (size_t i = 0; i < inputsSetMask.size(); ++i) {
if (!inputsSetMask[i]) {
continue;
}
const InputInfo& info = GetInput(i);
auto layoutDesc = [MTLVertexBufferLayoutDescriptor new];
if (info.stride == 0) {
// For MTLVertexStepFunctionConstant, the stepRate must be 0,
// but the stride must NOT be 0, so I made up a value (256).
layoutDesc.stepFunction = MTLVertexStepFunctionConstant;
layoutDesc.stepRate = 0;
layoutDesc.stride = 256;
} else {
layoutDesc.stepFunction = InputStepModeFunction(info.stepMode);
layoutDesc.stepRate = 1;
layoutDesc.stride = info.stride;
}
mtlVertexDescriptor.layouts[kMaxBindingsPerGroup + i] = layoutDesc;
[layoutDesc release];
}
}
InputState::~InputState() {
[mtlVertexDescriptor release];
mtlVertexDescriptor = nil;
}
MTLVertexDescriptor* InputState::GetMTLVertexDescriptor() {
return mtlVertexDescriptor;
}
// Pipeline
// Builds either a compute pipeline state or a render pipeline state (plus a
// fixed depth-stencil state) from the shader modules in the builder. On
// failure, reports through Device::HandleError and leaves the corresponding
// state object nil.
Pipeline::Pipeline(Device* device, PipelineBuilder* builder)
    : PipelineBase(builder), device(device) {
    if (IsCompute()) {
        const auto& module = ToBackend(builder->GetStageInfo(nxt::ShaderStage::Compute).module);
        const auto& entryPoint = builder->GetStageInfo(nxt::ShaderStage::Compute).entryPoint;
        id<MTLFunction> function = module->GetFunction(entryPoint.c_str());
        NSError *error = nil;
        mtlComputePipelineState = [device->GetMTLDevice()
            newComputePipelineStateWithFunction:function error:&error];
        if (error != nil) {
            NSLog(@" error => %@", error);
            device->HandleError("Error creating pipeline state");
            return;
        }
        // Copy over the local workgroup size as it is passed to dispatch explicitly in Metal
        localWorkgroupSize = module->GetLocalWorkGroupSize(entryPoint);
    } else {
        // `new` returns a +1 reference; it must be released on every exit
        // path of this branch.
        MTLRenderPipelineDescriptor* descriptor = [MTLRenderPipelineDescriptor new];
        for (auto stage : IterateStages(GetStageMask())) {
            const auto& module = ToBackend(builder->GetStageInfo(stage).module);
            const auto& entryPoint = builder->GetStageInfo(stage).entryPoint;
            id<MTLFunction> function = module->GetFunction(entryPoint.c_str());
            switch (stage) {
                case nxt::ShaderStage::Vertex:
                    descriptor.vertexFunction = function;
                    break;
                case nxt::ShaderStage::Fragment:
                    descriptor.fragmentFunction = function;
                    break;
                case nxt::ShaderStage::Compute:
                    ASSERT(false);
                    break;
            }
        }
        // Attachment formats are hardcoded to match the swapchain set up in
        // Device::SetNextDrawable (BGRA8 color, Depth32Float depth).
        descriptor.colorAttachments[0].pixelFormat = MTLPixelFormatBGRA8Unorm;
        descriptor.depthAttachmentPixelFormat = MTLPixelFormatDepth32Float;
        InputState* inputState = ToBackend(GetInputState());
        descriptor.vertexDescriptor = inputState->GetMTLVertexDescriptor();
        // TODO(kainino@chromium.org): push constants, textures, samplers
        NSError *error = nil;
        mtlRenderPipelineState = [device->GetMTLDevice()
            newRenderPipelineStateWithDescriptor:descriptor error:&error];
        if (error != nil) {
            NSLog(@" error => %@", error);
            device->HandleError("Error creating pipeline state");
            // Fix: the descriptor was leaked on this early return; release
            // it before bailing out.
            [descriptor release];
            return;
        }
        // Fixed depth test (write enabled, less-than compare); NXT has no
        // depth-stencil state API yet.
        MTLDepthStencilDescriptor* dsDesc = [MTLDepthStencilDescriptor new];
        dsDesc.depthWriteEnabled = true;
        dsDesc.depthCompareFunction = MTLCompareFunctionLess;
        mtlDepthStencilState = [device->GetMTLDevice()
            newDepthStencilStateWithDescriptor:dsDesc];
        [dsDesc release];
        [descriptor release];
    }
}
// Releasing nil is a no-op in Objective-C, so unconditionally releasing all
// three states is safe whichever kind of pipeline this was.
Pipeline::~Pipeline() {
[mtlRenderPipelineState release];
[mtlDepthStencilState release];
[mtlComputePipelineState release];
}
// Binds the render pipeline and depth-stencil state on a render encoder.
void Pipeline::Encode(id<MTLRenderCommandEncoder> encoder) {
ASSERT(!IsCompute());
[encoder setDepthStencilState:mtlDepthStencilState];
[encoder setRenderPipelineState:mtlRenderPipelineState];
}
// Binds the compute pipeline state on a compute encoder.
void Pipeline::Encode(id<MTLComputeCommandEncoder> encoder) {
ASSERT(IsCompute());
[encoder setComputePipelineState:mtlComputePipelineState];
}
// Only meaningful for compute pipelines (set from the shader module).
MTLSize Pipeline::GetLocalWorkGroupSize() const {
return localWorkgroupSize;
}
// PipelineLayout

// Assigns, for every shader stage, contiguous Metal argument-table indices
// (buffer / sampler / texture slots) to each binding declared in the bind
// group layouts. Buffers share one index space; samplers and textures each
// get their own, mirroring MSL's [[buffer(n)]] / [[sampler(n)]] /
// [[texture(n)]] attributes.
PipelineLayout::PipelineLayout(Device* device, PipelineLayoutBuilder* builder)
    : PipelineLayoutBase(builder), device(device) {
    // Each stage has its own numbering namespace in CompilerMSL.
    for (auto stage : IterateStages(kAllStages)) {
        uint32_t bufferIndex = 0;
        uint32_t samplerIndex = 0;
        uint32_t textureIndex = 0;
        for (size_t group = 0; group < kMaxBindGroups; ++group) {
            const auto& groupInfo = GetBindGroupLayout(group)->GetBindingInfo();
            for (size_t binding = 0; binding < kMaxBindingsPerGroup; ++binding) {
                // Skip bindings that are not visible to this stage...
                if (!(groupInfo.visibilities[binding] & StageBit(stage))) {
                    continue;
                }
                // ...and binding slots that are not populated at all.
                if (!groupInfo.mask[binding]) {
                    continue;
                }
                switch (groupInfo.types[binding]) {
                    case nxt::BindingType::UniformBuffer:
                    case nxt::BindingType::StorageBuffer:
                        // Uniform and storage buffers share the buffer
                        // argument-table index space.
                        indexInfo[stage][group][binding] = bufferIndex;
                        bufferIndex++;
                        break;
                    case nxt::BindingType::Sampler:
                        indexInfo[stage][group][binding] = samplerIndex;
                        samplerIndex++;
                        break;
                    case nxt::BindingType::SampledTexture:
                        indexInfo[stage][group][binding] = textureIndex;
                        textureIndex++;
                        break;
                }
            }
        }
    }
}
// Returns the per-stage mapping from (group, binding) to the Metal
// argument-table index computed in the constructor.
const PipelineLayout::BindingIndexInfo& PipelineLayout::GetBindingIndexInfo(nxt::ShaderStage stage) const {
    return indexInfo[stage];
}
// Queue

// Creates the MTLCommandQueue through which all command buffers for this
// device are submitted.
Queue::Queue(Device* device, QueueBuilder* builder)
    : device(device) {
    commandQueue = [device->GetMTLDevice() newCommandQueue];
}
// Releases the Metal command queue; nil-ing the member guards against a
// stale pointer if anything touches the queue after destruction starts.
Queue::~Queue() {
    [commandQueue release];
    commandQueue = nil;
}
// Accessor for the underlying Metal command queue (non-owning for callers).
id<MTLCommandQueue> Queue::GetMTLCommandQueue() {
    return commandQueue;
}
// Records all NXT command buffers into a single MTLCommandBuffer and commits
// it. Buffers touched by the GPU are locked until the command buffer
// completes so CPU-side writes cannot race the GPU reads.
void Queue::Submit(uint32_t numCommands, CommandBuffer* const * commands) {
    id<MTLCommandBuffer> commandBuffer = [commandQueue commandBuffer];

    // Mutexes are necessary to prevent buffers from being written from the
    // CPU before their previous value has been read from the GPU.
    // https://developer.apple.com/library/content/documentation/3DDrawing/Conceptual/MTLBestPracticesGuide/TripleBuffering.html
    // TODO(kainino@chromium.org): When we have resource transitions, all of these mutexes will be replaced.
    std::unordered_set<std::mutex*> mutexes;
    for (uint32_t i = 0; i < numCommands; ++i) {
        commands[i]->FillCommands(commandBuffer, &mutexes);
    }

    // Lock every buffer the GPU will read; the completed handler below
    // unlocks them once the GPU is done.
    // NOTE(review): std::mutex requires unlock on the locking thread, but the
    // completed handler may run on a Metal-internal thread — confirm this is
    // safe or switch to a semaphore-like primitive.
    for (auto mutex : mutexes) {
        mutex->lock();
    }

    [commandBuffer addCompletedHandler:^(id<MTLCommandBuffer> commandBuffer) {
        // 'mutexes' is copied into this Block
        for (auto mutex : mutexes) {
            mutex->unlock();
        }
    }];

    [commandBuffer commit];
}
// Sampler

// Translates an NXT min/mag filter mode to the equivalent Metal filter.
MTLSamplerMinMagFilter FilterModeToMinMagFilter(nxt::FilterMode mode) {
    return (mode == nxt::FilterMode::Nearest) ? MTLSamplerMinMagFilterNearest
                                              : MTLSamplerMinMagFilterLinear;
}
// Translates an NXT mipmap filter mode to the equivalent Metal mip filter.
MTLSamplerMipFilter FilterModeToMipFilter(nxt::FilterMode mode) {
    return (mode == nxt::FilterMode::Nearest) ? MTLSamplerMipFilterNearest
                                              : MTLSamplerMipFilterLinear;
}
// Creates the MTLSamplerState matching the builder's filter settings.
Sampler::Sampler(Device* device, SamplerBuilder* builder)
    : SamplerBase(builder), device(device) {
    auto desc = [MTLSamplerDescriptor new];
    [desc autorelease]; // the descriptor is only needed during creation
    desc.minFilter = FilterModeToMinMagFilter(builder->GetMinFilter());
    desc.magFilter = FilterModeToMinMagFilter(builder->GetMagFilter());
    desc.mipFilter = FilterModeToMipFilter(builder->GetMipMapFilter());
    // TODO(kainino@chromium.org): wrap modes
    mtlSamplerState = [device->GetMTLDevice() newSamplerStateWithDescriptor:desc];
}
// Releases the owned MTLSamplerState created in the constructor.
Sampler::~Sampler() {
    [mtlSamplerState release];
}

// Accessor for the underlying Metal sampler state (non-owning for callers).
id<MTLSamplerState> Sampler::GetMTLSamplerState() {
    return mtlSamplerState;
}
// ShaderModule

// Cross-compiles the module's SPIR-V to MSL with SPIRV-Cross, then compiles
// the generated source into an MTLLibrary from which entry-point functions
// are later fetched (see GetFunction).
ShaderModule::ShaderModule(Device* device, ShaderModuleBuilder* builder)
    : ShaderModuleBase(builder), device(device) {
    compiler = new spirv_cross::CompilerMSL(builder->AcquireSpirv());
    // Gather reflection info (push constants, bindings, ...) for the frontend.
    ExtractSpirvInfo(*compiler);

    spirv_cross::MSLConfiguration mslConfig;
    mslConfig.flip_vert_y = false;
    mslConfig.flip_frag_y = false;
    std::string msl = compiler->compile(mslConfig);

    // Use -stringWithUTF8String: instead of a "%s" format: "%s" interprets
    // the bytes in the system encoding (not guaranteed UTF-8) and needlessly
    // runs format parsing, while SPIRV-Cross output is UTF-8 text.
    NSString* mslSource = [NSString stringWithUTF8String:msl.c_str()];

    NSError* error = nil;
    mtlLibrary = [device->GetMTLDevice() newLibraryWithSource:mslSource options:nil error:&error];
    if (error != nil) {
        // NOTE(review): 'error' can reportedly be non-nil for warnings even
        // when the library was created — confirm whether this should also
        // check mtlLibrary == nil before treating it as fatal.
        NSLog(@"MTLDevice newLibraryWithSource => %@", error);
        device->HandleError("Error creating MTLLibrary from MSL source");
    }
}
// Frees the SPIRV-Cross compiler allocated in the constructor.
// NOTE(review): mtlLibrary is never released here — presumably a leak;
// confirm and release it alongside the compiler.
ShaderModule::~ShaderModule() {
    delete compiler;
}
// Looks up an entry-point function in the compiled MTLLibrary.
// SPIRV-Cross renames "main" (reserved in MSL) to "main0", so mirror that.
id<MTLFunction> ShaderModule::GetFunction(const char* functionName) const {
    // TODO(kainino@chromium.org): make this somehow more robust; it needs to behave like clean_func_name:
    // https://github.com/KhronosGroup/SPIRV-Cross/blob/4e915e8c483e319d0dd7a1fa22318bef28f8cca3/spirv_msl.cpp#L1213
    if (strcmp(functionName, "main") == 0) {
        functionName = "main0";
    }
    // -stringWithUTF8String: avoids the system-encoding interpretation and
    // format parsing that the previous "%s" stringWithFormat: incurred.
    NSString* name = [NSString stringWithUTF8String:functionName];
    // NOTE(review): -newFunctionWithName: returns a +1 reference that the
    // visible callers never release — confirm the intended ownership.
    return [mtlLibrary newFunctionWithName:name];
}
// Returns the workgroup size declared by the given compute entry point, as
// reported by SPIRV-Cross reflection.
MTLSize ShaderModule::GetLocalWorkGroupSize(const std::string& entryPoint) const {
    auto size = compiler->get_entry_point(entryPoint).workgroup_size;
    return MTLSizeMake(size.x, size.y, size.z);
}
// Texture

// Maps an NXT texture format to the Metal pixel format used when creating
// MTLTextures. Only RGBA8 unorm is supported so far.
MTLPixelFormat TextureFormatPixelFormat(nxt::TextureFormat format) {
    switch (format) {
        case nxt::TextureFormat::R8G8B8A8Unorm:
            return MTLPixelFormatRGBA8Unorm;
    }
}
// Creates the MTLTexture described by the builder. Only 2D, shader-readable
// textures are handled for now.
Texture::Texture(Device* device, TextureBuilder* builder)
    : TextureBase(builder), device(device) {
    auto desc = [MTLTextureDescriptor new];
    [desc autorelease]; // the descriptor is only needed during creation
    switch (GetDimension()) {
        case nxt::TextureDimension::e2D:
            desc.textureType = MTLTextureType2D;
            break;
    }
    desc.usage = MTLTextureUsageShaderRead;
    desc.pixelFormat = TextureFormatPixelFormat(GetFormat());
    desc.width = GetWidth();
    desc.height = GetHeight();
    // NOTE(review): MTLTextureType2D requires depth == 1; presumably
    // GetDepth() returns 1 for 2D textures — confirm.
    desc.depth = GetDepth();
    desc.mipmapLevelCount = GetNumMipLevels();
    desc.arrayLength = 1;
    mtlTexture = [device->GetMTLDevice() newTextureWithDescriptor:desc];
}
// Releases the owned MTLTexture created in the constructor.
Texture::~Texture() {
    [mtlTexture release];
}

// Accessor for the underlying Metal texture (non-owning for callers).
id<MTLTexture> Texture::GetMTLTexture() {
    return mtlTexture;
}
// TextureView

// Texture views carry no Metal-side state yet; the texture itself is used
// directly wherever a view is bound.
TextureView::TextureView(Device* device, TextureViewBuilder* builder)
    : TextureViewBase(builder), device(device) {
}
}
}

View File

@@ -0,0 +1,303 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "CommandBufferGL.h"
#include "common/Commands.h"
#include "OpenGLBackend.h"
#include "PipelineGL.h"
#include "PipelineLayoutGL.h"
#include "SamplerGL.h"
#include "TextureGL.h"
#include <cstring>
namespace backend {
namespace opengl {
// Takes ownership of the command stream recorded by the builder; the
// commands are replayed against the GL context in Execute().
CommandBuffer::CommandBuffer(Device* device, CommandBufferBuilder* builder)
    : CommandBufferBase(builder), device(device), commands(builder->AcquireCommands()) {
}
// Frees the acquired command stream (the iterator does not own its storage).
CommandBuffer::~CommandBuffer() {
    FreeCommands(&commands);
}
// Maps an NXT index format to the GL index type used by glDrawElements*.
static GLenum IndexFormatType(nxt::IndexFormat format) {
    return (format == nxt::IndexFormat::Uint16) ? GL_UNSIGNED_SHORT : GL_UNSIGNED_INT;
}
// Maps an NXT vertex format to the GL component type for
// glVertexAttribPointer. Every format currently supported
// (FloatR32G32B32A32 / FloatR32G32B32 / FloatR32G32) is 32-bit float based.
static GLenum VertexFormatType(nxt::VertexFormat) {
    return GL_FLOAT;
}
// Replays the recorded command stream directly against the currently-bound
// GL context. State that later commands depend on is tracked in locals:
// the current pipeline (for push constants / bind groups / vertex input)
// and the index buffer format/offset (GL bakes these into the draw call
// rather than into the buffer binding).
void CommandBuffer::Execute() {
    Command type;
    Pipeline* lastPipeline = nullptr;
    uint32_t indexBufferOffset = 0;
    nxt::IndexFormat indexBufferFormat = nxt::IndexFormat::Uint16;

    while (commands.NextCommandId(&type)) {
        switch (type) {
            case Command::CopyBufferToTexture:
                {
                    CopyBufferToTextureCmd* copy = commands.NextCommand<CopyBufferToTextureCmd>();
                    Buffer* buffer = ToBackend(copy->buffer.Get());
                    Texture* texture = ToBackend(copy->texture.Get());
                    GLenum target = texture->GetGLTarget();
                    auto format = texture->GetGLFormat();

                    // Source the texels from the buffer via the pixel unpack
                    // binding; a nullptr data pointer then means "offset 0
                    // into the bound buffer".
                    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, buffer->GetHandle());
                    glActiveTexture(GL_TEXTURE0);
                    glBindTexture(target, texture->GetHandle());
                    glTexSubImage2D(target, copy->level, copy->x, copy->y, copy->width, copy->height,
                                    format.format, format.type, nullptr);
                    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
                }
                break;

            case Command::Dispatch:
                {
                    DispatchCmd* dispatch = commands.NextCommand<DispatchCmd>();
                    glDispatchCompute(dispatch->x, dispatch->y, dispatch->z);
                    // TODO(cwallez@chromium.org): add barriers to the API
                    glMemoryBarrier(GL_ALL_BARRIER_BITS);
                }
                break;

            case Command::DrawArrays:
                {
                    DrawArraysCmd* draw = commands.NextCommand<DrawArraysCmd>();
                    if (draw->firstInstance > 0) {
                        glDrawArraysInstancedBaseInstance(GL_TRIANGLES,
                            draw->firstVertex, draw->vertexCount, draw->instanceCount, draw->firstInstance);
                    } else {
                        // This branch is only needed on OpenGL < 4.2
                        glDrawArraysInstanced(GL_TRIANGLES,
                            draw->firstVertex, draw->vertexCount, draw->instanceCount);
                    }
                }
                break;

            case Command::DrawElements:
                {
                    DrawElementsCmd* draw = commands.NextCommand<DrawElementsCmd>();
                    // Translate the index-buffer state recorded by the last
                    // SetIndexBuffer into a byte offset for the draw call.
                    size_t formatSize = IndexFormatSize(indexBufferFormat);
                    GLenum formatType = IndexFormatType(indexBufferFormat);

                    if (draw->firstInstance > 0) {
                        glDrawElementsInstancedBaseInstance(GL_TRIANGLES,
                            draw->indexCount, formatType,
                            reinterpret_cast<void*>(draw->firstIndex * formatSize + indexBufferOffset),
                            draw->instanceCount, draw->firstInstance);
                    } else {
                        // This branch is only needed on OpenGL < 4.2
                        glDrawElementsInstanced(GL_TRIANGLES,
                            draw->indexCount, formatType,
                            reinterpret_cast<void*>(draw->firstIndex * formatSize + indexBufferOffset),
                            draw->instanceCount);
                    }
                }
                break;

            case Command::SetPipeline:
                {
                    SetPipelineCmd* cmd = commands.NextCommand<SetPipelineCmd>();
                    // Binds the program and VAO; remember the pipeline so
                    // later commands can query its layout and input state.
                    ToBackend(cmd->pipeline)->ApplyNow();
                    lastPipeline = ToBackend(cmd->pipeline).Get();
                }
                break;

            case Command::SetPushConstants:
                {
                    SetPushConstantsCmd* cmd = commands.NextCommand<SetPushConstantsCmd>();
                    // The raw 32-bit words are reinterpreted per the push
                    // constant's declared type before the glUniform* call.
                    uint32_t* valuesUInt = commands.NextData<uint32_t>(cmd->count);
                    int32_t* valuesInt = reinterpret_cast<int32_t*>(valuesUInt);
                    float* valuesFloat = reinterpret_cast<float*>(valuesUInt);

                    for (auto stage : IterateStages(cmd->stage)) {
                        const auto& pushConstants = lastPipeline->GetPushConstants(stage);
                        const auto& glPushConstants = lastPipeline->GetGLPushConstants(stage);
                        for (size_t i = 0; i < cmd->count; i++) {
                            GLint location = glPushConstants[cmd->offset + i];

                            switch (pushConstants.types[cmd->offset + i]) {
                                case PushConstantType::Int:
                                    glUniform1i(location, valuesInt[i]);
                                    break;
                                case PushConstantType::UInt:
                                    glUniform1ui(location, valuesUInt[i]);
                                    break;
                                case PushConstantType::Float:
                                    glUniform1f(location, valuesFloat[i]);
                                    break;
                            }
                        }
                    }
                }
                break;

            case Command::SetBindGroup:
                {
                    SetBindGroupCmd* cmd = commands.NextCommand<SetBindGroupCmd>();
                    size_t index = cmd->index;
                    BindGroup* group = ToBackend(cmd->group.Get());
                    // Indices assigned by the pipeline layout for this group.
                    const auto& indices = ToBackend(lastPipeline->GetLayout())->GetBindingIndexInfo()[index];
                    const auto& layout = group->GetLayout()->GetBindingInfo();

                    // TODO(cwallez@chromium.org): iterate over the layout bitmask instead
                    for (size_t binding = 0; binding < kMaxBindingsPerGroup; ++binding) {
                        if (!layout.mask[binding]) {
                            continue;
                        }

                        switch (layout.types[binding]) {
                            case nxt::BindingType::UniformBuffer:
                                {
                                    BufferView* view = ToBackend(group->GetBindingAsBufferView(binding));
                                    GLuint buffer = ToBackend(view->GetBuffer())->GetHandle();
                                    // Note: this inner 'index' (the UBO
                                    // binding point) shadows the group index
                                    // declared above.
                                    GLuint index = indices[binding];
                                    glBindBufferRange(GL_UNIFORM_BUFFER, index, buffer, view->GetOffset(), view->GetSize());
                                }
                                break;

                            case nxt::BindingType::Sampler:
                                {
                                    GLuint sampler = ToBackend(group->GetBindingAsSampler(binding))->GetHandle();
                                    GLuint index = indices[binding];
                                    // GL has no separate samplers: bind to
                                    // every texture unit this sampler is
                                    // combined with in the program.
                                    for (auto unit : lastPipeline->GetTextureUnitsForSampler(index)) {
                                        glBindSampler(unit, sampler);
                                    }
                                }
                                break;

                            case nxt::BindingType::SampledTexture:
                                {
                                    TextureView* view = ToBackend(group->GetBindingAsTextureView(binding));
                                    Texture* texture = ToBackend(view->GetTexture());
                                    GLuint handle = texture->GetHandle();
                                    GLenum target = texture->GetGLTarget();
                                    GLuint index = indices[binding];
                                    for (auto unit : lastPipeline->GetTextureUnitsForTexture(index)) {
                                        glActiveTexture(GL_TEXTURE0 + unit);
                                        glBindTexture(target, handle);
                                    }
                                }
                                break;

                            case nxt::BindingType::StorageBuffer:
                                {
                                    BufferView* view = ToBackend(group->GetBindingAsBufferView(binding));
                                    GLuint buffer = ToBackend(view->GetBuffer())->GetHandle();
                                    GLuint index = indices[binding];
                                    glBindBufferRange(GL_SHADER_STORAGE_BUFFER, index, buffer, view->GetOffset(), view->GetSize());
                                }
                                break;
                        }
                    }
                }
                break;

            case Command::SetIndexBuffer:
                {
                    SetIndexBufferCmd* cmd = commands.NextCommand<SetIndexBufferCmd>();
                    GLuint buffer = ToBackend(cmd->buffer.Get())->GetHandle();

                    // Offset and format cannot be set in GL state; they are
                    // remembered and applied at DrawElements time.
                    indexBufferOffset = cmd->offset;
                    indexBufferFormat = cmd->format;
                    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, buffer);
                }
                break;

            case Command::SetVertexBuffers:
                {
                    SetVertexBuffersCmd* cmd = commands.NextCommand<SetVertexBuffersCmd>();
                    auto buffers = commands.NextData<Ref<BufferBase>>(cmd->count);
                    auto offsets = commands.NextData<uint32_t>(cmd->count);

                    auto inputState = lastPipeline->GetInputState();

                    // Apply the attribute pointers now, since GL couples the
                    // buffer binding with the attribute layout.
                    auto& attributesSetMask = inputState->GetAttributesSetMask();
                    for (uint32_t location = 0; location < attributesSetMask.size(); ++location) {
                        if (!attributesSetMask[location]) {
                            // This slot is not used in the input state
                            continue;
                        }
                        auto attribute = inputState->GetAttribute(location);
                        auto slot = attribute.bindingSlot;
                        ASSERT(slot < kMaxVertexInputs);
                        if (slot < cmd->startSlot || slot >= cmd->startSlot + cmd->count) {
                            // This slot is not affected by this call
                            continue;
                        }
                        size_t bufferIndex = slot - cmd->startSlot;
                        GLuint buffer = ToBackend(buffers[bufferIndex])->GetHandle();
                        uint32_t bufferOffset = offsets[bufferIndex];

                        auto input = inputState->GetInput(slot);

                        auto components = VertexFormatNumComponents(attribute.format);
                        auto formatType = VertexFormatType(attribute.format);

                        glBindBuffer(GL_ARRAY_BUFFER, buffer);
                        glVertexAttribPointer(
                            location, components, formatType, GL_FALSE,
                            input.stride,
                            reinterpret_cast<void*>(static_cast<intptr_t>(bufferOffset + attribute.offset)));
                    }
                }
                break;

            case Command::TransitionBufferUsage:
                {
                    TransitionBufferUsageCmd* cmd = commands.NextCommand<TransitionBufferUsageCmd>();
                    cmd->buffer->TransitionUsageImpl(cmd->usage);
                }
                break;

            case Command::TransitionTextureUsage:
                {
                    TransitionTextureUsageCmd* cmd = commands.NextCommand<TransitionTextureUsageCmd>();
                    cmd->texture->TransitionUsageImpl(cmd->usage);
                }
                break;
        }
    }

    // HACK: cleanup a tiny bit of state to make this work with
    // virtualized contexts enabled in Chromium
    glBindSampler(0, 0);
}
}
}

View File

@@ -0,0 +1,45 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_OPENGL_COMMANDBUFFER_H_
#define BACKEND_OPENGL_COMMANDBUFFER_H_
#include "common/CommandAllocator.h"
#include "common/CommandBuffer.h"
namespace backend {
class CommandBufferBuilder;
}
namespace backend {
namespace opengl {
class Device;
// OpenGL implementation of command buffers: the command stream recorded by
// the frontend builder is stored as a CommandIterator and replayed directly
// against the GL context by Execute() at queue-submit time.
class CommandBuffer : public CommandBufferBase {
    public:
        CommandBuffer(Device* device, CommandBufferBuilder* builder);
        ~CommandBuffer();

        // Replays the recorded commands on the currently-bound GL context.
        void Execute();

    private:
        Device* device;
        CommandIterator commands;
};
}
}
#endif // BACKEND_OPENGL_COMMANDBUFFER_H_

View File

@@ -0,0 +1,21 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "OpenGLBackend.h"
#include "CommandBufferGL.h"
#include "PipelineGL.h"
#include "PipelineLayoutGL.h"
#include "SamplerGL.h"
#include "ShaderModuleGL.h"
#include "TextureGL.h"

View File

@@ -0,0 +1,180 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "OpenGLBackend.h"
#include "CommandBufferGL.h"
#include "PipelineGL.h"
#include "PipelineLayoutGL.h"
#include "ShaderModuleGL.h"
#include "SamplerGL.h"
#include "TextureGL.h"
namespace backend {
namespace opengl {
// Proc tables defined in the generated code.
nxtProcTable GetNonValidatingProcs();
nxtProcTable GetValidatingProcs();

// Clears the currently-bound framebuffer to opaque black.
// HACK (per the name): presumably puts the default framebuffer in a known
// state for the embedder — confirm intent before relying on it.
void HACKCLEAR() {
    glClearColor(0, 0, 0, 1);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
}

// Backend entry point: loads GL function pointers through the
// embedder-supplied loader, sets initial GL state, and returns the
// (validating) proc table along with a newly created device.
void Init(void* (*getProc)(const char*), nxtProcTable* procs, nxtDevice* device) {
    *device = nullptr;

    gladLoadGLLoader(reinterpret_cast<GLADloadproc>(getProc));
    glEnable(GL_DEPTH_TEST);
    HACKCLEAR();

    *procs = GetValidatingProcs();
    *device = reinterpret_cast<nxtDevice>(new Device);
}
// Device

// Factory methods: each instantiates the OpenGL backend object for the
// corresponding frontend builder.
BindGroupBase* Device::CreateBindGroup(BindGroupBuilder* builder) {
    return new BindGroup(this, builder);
}
BindGroupLayoutBase* Device::CreateBindGroupLayout(BindGroupLayoutBuilder* builder) {
    return new BindGroupLayout(this, builder);
}
BufferBase* Device::CreateBuffer(BufferBuilder* builder) {
    return new Buffer(this, builder);
}
BufferViewBase* Device::CreateBufferView(BufferViewBuilder* builder) {
    return new BufferView(this, builder);
}
CommandBufferBase* Device::CreateCommandBuffer(CommandBufferBuilder* builder) {
    return new CommandBuffer(this, builder);
}
InputStateBase* Device::CreateInputState(InputStateBuilder* builder) {
    return new InputState(this, builder);
}
PipelineBase* Device::CreatePipeline(PipelineBuilder* builder) {
    return new Pipeline(this, builder);
}
PipelineLayoutBase* Device::CreatePipelineLayout(PipelineLayoutBuilder* builder) {
    return new PipelineLayout(this, builder);
}
QueueBase* Device::CreateQueue(QueueBuilder* builder) {
    return new Queue(this, builder);
}
SamplerBase* Device::CreateSampler(SamplerBuilder* builder) {
    return new Sampler(this, builder);
}
ShaderModuleBase* Device::CreateShaderModule(ShaderModuleBuilder* builder) {
    return new ShaderModule(this, builder);
}
TextureBase* Device::CreateTexture(TextureBuilder* builder) {
    return new Texture(this, builder);
}
TextureViewBase* Device::CreateTextureView(TextureViewBuilder* builder) {
    return new TextureView(this, builder);
}

// NXT API refcounting: no-ops for now — the device created in Init() is
// never freed.
void Device::Reference() {
}
void Device::Release() {
}
// Bind Group

// Bind groups carry no GL-side state; bindings are applied lazily in
// CommandBuffer::Execute.
BindGroup::BindGroup(Device* device, BindGroupBuilder* builder)
    : BindGroupBase(builder), device(device) {
}

// Bind Group Layout

// Likewise stateless on the GL side.
BindGroupLayout::BindGroupLayout(Device* device, BindGroupLayoutBuilder* builder)
    : BindGroupLayoutBase(builder), device(device) {
}
// Buffer

// Creates the GL buffer object and allocates GetSize() bytes of
// uninitialized storage for it. GL_ARRAY_BUFFER is used only as a
// convenient binding point for the allocation.
Buffer::Buffer(Device* device, BufferBuilder* builder)
    : BufferBase(builder), device(device) {
    glGenBuffers(1, &buffer);
    glBindBuffer(GL_ARRAY_BUFFER, buffer);
    glBufferData(GL_ARRAY_BUFFER, GetSize(), nullptr, GL_STATIC_DRAW);
}
// Returns the GL buffer object name.
GLuint Buffer::GetHandle() const {
    return buffer;
}

// Uploads `count` 32-bit words starting at word offset `start`; both
// parameters are in units of uint32_t, not bytes.
void Buffer::SetSubDataImpl(uint32_t start, uint32_t count, const uint32_t* data) {
    glBindBuffer(GL_ARRAY_BUFFER, buffer);
    glBufferSubData(GL_ARRAY_BUFFER, start * sizeof(uint32_t), count * sizeof(uint32_t), data);
}
// BufferView

// Buffer views carry no GL-side state; offset/size are read from the base
// class when the view is bound.
BufferView::BufferView(Device* device, BufferViewBuilder* builder)
    : BufferViewBase(builder), device(device) {
}
// InputState

// Builds a VAO capturing which attributes are enabled and their step mode.
// The actual attribute pointers (buffer + stride + offset) are applied at
// draw-record time in CommandBuffer::Execute's SetVertexBuffers handling,
// since NXT separates the attribute layout from the bound buffers.
InputState::InputState(Device* device, InputStateBuilder* builder)
    : InputStateBase(builder), device(device) {
    glGenVertexArrays(1, &vertexArrayObject);
    glBindVertexArray(vertexArrayObject);
    auto& attributesSetMask = GetAttributesSetMask();
    for (uint32_t location = 0; location < attributesSetMask.size(); ++location) {
        if (!attributesSetMask[location]) {
            continue;
        }
        auto attribute = GetAttribute(location);
        glEnableVertexAttribArray(location);

        auto input = GetInput(attribute.bindingSlot);
        if (input.stride == 0) {
            // Emulate a stride of zero (constant vertex attribute) by
            // setting the attribute instance divisor to a huge number.
            glVertexAttribDivisor(location, 0xffffffff);
        } else {
            switch (input.stepMode) {
                case nxt::InputStepMode::Vertex:
                    // Divisor 0 (per-vertex) is the GL default.
                    break;
                case nxt::InputStepMode::Instance:
                    glVertexAttribDivisor(location, 1);
                    break;
                default:
                    ASSERT(false);
                    break;
            }
        }
    }
}
// Returns the vertex array object encoding this input state.
GLuint InputState::GetVAO() {
    return vertexArrayObject;
}
// Queue

// The GL backend has no queue object; submission executes immediately.
Queue::Queue(Device* device, QueueBuilder* builder) : device(device) {
}

// Replays each command buffer synchronously on the current GL context.
void Queue::Submit(uint32_t numCommands, CommandBuffer* const * commands) {
    for (uint32_t i = 0; i < numCommands; ++i) {
        commands[i]->Execute();
    }
}
}
}

View File

@@ -0,0 +1,151 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_OPENGL_OPENGLBACKEND_H_
#define BACKEND_OPENGL_OPENGLBACKEND_H_
#include "nxt/nxtcpp.h"
#include "common/Buffer.h"
#include "common/BindGroup.h"
#include "common/BindGroupLayout.h"
#include "common/Device.h"
#include "common/InputState.h"
#include "common/Queue.h"
#include "common/ToBackend.h"
#include "glad/glad.h"
namespace backend {
namespace opengl {
class BindGroup;
class BindGroupLayout;
class Buffer;
class BufferView;
class CommandBuffer;
class InputState;
class Pipeline;
class PipelineLayout;
class Queue;
class Sampler;
class ShaderModule;
class Texture;
class TextureView;
// Maps each frontend base type to this backend's concrete type, for use by
// the ToBackend helper below.
struct OpenGLBackendTraits {
    using BindGroupType = BindGroup;
    using BindGroupLayoutType = BindGroupLayout;
    using BufferType = Buffer;
    using BufferViewType = BufferView;
    using CommandBufferType = CommandBuffer;
    using InputStateType = InputState;
    using PipelineType = Pipeline;
    using PipelineLayoutType = PipelineLayout;
    using QueueType = Queue;
    using SamplerType = Sampler;
    using ShaderModuleType = ShaderModule;
    using TextureType = Texture;
    using TextureViewType = TextureView;
};

// Downcasts a frontend object handle to the OpenGL backend type, preserving
// the handle kind (pointer, Ref, const-ness) via ToBackendBase.
template<typename T>
auto ToBackend(T&& common) -> decltype(ToBackendBase<OpenGLBackendTraits>(common)) {
    return ToBackendBase<OpenGLBackendTraits>(common);
}
// Definition of backend types

// OpenGL device: a factory for all backend objects. It holds no GL context
// of its own — the embedder's context is assumed current on the calling
// thread.
class Device : public DeviceBase {
    public:
        BindGroupBase* CreateBindGroup(BindGroupBuilder* builder) override;
        BindGroupLayoutBase* CreateBindGroupLayout(BindGroupLayoutBuilder* builder) override;
        BufferBase* CreateBuffer(BufferBuilder* builder) override;
        BufferViewBase* CreateBufferView(BufferViewBuilder* builder) override;
        CommandBufferBase* CreateCommandBuffer(CommandBufferBuilder* builder) override;
        InputStateBase* CreateInputState(InputStateBuilder* builder) override;
        PipelineBase* CreatePipeline(PipelineBuilder* builder) override;
        PipelineLayoutBase* CreatePipelineLayout(PipelineLayoutBuilder* builder) override;
        QueueBase* CreateQueue(QueueBuilder* builder) override;
        SamplerBase* CreateSampler(SamplerBuilder* builder) override;
        ShaderModuleBase* CreateShaderModule(ShaderModuleBuilder* builder) override;
        TextureBase* CreateTexture(TextureBuilder* builder) override;
        TextureViewBase* CreateTextureView(TextureViewBuilder* builder) override;

        // NXT API — currently no-op refcounting (see the .cpp).
        void Reference();
        void Release();
};
// Stateless wrapper; bindings are applied in CommandBuffer::Execute.
class BindGroup : public BindGroupBase {
    public:
        BindGroup(Device* device, BindGroupBuilder* builder);

    private:
        Device* device;
};

// Stateless wrapper around the frontend layout.
class BindGroupLayout : public BindGroupLayoutBase {
    public:
        BindGroupLayout(Device* device, BindGroupLayoutBuilder* builder);

    private:
        Device* device;
};

// Owns a GL buffer object sized at construction.
class Buffer : public BufferBase {
    public:
        Buffer(Device* device, BufferBuilder* builder);

        // GL buffer object name.
        GLuint GetHandle() const;

    private:
        // start/count are in uint32_t words, not bytes.
        void SetSubDataImpl(uint32_t start, uint32_t count, const uint32_t* data) override;

        Device* device;
        GLuint buffer = 0;
};

// Stateless wrapper; offset/size come from the base class at bind time.
class BufferView : public BufferViewBase {
    public:
        BufferView(Device* device, BufferViewBuilder* builder);

    private:
        Device* device;
};

// Owns a VAO encoding the enabled attributes and their step modes.
class InputState : public InputStateBase {
    public:
        InputState(Device* device, InputStateBuilder* builder);
        GLuint GetVAO();

    private:
        Device* device;
        GLuint vertexArrayObject;
};

// Submission executes command buffers immediately on the current context.
class Queue : public QueueBase {
    public:
        Queue(Device* device, QueueBuilder* builder);

        // NXT API
        void Submit(uint32_t numCommands, CommandBuffer* const * commands);

    private:
        Device* device;
};
}
}
#endif // BACKEND_OPENGL_OPENGLBACKEND_H_

View File

@@ -0,0 +1,213 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "PipelineGL.h"
#include "OpenGLBackend.h"
#include "PipelineLayoutGL.h"
#include "ShaderModuleGL.h"
#include <iostream>
#include <set>
namespace backend {
namespace opengl {
namespace {
    // Maps an NXT shader stage to the matching GL shader object type.
    GLenum GLShaderType(nxt::ShaderStage stage) {
        if (stage == nxt::ShaderStage::Vertex) {
            return GL_VERTEX_SHADER;
        }
        if (stage == nxt::ShaderStage::Fragment) {
            return GL_FRAGMENT_SHADER;
        }
        return GL_COMPUTE_SHADER;
    }
}
// Compiles and links the GL program for this pipeline, then pre-binds all
// the state that is part of the program object: push-constant uniform
// locations, UBO/SSBO block bindings, and the texture units used to emulate
// NXT's separate samplers and textures with GL's combined ones.
Pipeline::Pipeline(Device* device, PipelineBuilder* builder) : PipelineBase(builder), device(device) {
    // Compiles one shader object from GLSL source, logging failures.
    // NOTE(review): the shader objects are never glDeleteShader'd after
    // linking, and the failure message says "Program" for a shader compile —
    // confirm both are intentional.
    auto CreateShader = [](GLenum type, const char* source) -> GLuint {
        GLuint shader = glCreateShader(type);
        glShaderSource(shader, 1, &source, nullptr);
        glCompileShader(shader);

        GLint compileStatus = GL_FALSE;
        glGetShaderiv(shader, GL_COMPILE_STATUS, &compileStatus);
        if (compileStatus == GL_FALSE) {
            GLint infoLogLength = 0;
            glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &infoLogLength);

            if (infoLogLength > 1) {
                std::vector<char> buffer(infoLogLength);
                glGetShaderInfoLog(shader, infoLogLength, nullptr, &buffer[0]);
                std::cout << source << std::endl;
                std::cout << "Program compilation failed:\n";
                std::cout << buffer.data() << std::endl;
            }
        }
        return shader;
    };

    // Records, per push constant, the uniform location of each of its
    // 32-bit words (-1 when the constant has no live uniform). Multi-word
    // constants occupy consecutive slots and consecutive locations.
    auto FillPushConstants = [](const ShaderModule* module, GLPushConstantInfo* info, GLuint program) {
        const auto& moduleInfo = module->GetPushConstants();
        for (uint32_t i = 0; i < moduleInfo.names.size(); i++) {
            (*info)[i] = -1;

            unsigned int size = moduleInfo.sizes[i];
            if (size == 0) {
                continue;
            }

            GLint location = glGetUniformLocation(program, moduleInfo.names[i].c_str());
            if (location == -1) {
                continue;
            }

            for (uint32_t offset = 0; offset < size; offset++) {
                (*info)[i + offset] = location + offset;
            }
            i += size - 1;
        }
    };

    // Compile and attach one shader per stage, then link.
    program = glCreateProgram();

    for (auto stage : IterateStages(GetStageMask())) {
        const ShaderModule* module = ToBackend(builder->GetStageInfo(stage).module.Get());

        GLuint shader = CreateShader(GLShaderType(stage), module->GetSource());
        glAttachShader(program, shader);
    }

    glLinkProgram(program);

    GLint linkStatus = GL_FALSE;
    glGetProgramiv(program, GL_LINK_STATUS, &linkStatus);
    if (linkStatus == GL_FALSE) {
        GLint infoLogLength = 0;
        glGetProgramiv(program, GL_INFO_LOG_LENGTH, &infoLogLength);

        if (infoLogLength > 1) {
            std::vector<char> buffer(infoLogLength);
            glGetProgramInfoLog(program, infoLogLength, nullptr, &buffer[0]);
            std::cout << "Program link failed:\n";
            std::cout << buffer.data() << std::endl;
        }
    }

    // Uniform locations are only valid post-link.
    for (auto stage : IterateStages(GetStageMask())) {
        const ShaderModule* module = ToBackend(builder->GetStageInfo(stage).module.Get());
        FillPushConstants(module, &glPushConstants[stage], program);
    }

    glUseProgram(program);

    // The uniforms are part of the program state so we can pre-bind buffer units, texture units etc.
    const auto& layout = ToBackend(GetLayout());
    const auto& indices = layout->GetBindingIndexInfo();

    for (uint32_t group = 0; group < kMaxBindGroups; ++group) {
        const auto& groupInfo = layout->GetBindGroupLayout(group)->GetBindingInfo();

        for (uint32_t binding = 0; binding < kMaxBindingsPerGroup; ++binding) {
            if (!groupInfo.mask[binding]) {
                continue;
            }

            std::string name = GetBindingName(group, binding);
            switch (groupInfo.types[binding]) {
                case nxt::BindingType::UniformBuffer:
                    {
                        GLint location = glGetUniformBlockIndex(program, name.c_str());
                        glUniformBlockBinding(program, location, indices[group][binding]);
                    }
                    break;

                case nxt::BindingType::StorageBuffer:
                    {
                        GLuint location = glGetProgramResourceIndex(program, GL_SHADER_STORAGE_BLOCK, name.c_str());
                        glShaderStorageBlockBinding(program, location, indices[group][binding]);
                    }
                    break;

                case nxt::BindingType::Sampler:
                case nxt::BindingType::SampledTexture:
                    // These binding types are handled in the separate sampler and texture emulation
                    break;
            }
        }
    }

    // Compute links between stages for combined samplers, then bind them to texture units
    {
        std::set<CombinedSampler> combinedSamplersSet;
        for (auto stage : IterateStages(GetStageMask())) {
            const auto& module = ToBackend(builder->GetStageInfo(stage).module);

            for (const auto& combined : module->GetCombinedSamplerInfo()) {
                combinedSamplersSet.insert(combined);
            }
        }

        unitsForSamplers.resize(layout->GetNumSamplers());
        unitsForTextures.resize(layout->GetNumSampledTextures());

        // Assign each combined sampler its own texture unit, starting past
        // the units the layout already accounts for, and remember which
        // units each sampler/texture index must be bound to.
        GLuint textureUnit = layout->GetTextureUnitsUsed();
        for (const auto& combined : combinedSamplersSet) {
            std::string name = combined.GetName();
            GLint location = glGetUniformLocation(program, name.c_str());
            glUniform1i(location, textureUnit);

            GLuint samplerIndex = indices[combined.samplerLocation.group][combined.samplerLocation.binding];
            unitsForSamplers[samplerIndex].push_back(textureUnit);

            GLuint textureIndex = indices[combined.textureLocation.group][combined.textureLocation.binding];
            unitsForTextures[textureIndex].push_back(textureUnit);

            textureUnit++;
        }
    }
}
// Returns the per-word uniform locations of the given stage's push
// constants, as computed at pipeline creation (-1 for unused slots).
const Pipeline::GLPushConstantInfo& Pipeline::GetGLPushConstants(nxt::ShaderStage stage) const {
    return glPushConstants[stage];
}
// Returns the texture units that the sampler with the given layout index
// must be bound to (GL combines samplers and textures into units).
const std::vector<GLuint>& Pipeline::GetTextureUnitsForSampler(GLuint index) const {
    // `index` is unsigned, so the former `index >= 0` check was vacuous;
    // only the upper bound is meaningful.
    ASSERT(index < unitsForSamplers.size());
    return unitsForSamplers[index];
}
// Returns the texture units that the sampled texture with the given layout
// index must be bound to.
const std::vector<GLuint>& Pipeline::GetTextureUnitsForTexture(GLuint index) const {
    // Fix: bounds-check the vector that is actually indexed
    // (unitsForTextures, not unitsForSamplers); also drop the vacuous
    // `index >= 0` comparison on an unsigned value.
    ASSERT(index < unitsForTextures.size());
    return unitsForTextures[index];
}
// Returns the linked GL program object name.
GLuint Pipeline::GetProgramHandle() const {
    return program;
}

// Makes this pipeline current: binds the program and the input state's VAO.
void Pipeline::ApplyNow() {
    glUseProgram(program);

    auto inputState = ToBackend(GetInputState());
    glBindVertexArray(inputState->GetVAO());
}
}
}

View File

@@ -0,0 +1,55 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_OPENGL_PIPELINEGL_H_
#define BACKEND_OPENGL_PIPELINEGL_H_
#include "common/Pipeline.h"
#include "glad/glad.h"
#include <vector>
namespace backend {
namespace opengl {
class Device;
class ShaderModule;
// OpenGL implementation of a pipeline. Wraps the linked GL program plus the
// data precomputed at build time that is needed to bind it: per-stage push
// constant uniform locations and, for each sampler / sampled-texture index,
// the GL texture units it was combined into.
// NOTE(review): uses std::array but this header only includes <vector>;
// presumably <array> arrives transitively via common/Pipeline.h - confirm.
class Pipeline : public PipelineBase {
public:
Pipeline(Device* device, PipelineBuilder* builder);
// One GL uniform location per push constant slot, per shader stage.
using GLPushConstantInfo = std::array<GLint, kMaxPushConstants>;
using BindingLocations = std::array<std::array<GLint, kMaxBindingsPerGroup>, kMaxBindGroups>;
const GLPushConstantInfo& GetGLPushConstants(nxt::ShaderStage stage) const;
// Texture units used by the sampler / texture at the given flat index
// (presumably the index assigned by the opengl::PipelineLayout - confirm).
const std::vector<GLuint>& GetTextureUnitsForSampler(GLuint index) const;
const std::vector<GLuint>& GetTextureUnitsForTexture(GLuint index) const;
GLuint GetProgramHandle() const;
// Binds the program and the input state's VAO on the current context.
void ApplyNow();
private:
GLuint program;
PerStage<GLPushConstantInfo> glPushConstants;
std::vector<std::vector<GLuint>> unitsForSamplers;
std::vector<std::vector<GLuint>> unitsForTextures;
Device* device;
};
}
}
#endif // BACKEND_OPENGL_PIPELINEGL_H_

View File

@@ -0,0 +1,80 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "PipelineLayoutGL.h"
#include "OpenGLBackend.h"
namespace backend {
namespace opengl {
// Assigns every binding declared in the layout a flat index within its
// binding type: UBO binding points, sampler units, sampled-texture units and
// SSBO binding points are each numbered contiguously from 0, in
// (group, binding) order. Also records the total sampler and sampled-texture
// counts for later texture-unit assignment.
PipelineLayout::PipelineLayout(Device* device, PipelineLayoutBuilder* builder)
    : PipelineLayoutBase(builder), device(device) {
    GLuint nextUbo = 0;
    GLuint nextSampler = 0;
    GLuint nextSampledTexture = 0;
    GLuint nextSsbo = 0;
    for (size_t group = 0; group < kMaxBindGroups; ++group) {
        const auto& groupInfo = GetBindGroupLayout(group)->GetBindingInfo();
        for (size_t binding = 0; binding < kMaxBindingsPerGroup; ++binding) {
            // Skip slots this layout does not declare.
            if (!groupInfo.mask[binding]) {
                continue;
            }
            switch (groupInfo.types[binding]) {
                case nxt::BindingType::UniformBuffer:
                    indexInfo[group][binding] = nextUbo++;
                    break;
                case nxt::BindingType::Sampler:
                    indexInfo[group][binding] = nextSampler++;
                    break;
                case nxt::BindingType::SampledTexture:
                    indexInfo[group][binding] = nextSampledTexture++;
                    break;
                case nxt::BindingType::StorageBuffer:
                    indexInfo[group][binding] = nextSsbo++;
                    break;
            }
        }
    }
    numSamplers = nextSampler;
    numSampledTextures = nextSampledTexture;
}
// Returns, for each (group, binding), the flat per-type GL index computed in
// the constructor.
const PipelineLayout::BindingIndexInfo& PipelineLayout::GetBindingIndexInfo() const {
return indexInfo;
}
// NOTE(review): always returns 0 even though this layout tracks sampler and
// sampled-texture counts - this looks like a stub; confirm intended behavior
// with its callers before relying on it.
GLuint PipelineLayout::GetTextureUnitsUsed() const {
return 0;
}
// Total number of sampler bindings declared across all bind groups.
size_t PipelineLayout::GetNumSamplers() const {
return numSamplers;
}
// Total number of sampled-texture bindings declared across all bind groups.
size_t PipelineLayout::GetNumSampledTextures() const {
return numSampledTextures;
}
}
}

View File

@@ -0,0 +1,48 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_OPENGL_PIPELINELAYOUTGL_H_
#define BACKEND_OPENGL_PIPELINELAYOUTGL_H_
#include "common/PipelineLayout.h"
#include "glad/glad.h"
namespace backend {
namespace opengl {
class Device;
// OpenGL pipeline layout: maps each (bind group, binding) pair to a flat
// index within its binding type (UBO / sampler / sampled texture / SSBO),
// assigned contiguously in the constructor.
class PipelineLayout : public PipelineLayoutBase {
public:
PipelineLayout(Device* device, PipelineLayoutBuilder* builder);
// indexInfo[group][binding] holds the per-type GL index of that binding.
using BindingIndexInfo = std::array<std::array<GLuint, kMaxBindingsPerGroup>, kMaxBindGroups>;
const BindingIndexInfo& GetBindingIndexInfo() const;
GLuint GetTextureUnitsUsed() const;
size_t GetNumSamplers() const;
size_t GetNumSampledTextures() const;
private:
Device* device;
BindingIndexInfo indexInfo;
size_t numSamplers;
size_t numSampledTextures;
};
}
}
#endif // BACKEND_OPENGL_PIPELINELAYOUTGL_H_

View File

@@ -0,0 +1,62 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "SamplerGL.h"
namespace backend {
namespace opengl {
namespace {
// Converts an nxt::FilterMode into the GL magnification filter enum.
GLenum MagFilterMode(nxt::FilterMode filter) {
    switch (filter) {
        case nxt::FilterMode::Nearest:
            return GL_NEAREST;
        case nxt::FilterMode::Linear:
            return GL_LINEAR;
    }
    // Unreachable for valid enum values; present so control cannot fall off
    // the end of a non-void function (-Wreturn-type, UB if it ever happened).
    return GL_LINEAR;
}
// Converts the minification and mip filters into the single combined GL
// minification filter enum (GL folds mip filtering into GL_TEXTURE_MIN_FILTER).
GLenum MinFilterMode(nxt::FilterMode minFilter, nxt::FilterMode mipMapFilter) {
    // The original nested switches had no `break` after the Nearest case and
    // no terminating return: if the inner switch ever failed to return,
    // control would fall through into the Linear case or off the end of the
    // function (UB). Branch explicitly instead.
    const bool nearestMin = minFilter == nxt::FilterMode::Nearest;
    const bool nearestMip = mipMapFilter == nxt::FilterMode::Nearest;
    if (nearestMin) {
        return nearestMip ? GL_NEAREST_MIPMAP_NEAREST : GL_NEAREST_MIPMAP_LINEAR;
    }
    return nearestMip ? GL_LINEAR_MIPMAP_NEAREST : GL_LINEAR_MIPMAP_LINEAR;
}
}
}
// Creates the GL sampler object and configures its filters from the builder.
// NOTE(review): no matching glDeleteSamplers is visible in this file -
// confirm the sampler object's lifetime is handled elsewhere.
Sampler::Sampler(Device* device, SamplerBuilder* builder)
: SamplerBase(builder), device(device) {
glGenSamplers(1, &handle);
glSamplerParameteri(handle, GL_TEXTURE_MAG_FILTER, MagFilterMode(builder->GetMagFilter()));
// Min and mip filtering are combined into one GL enum.
glSamplerParameteri(handle, GL_TEXTURE_MIN_FILTER, MinFilterMode(builder->GetMinFilter(), builder->GetMipMapFilter()));
}
// Returns the GL sampler object name.
GLuint Sampler::GetHandle() const {
return handle;
}
}
}

View File

@@ -0,0 +1,41 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_OPENGL_SAMPLERGL_H_
#define BACKEND_OPENGL_SAMPLERGL_H_
#include "common/Sampler.h"
#include "glad/glad.h"
namespace backend {
namespace opengl {
class Device;
// OpenGL sampler: owns a GL sampler object configured from the builder's
// filtering options.
class Sampler : public SamplerBase {
public:
Sampler(Device* device, SamplerBuilder* builder);
// The GL sampler object name, for glBindSampler.
GLuint GetHandle() const;
private:
Device* device;
GLuint handle;
};
}
}
#endif // BACKEND_OPENGL_SAMPLERGL_H_

View File

@@ -0,0 +1,105 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ShaderModuleGL.h"
#include <spirv-cross/spirv_glsl.hpp>
#include <sstream>
namespace backend {
namespace opengl {
// Returns the GLSL identifier used for the resource bound at
// (group, binding), e.g. "nxt_binding_1_2". Must stay in sync with the names
// assigned during SPIRV-Cross translation in the ShaderModule constructor.
std::string GetBindingName(uint32_t group, uint32_t binding) {
    std::string name = "nxt_binding_";
    name += std::to_string(group);
    name += '_';
    name += std::to_string(binding);
    return name;
}
// Lexicographic ordering on (group, binding) so BindingLocations can be used
// as keys in ordered containers.
bool operator < (const BindingLocation& a, const BindingLocation& b) {
    if (a.group != b.group) {
        return a.group < b.group;
    }
    return a.binding < b.binding;
}
// Lexicographic ordering on (samplerLocation, textureLocation) so
// CombinedSamplers can be used as keys in ordered containers.
bool operator < (const CombinedSampler& a, const CombinedSampler& b) {
    if (a.samplerLocation < b.samplerLocation) {
        return true;
    }
    if (b.samplerLocation < a.samplerLocation) {
        return false;
    }
    return a.textureLocation < b.textureLocation;
}
// Returns the GLSL identifier of the combined image-sampler built from this
// sampler/texture pair, e.g. "nxt_combined_0_1_with_0_2". Must stay in sync
// with the names set on combined ids in the ShaderModule constructor.
std::string CombinedSampler::GetName() const {
    std::string name = "nxt_combined_";
    name += std::to_string(samplerLocation.group);
    name += '_';
    name += std::to_string(samplerLocation.binding);
    name += "_with_";
    name += std::to_string(textureLocation.group);
    name += '_';
    name += std::to_string(textureLocation.binding);
    return name;
}
// Translates the module's SPIR-V into GLSL consumable by the GL backend:
//  - picks a GLSL version per platform (4.1 on macOS, 4.5 elsewhere),
//  - merges separate sampler/texture bindings into combined image-samplers
//    (GLSL has no separate samplers) and records where each half came from,
//  - renames bindings to "nxt_binding_<group>_<binding>" and strips the
//    binding / descriptor-set decorations that macOS GL cannot consume.
ShaderModule::ShaderModule(Device* device, ShaderModuleBuilder* builder)
: ShaderModuleBase(builder), device(device) {
spirv_cross::CompilerGLSL compiler(builder->AcquireSpirv());
spirv_cross::CompilerGLSL::Options options;
// TODO(cwallez@chromium.org): discover the backing context version and use that.
#if defined(__APPLE__)
options.version = 410;
#else
options.version = 450;
#endif
compiler.set_options(options);
// Populates the cross-backend reflection data (bindings, push constants...).
ExtractSpirvInfo(compiler);
const auto& bindingInfo = GetBindingInfo();
// Extract bindings names so that it can be used to get its location in program.
// Now translate the separate sampler / textures into combined ones and store their info.
// We need to do this before removing the set and binding decorations.
compiler.build_combined_image_samplers();
for (const auto& combined : compiler.get_combined_image_samplers()) {
combinedInfo.emplace_back();
auto& info = combinedInfo.back();
// Remember which (group, binding) each half of the combined sampler
// came from so the Pipeline can wire up texture units later.
info.samplerLocation.group = compiler.get_decoration(combined.sampler_id, spv::DecorationDescriptorSet);
info.samplerLocation.binding = compiler.get_decoration(combined.sampler_id, spv::DecorationBinding);
info.textureLocation.group = compiler.get_decoration(combined.image_id, spv::DecorationDescriptorSet);
info.textureLocation.binding = compiler.get_decoration(combined.image_id, spv::DecorationBinding);
// Give the synthesized combined variable a predictable name so its GL
// uniform location can be looked up by name.
compiler.set_name(combined.combined_id, info.GetName());
}
// Change binding names to be "nxt_binding_<group>_<binding>".
// Also unsets the SPIRV "Binding" decoration as it outputs "layout(binding=)" which
// isn't supported on OSX's OpenGL.
for (uint32_t group = 0; group < kMaxBindGroups; ++group) {
for (uint32_t binding = 0; binding < kMaxBindingsPerGroup; ++binding) {
const auto& info = bindingInfo[group][binding];
if (info.used) {
compiler.set_name(info.base_type_id, GetBindingName(group, binding));
compiler.unset_decoration(info.id, spv::DecorationBinding);
compiler.unset_decoration(info.id, spv::DecorationDescriptorSet);
}
}
}
glslSource = compiler.compile();
}
// Returns the translated GLSL source as a NUL-terminated C string suitable
// for glShaderSource. The pointer is owned by this module and stays valid as
// long as the module is alive.
const char* ShaderModule::GetSource() const {
    // std::string::c_str() already yields const char*; the previous
    // reinterpret_cast of data() was redundant noise.
    return glslSource.c_str();
}
// Returns the list of combined image-samplers synthesized during SPIR-V ->
// GLSL translation, with the original sampler / texture binding locations.
const ShaderModule::CombinedSamplerInfo& ShaderModule::GetCombinedSamplerInfo() const {
return combinedInfo;
}
}
}

View File

@@ -0,0 +1,60 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_OPENGL_SHADERMODULEGL_H_
#define BACKEND_OPENGL_SHADERMODULEGL_H_
#include "common/ShaderModule.h"
#include "glad/glad.h"
namespace backend {
namespace opengl {
class Device;
std::string GetBindingName(uint32_t group, uint32_t binding);
// Identifies one binding slot: which bind group it belongs to and its
// binding number within that group.
struct BindingLocation {
uint32_t group;
uint32_t binding;
};
bool operator < (const BindingLocation& a, const BindingLocation& b);
// A sampler/texture pair that SPIRV-Cross fused into a single GLSL combined
// image-sampler; GetName() is the GLSL identifier the fused variable got.
struct CombinedSampler {
BindingLocation samplerLocation;
BindingLocation textureLocation;
std::string GetName() const;
};
bool operator < (const CombinedSampler& a, const CombinedSampler& b);
// OpenGL shader module: holds the GLSL produced from the module's SPIR-V and
// the combined image-sampler mapping created during translation.
class ShaderModule : public ShaderModuleBase {
public:
ShaderModule(Device* device, ShaderModuleBuilder* builder);
using CombinedSamplerInfo = std::vector<CombinedSampler>;
// NUL-terminated GLSL source; owned by this module.
const char* GetSource() const;
const CombinedSamplerInfo& GetCombinedSamplerInfo() const;
private:
Device* device;
CombinedSamplerInfo combinedInfo;
std::string glslSource;
};
}
}
#endif // BACKEND_OPENGL_SHADERMODULEGL_H_

View File

@@ -0,0 +1,86 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "TextureGL.h"
#include <algorithm>
#include <vector>
namespace backend {
namespace opengl {
namespace {
// Converts an nxt::TextureDimension into the GL texture binding target.
GLenum TargetForDimension(nxt::TextureDimension dimension) {
    switch (dimension) {
        case nxt::TextureDimension::e2D:
            return GL_TEXTURE_2D;
    }
    // Unreachable for valid enum values; present so control cannot fall off
    // the end of a non-void function (-Wreturn-type, UB if it ever happened).
    return GL_TEXTURE_2D;
}
// Converts an nxt::TextureFormat into the GL internal format plus the
// format/type pair used for pixel uploads.
TextureFormatInfo GetGLFormatInfo(nxt::TextureFormat format) {
    switch (format) {
        case nxt::TextureFormat::R8G8B8A8Unorm:
            return {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE};
    }
    // Unreachable for valid enum values; present so control cannot fall off
    // the end of a non-void function (-Wreturn-type, UB if it ever happened).
    return {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE};
}
}
// Texture
// Creates and allocates the GL texture: one glTexImage2D per mip level with
// dimensions halved (clamped to 1) at each level.
// NOTE(review): no matching glDeleteTextures is visible in this file -
// confirm the texture object's lifetime is handled elsewhere.
Texture::Texture(Device* device, TextureBuilder* builder)
: TextureBase(builder), device(device) {
target = TargetForDimension(GetDimension());
uint32_t width = GetWidth();
uint32_t height = GetHeight();
uint32_t levels = GetNumMipLevels();
auto formatInfo = GetGLFormatInfo(GetFormat());
glGenTextures(1, &handle);
glBindTexture(target, handle);
// Allocate storage for every mip level (data pointer is null: no upload).
for (uint32_t i = 0; i < levels; ++i) {
glTexImage2D(target, i, formatInfo.internalFormat, width, height, 0, formatInfo.format, formatInfo.type, nullptr);
width = std::max(uint32_t(1), width / 2);
height = std::max(uint32_t(1), height / 2);
}
// The texture is not complete if it uses mipmapping and not all levels up to
// MAX_LEVEL have been defined.
glTexParameteri(target, GL_TEXTURE_MAX_LEVEL, levels - 1);
}
// Returns the GL texture object name.
GLuint Texture::GetHandle() const {
return handle;
}
// Returns the GL binding target (e.g. GL_TEXTURE_2D) derived from the
// texture's dimension.
GLenum Texture::GetGLTarget() const {
return target;
}
// Returns the GL format triple (internal format, format, type) for this
// texture's format; recomputed on each call.
TextureFormatInfo Texture::GetGLFormat() const {
return GetGLFormatInfo(GetFormat());
}
// TextureView
// Texture views create no GL object in this backend; the view only carries
// the base-class state.
TextureView::TextureView(Device* device, TextureViewBuilder* builder)
: TextureViewBase(builder), device(device) {
}
}
}

View File

@@ -0,0 +1,59 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_OPENGL_TEXTUREGL_H_
#define BACKEND_OPENGL_TEXTUREGL_H_
#include "common/Texture.h"
#include "glad/glad.h"
namespace backend {
namespace opengl {
class Device;
// The GL description of a texture format: sized internal format plus the
// (format, type) pair used for pixel transfer operations.
struct TextureFormatInfo {
GLenum internalFormat;
GLenum format;
GLenum type;
};
// OpenGL texture: owns a GL texture object with storage allocated for all
// mip levels at construction.
class Texture : public TextureBase {
public:
Texture(Device* device, TextureBuilder* builder);
GLuint GetHandle() const;
GLenum GetGLTarget() const;
TextureFormatInfo GetGLFormat() const;
private:
Device* device;
GLuint handle;
GLenum target;
};
// OpenGL texture view; holds no GL object of its own.
class TextureView : public TextureViewBase {
public:
TextureView(Device* device, TextureViewBuilder* builder);
private:
Device* device;
};
}
}
#endif // BACKEND_OPENGL_TEXTUREGL_H_

View File

@@ -0,0 +1,85 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include "common/BitSetIterator.h"
// This is ANGLE's BitSetIterator_unittests.cpp file.
using namespace backend;
// Fixture providing a 40-bit set shared by the IterateBitSet tests below.
class BitSetIteratorTest : public testing::Test {
protected:
std::bitset<40> mStateBits;
};
// Simple iterator test.
TEST_F(BitSetIteratorTest, Iterator) {
// Set a few scattered bits, then check iteration visits exactly those
// bits, each once.
std::set<unsigned long> originalValues;
originalValues.insert(2);
originalValues.insert(6);
originalValues.insert(8);
originalValues.insert(35);
for (unsigned long value : originalValues) {
mStateBits.set(value);
}
std::set<unsigned long> readValues;
for (unsigned long bit : IterateBitSet(mStateBits)) {
EXPECT_EQ(1u, originalValues.count(bit));
EXPECT_EQ(0u, readValues.count(bit));
readValues.insert(bit);
}
// Same size + no duplicates + subset => the two sets are equal.
EXPECT_EQ(originalValues.size(), readValues.size());
}
// Test an empty iterator.
TEST_F(BitSetIteratorTest, EmptySet) {
// We don't use the FAIL gtest macro here since it returns immediately,
// causing an unreachable code warning in MSVS
bool sawBit = false;
// The fixture's bitset is untouched, so the loop body must never run.
for (unsigned long bit : IterateBitSet(mStateBits)) {
sawBit = true;
}
EXPECT_FALSE(sawBit);
}
// Test iterating a result of combining two bitsets.
TEST_F(BitSetIteratorTest, NonLValueBitset) {
// Iterate a temporary (mStateBits & otherBits): checks IterateBitSet
// works on rvalues, and that the result is the intersection {1, 3}.
std::bitset<40> otherBits;
mStateBits.set(1);
mStateBits.set(2);
mStateBits.set(3);
mStateBits.set(4);
otherBits.set(0);
otherBits.set(1);
otherBits.set(3);
otherBits.set(5);
std::set<unsigned long> seenBits;
for (unsigned long bit : IterateBitSet(mStateBits & otherBits)) {
EXPECT_EQ(0u, seenBits.count(bit));
seenBits.insert(bit);
EXPECT_TRUE(mStateBits[bit]);
EXPECT_TRUE(otherBits[bit]);
}
EXPECT_EQ((mStateBits & otherBits).count(), seenBits.size());
}

View File

@@ -0,0 +1,361 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include "common/CommandAllocator.h"
using namespace backend;
// Definition of the command types used in the tests
enum class CommandType {
Draw,
Pipeline,
PushConstants,
Big,
Small,
};
// Payload for a CommandType::Draw command.
struct CommandDraw {
uint32_t first;
uint32_t count;
};
// Payload for a CommandType::Pipeline command.
struct CommandPipeline {
uint64_t pipeline;
uint32_t attachmentPoint;
};
// Payload for a CommandType::PushConstants command; the constant values
// themselves are appended separately with AllocateData.
struct CommandPushConstants {
uint8_t size;
uint8_t offset;
};
// Deliberately huge payload (64Ki uint32_t = 256KiB) to exercise large
// allocations in the allocator.
constexpr int kBigBufferSize = 65536;
struct CommandBig {
uint32_t buffer[kBigBufferSize];
};
// Minimal payload to exercise dense packing of many small commands.
struct CommandSmall {
uint16_t data;
};
// Test allocating nothing works
TEST(CommandAllocator, DoNothingAllocator) {
// Construction + destruction with no allocations must be safe.
CommandAllocator allocator;
}
// Test iterating over nothing works
TEST(CommandAllocator, DoNothingAllocatorWithIterator) {
// Moving an empty allocator into an iterator and tearing it down must be safe.
CommandAllocator allocator;
CommandIterator iterator(std::move(allocator));
iterator.DataWasDestroyed();
}
// Test basic usage of allocator + iterator
TEST(CommandAllocator, Basic) {
CommandAllocator allocator;
uint64_t myPipeline = 0xDEADBEEFBEEFDEAD;
uint32_t myAttachmentPoint = 2;
uint32_t myFirst = 42;
uint32_t myCount = 16;
// Record two commands of different types.
{
CommandPipeline* pipeline = allocator.Allocate<CommandPipeline>(CommandType::Pipeline);
pipeline->pipeline = myPipeline;
pipeline->attachmentPoint = myAttachmentPoint;
CommandDraw* draw = allocator.Allocate<CommandDraw>(CommandType::Draw);
draw->first = myFirst;
draw->count = myCount;
}
// Replay: the commands come back in recording order with intact payloads.
{
CommandIterator iterator(std::move(allocator));
CommandType type;
bool hasNext = iterator.NextCommandId(&type);
ASSERT_TRUE(hasNext);
ASSERT_EQ(type, CommandType::Pipeline);
CommandPipeline* pipeline = iterator.NextCommand<CommandPipeline>();
ASSERT_EQ(pipeline->pipeline, myPipeline);
ASSERT_EQ(pipeline->attachmentPoint, myAttachmentPoint);
hasNext = iterator.NextCommandId(&type);
ASSERT_TRUE(hasNext);
ASSERT_EQ(type, CommandType::Draw);
CommandDraw* draw = iterator.NextCommand<CommandDraw>();
ASSERT_EQ(draw->first, myFirst);
ASSERT_EQ(draw->count, myCount);
hasNext = iterator.NextCommandId(&type);
ASSERT_FALSE(hasNext);
iterator.DataWasDestroyed();
}
}
// Test basic usage of allocator + iterator with data
TEST(CommandAllocator, BasicWithData) {
CommandAllocator allocator;
uint8_t mySize = 8;
uint8_t myOffset = 3;
uint32_t myValues[5] = {6, 42, 0xFFFFFFFF, 0, 54};
// Record one command followed by a variable-size data payload.
{
CommandPushConstants* pushConstants = allocator.Allocate<CommandPushConstants>(CommandType::PushConstants);
pushConstants->size = mySize;
pushConstants->offset = myOffset;
uint32_t* values = allocator.AllocateData<uint32_t>(5);
for (size_t i = 0; i < 5; i++) {
values[i] = myValues[i];
}
}
// Replay: command and trailing data are read back in the same order.
{
CommandIterator iterator(std::move(allocator));
CommandType type;
bool hasNext = iterator.NextCommandId(&type);
ASSERT_TRUE(hasNext);
ASSERT_EQ(type, CommandType::PushConstants);
CommandPushConstants* pushConstants = iterator.NextCommand<CommandPushConstants>();
ASSERT_EQ(pushConstants->size, mySize);
ASSERT_EQ(pushConstants->offset, myOffset);
uint32_t* values = iterator.NextData<uint32_t>(5);
for (size_t i = 0; i < 5; i++) {
ASSERT_EQ(values[i], myValues[i]);
}
hasNext = iterator.NextCommandId(&type);
ASSERT_FALSE(hasNext);
iterator.DataWasDestroyed();
}
}
// Test basic iterating several times
TEST(CommandAllocator, MultipleIterations) {
CommandAllocator allocator;
uint32_t myFirst = 42;
uint32_t myCount = 16;
CommandDraw* draw = allocator.Allocate<CommandDraw>(CommandType::Draw);
draw->first = myFirst;
draw->count = myCount;
// Exhausting the iterator (NextCommandId returning false) implicitly
// rewinds it, so a second full pass sees the same command again.
{
CommandIterator iterator(std::move(allocator));
CommandType type;
// First iteration
bool hasNext = iterator.NextCommandId(&type);
ASSERT_TRUE(hasNext);
ASSERT_EQ(type, CommandType::Draw);
CommandDraw* draw = iterator.NextCommand<CommandDraw>();
ASSERT_EQ(draw->first, myFirst);
ASSERT_EQ(draw->count, myCount);
hasNext = iterator.NextCommandId(&type);
ASSERT_FALSE(hasNext);
// Second iteration
hasNext = iterator.NextCommandId(&type);
ASSERT_TRUE(hasNext);
ASSERT_EQ(type, CommandType::Draw);
draw = iterator.NextCommand<CommandDraw>();
ASSERT_EQ(draw->first, myFirst);
ASSERT_EQ(draw->count, myCount);
hasNext = iterator.NextCommandId(&type);
ASSERT_FALSE(hasNext);
iterator.DataWasDestroyed();
}
}
// Test large commands work
TEST(CommandAllocator, LargeCommands) {
CommandAllocator allocator;
const int kCommandCount = 5;
// Fill each 256KiB command with a running counter so replay can verify
// every word survived intact.
int count = 0;
for (int i = 0; i < kCommandCount; i++) {
CommandBig* big = allocator.Allocate<CommandBig>(CommandType::Big);
for (int j = 0; j < kBigBufferSize; j++) {
big->buffer[j] = count ++;
}
}
CommandIterator iterator(std::move(allocator));
CommandType type;
count = 0;
int numCommands = 0;
while (iterator.NextCommandId(&type)) {
ASSERT_EQ(type, CommandType::Big);
CommandBig* big = iterator.NextCommand<CommandBig>();
for (int i = 0; i < kBigBufferSize; i++) {
ASSERT_EQ(big->buffer[i], count);
count ++;
}
numCommands ++;
}
ASSERT_EQ(numCommands, kCommandCount);
iterator.DataWasDestroyed();
}
// Test many small commands work
TEST(CommandAllocator, ManySmallCommands) {
CommandAllocator allocator;
// Stay under max representable uint16_t
// (CommandSmall::data is uint16_t, so the counter must not exceed 65535).
const int kCommandCount = 50000;
int count = 0;
for (int i = 0; i < kCommandCount; i++) {
CommandSmall* small = allocator.Allocate<CommandSmall>(CommandType::Small);
small->data = count ++;
}
CommandIterator iterator(std::move(allocator));
CommandType type;
count = 0;
int numCommands = 0;
while (iterator.NextCommandId(&type)) {
ASSERT_EQ(type, CommandType::Small);
CommandSmall* small = iterator.NextCommand<CommandSmall>();
ASSERT_EQ(small->data, count);
count ++;
numCommands ++;
}
ASSERT_EQ(numCommands, kCommandCount);
iterator.DataWasDestroyed();
}
// ________
// / \
// | POUIC! |
// \_ ______/
// v
// ()_()
// (O.o)
// (> <)o
// Test usage of iterator.Reset
TEST(CommandAllocator, IteratorReset) {
CommandAllocator allocator;
uint64_t myPipeline = 0xDEADBEEFBEEFDEAD;
uint32_t myAttachmentPoint = 2;
uint32_t myFirst = 42;
uint32_t myCount = 16;
{
CommandPipeline* pipeline = allocator.Allocate<CommandPipeline>(CommandType::Pipeline);
pipeline->pipeline = myPipeline;
pipeline->attachmentPoint = myAttachmentPoint;
CommandDraw* draw = allocator.Allocate<CommandDraw>(CommandType::Draw);
draw->first = myFirst;
draw->count = myCount;
}
{
CommandIterator iterator(std::move(allocator));
CommandType type;
// Read the first command...
bool hasNext = iterator.NextCommandId(&type);
ASSERT_TRUE(hasNext);
ASSERT_EQ(type, CommandType::Pipeline);
CommandPipeline* pipeline = iterator.NextCommand<CommandPipeline>();
ASSERT_EQ(pipeline->pipeline, myPipeline);
ASSERT_EQ(pipeline->attachmentPoint, myAttachmentPoint);
// ...then Reset() mid-stream must rewind to the beginning so a full
// pass over both commands succeeds.
iterator.Reset();
hasNext = iterator.NextCommandId(&type);
ASSERT_TRUE(hasNext);
ASSERT_EQ(type, CommandType::Pipeline);
pipeline = iterator.NextCommand<CommandPipeline>();
ASSERT_EQ(pipeline->pipeline, myPipeline);
ASSERT_EQ(pipeline->attachmentPoint, myAttachmentPoint);
hasNext = iterator.NextCommandId(&type);
ASSERT_TRUE(hasNext);
ASSERT_EQ(type, CommandType::Draw);
CommandDraw* draw = iterator.NextCommand<CommandDraw>();
ASSERT_EQ(draw->first, myFirst);
ASSERT_EQ(draw->count, myCount);
hasNext = iterator.NextCommandId(&type);
ASSERT_FALSE(hasNext);
iterator.DataWasDestroyed();
}
}
// Test iterating empty iterators
TEST(CommandAllocator, EmptyIterator) {
// Iterator built from an empty allocator.
{
CommandAllocator allocator;
CommandIterator iterator(std::move(allocator));
CommandType type;
bool hasNext = iterator.NextCommandId(&type);
ASSERT_FALSE(hasNext);
iterator.DataWasDestroyed();
}
// Iterator move-constructed from another (empty) iterator.
{
CommandAllocator allocator;
CommandIterator iterator1(std::move(allocator));
CommandIterator iterator2(std::move(iterator1));
CommandType type;
bool hasNext = iterator2.NextCommandId(&type);
ASSERT_FALSE(hasNext);
iterator1.DataWasDestroyed();
iterator2.DataWasDestroyed();
}
// Default-constructed iterator, then moved-from.
{
CommandIterator iterator1;
CommandIterator iterator2(std::move(iterator1));
CommandType type;
bool hasNext = iterator2.NextCommandId(&type);
ASSERT_FALSE(hasNext);
iterator1.DataWasDestroyed();
iterator2.DataWasDestroyed();
}
}

View File

@@ -0,0 +1,85 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include "common/Math.h"
using namespace backend;
// Tests for ScanForward
// ScanForward is expected to return the index of the lowest set bit.
TEST(Math, ScanForward) {
// Test extrema
ASSERT_EQ(ScanForward(1), 0);
ASSERT_EQ(ScanForward(0x8000000000000000), 63);
// Test with more than one bit set.
ASSERT_EQ(ScanForward(256), 8);
ASSERT_EQ(ScanForward(256 + 32), 5);
ASSERT_EQ(ScanForward(1024 + 256 + 32), 5);
}
// Tests for Log2
// Log2 is expected to return floor(log2(x)), i.e. the index of the highest
// set bit.
TEST(Math, Log2) {
// Test extrema
ASSERT_EQ(Log2(1), 0);
ASSERT_EQ(Log2(0xFFFFFFFF), 31);
// Test boundary between two logs
ASSERT_EQ(Log2(0x80000000), 31);
ASSERT_EQ(Log2(0x7FFFFFFF), 30);
ASSERT_EQ(Log2(16), 4);
ASSERT_EQ(Log2(15), 3);
}
// Tests for IsPowerOfTwo
// IsPowerOfTwo: exactly one bit set.
TEST(Math, IsPowerOfTwo) {
ASSERT_TRUE(IsPowerOfTwo(1));
ASSERT_TRUE(IsPowerOfTwo(2));
ASSERT_FALSE(IsPowerOfTwo(3));
ASSERT_TRUE(IsPowerOfTwo(0x8000000));
ASSERT_FALSE(IsPowerOfTwo(0x8000400));
}
// Tests for Align
// Align(ptr, a) must round the pointer UP to the next multiple of `a`,
// moving it forward by less than `a`.
TEST(Math, Align) {
constexpr size_t kTestAlignment = 8;
char buffer[kTestAlignment * 4];
for (size_t i = 0; i < 2 * kTestAlignment; ++i) {
char* unaligned = &buffer[i];
char* aligned = Align(unaligned, kTestAlignment);
ASSERT_GE(aligned - unaligned, 0);
ASSERT_LT(aligned - unaligned, kTestAlignment);
// The resulting address has its low log2(alignment) bits clear.
ASSERT_EQ(reinterpret_cast<intptr_t>(aligned) & (kTestAlignment -1), 0);
}
}
// Tests for IsAligned
// IsAligned(ptr, a) must agree with whether Align(ptr, a) is a no-op.
TEST(Math, IsAligned) {
constexpr size_t kTestAlignment = 8;
char buffer[kTestAlignment * 4];
for (size_t i = 0; i < 2 * kTestAlignment; ++i) {
char* unaligned = &buffer[i];
char* aligned = Align(unaligned, kTestAlignment);
ASSERT_EQ(IsAligned(unaligned, kTestAlignment), unaligned == aligned);
}
}

View File

@@ -0,0 +1,89 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include "common/PerStage.h"
using namespace backend;
// Tests for StageBit
// StageBit maps each ShaderStage enum value to its single-bit mask.
TEST(PerStage, StageBit) {
ASSERT_EQ(StageBit(nxt::ShaderStage::Vertex), nxt::ShaderStageBit::Vertex);
ASSERT_EQ(StageBit(nxt::ShaderStage::Fragment), nxt::ShaderStageBit::Fragment);
ASSERT_EQ(StageBit(nxt::ShaderStage::Compute), nxt::ShaderStageBit::Compute);
}
// Basic test for the PerStage container
// PerStage<T> is indexable both by ShaderStage and by the corresponding
// single-bit ShaderStageBit, addressing the same storage.
TEST(PerStage, PerStage) {
PerStage<int> data;
// Store data using nxt::ShaderStage
data[nxt::ShaderStage::Vertex] = 42;
data[nxt::ShaderStage::Fragment] = 3;
data[nxt::ShaderStage::Compute] = -1;
// Load it using nxt::ShaderStageBit
ASSERT_EQ(data[nxt::ShaderStageBit::Vertex], 42);
ASSERT_EQ(data[nxt::ShaderStageBit::Fragment], 3);
ASSERT_EQ(data[nxt::ShaderStageBit::Compute], -1);
}
// Test IterateStages with kAllStages
// IterateStages(kAllStages) visits each stage exactly once.
TEST(PerStage, IterateAllStages) {
PerStage<int> counts;
counts[nxt::ShaderStage::Vertex] = 0;
counts[nxt::ShaderStage::Fragment] = 0;
counts[nxt::ShaderStage::Compute] = 0;
for (auto stage : IterateStages(kAllStages)) {
counts[stage] ++;
}
ASSERT_EQ(counts[nxt::ShaderStageBit::Vertex], 1);
ASSERT_EQ(counts[nxt::ShaderStageBit::Fragment], 1);
ASSERT_EQ(counts[nxt::ShaderStageBit::Compute], 1);
}
// Test IterateStages with one stage
// A single-bit mask visits only that stage.
TEST(PerStage, IterateOneStage) {
PerStage<int> counts;
counts[nxt::ShaderStage::Vertex] = 0;
counts[nxt::ShaderStage::Fragment] = 0;
counts[nxt::ShaderStage::Compute] = 0;
for (auto stage : IterateStages(nxt::ShaderStageBit::Fragment)) {
counts[stage] ++;
}
ASSERT_EQ(counts[nxt::ShaderStageBit::Vertex], 0);
ASSERT_EQ(counts[nxt::ShaderStageBit::Fragment], 1);
ASSERT_EQ(counts[nxt::ShaderStageBit::Compute], 0);
}
// Test IterateStages with no stage
// An empty mask (Fragment & Vertex == 0) visits no stage at all.
TEST(PerStage, IterateNoStages) {
PerStage<int> counts;
counts[nxt::ShaderStage::Vertex] = 0;
counts[nxt::ShaderStage::Fragment] = 0;
counts[nxt::ShaderStage::Compute] = 0;
for (auto stage : IterateStages(nxt::ShaderStageBit::Fragment & nxt::ShaderStageBit::Vertex)) {
counts[stage] ++;
}
ASSERT_EQ(counts[nxt::ShaderStageBit::Vertex], 0);
ASSERT_EQ(counts[nxt::ShaderStageBit::Fragment], 0);
ASSERT_EQ(counts[nxt::ShaderStageBit::Compute], 0);
}

View File

@@ -0,0 +1,178 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include "common/RefCounted.h"
using namespace backend;
// Helper RefCounted subclass that reports its destruction through an optional
// out-parameter flag, so the tests can observe exactly when it is deleted.
struct RCTest : public RefCounted {
    RCTest() = default;

    // |deleted| is set to true when this object is destroyed. Marked explicit
    // so a bool* cannot silently convert to an RCTest.
    explicit RCTest(bool* deleted) : deleted(deleted) {
    }

    ~RCTest() override {
        if (deleted != nullptr) {
            *deleted = true;
        }
    }

    // Returns |this|; used to check that operator-> reaches the pointee.
    RCTest* GetThis() {
        return this;
    }

    // Destruction flag owned by the test; may be null.
    bool* deleted = nullptr;
};
// A freshly created RC carries one external reference; releasing it destroys the object.
TEST(RefCounted, StartsWithOneExternalRef) {
    bool wasDeleted = false;
    auto* rc = new RCTest(&wasDeleted);

    rc->Release();
    ASSERT_TRUE(wasDeleted);
}
// An internal reference keeps the object alive even after the external one is dropped.
TEST(RefCounted, InternalRefKeepsAlive) {
    bool wasDeleted = false;
    auto* rc = new RCTest(&wasDeleted);

    rc->ReferenceInternal();
    rc->Release();
    ASSERT_FALSE(wasDeleted);

    rc->ReleaseInternal();
    ASSERT_TRUE(wasDeleted);
}
// A Ref drops its internal reference when it goes out of scope, destroying the object.
TEST(Ref, EndOfScopeRemovesInternalRef) {
    bool wasDeleted = false;
    {
        Ref<RCTest> ref(new RCTest(&wasDeleted));
        ref->Release();
    }
    ASSERT_TRUE(wasDeleted);
}
// Get(), operator* and operator-> all expose the underlying raw pointer.
TEST(Ref, Gets) {
    RCTest* raw = new RCTest;
    Ref<RCTest> ref(raw);
    ref->Release();

    ASSERT_EQ(ref.Get(), raw);
    ASSERT_EQ(&*ref, raw);
    ASSERT_EQ(ref->GetThis(), raw);
}
// Test that a default-constructed Ref is null.
TEST(Ref, DefaultsToNull) {
    Ref<RCTest> test;

    // Only Get() is valid on a null Ref: `&*test` and `test->GetThis()` would
    // dereference a null pointer, which is undefined behavior even when the
    // called method never touches member state.
    ASSERT_EQ(test.Get(), nullptr);
}
// A Ref is usable as an if condition: non-null converts to true, null to false.
TEST(Ref, BoolConversion) {
    Ref<RCTest> nothing;
    Ref<RCTest> something(new RCTest);
    something->Release();

    if (something && !nothing) {
        // Expected: the filled Ref is truthy and the empty Ref is falsy.
    } else {
        ASSERT_TRUE(false);
    }
}
// Test Ref's copy constructor
TEST(Ref, CopyConstructor) {
    // Must start false: |deleted| is only written on destruction, so reading it
    // before then would be undefined behavior if it were left uninitialized.
    bool deleted = false;
    RCTest* original = new RCTest(&deleted);

    Ref<RCTest> source(original);
    Ref<RCTest> destination(source);
    original->Release();

    // Both Refs point at the same object.
    ASSERT_EQ(source.Get(), original);
    ASSERT_EQ(destination.Get(), original);

    // The object only dies once the last Ref lets go.
    source = nullptr;
    ASSERT_FALSE(deleted);
    destination = nullptr;
    ASSERT_TRUE(deleted);
}
// Test Ref's copy assignment
TEST(Ref, CopyAssignment) {
    // Must start false: |deleted| is only written on destruction, so reading it
    // before then would be undefined behavior if it were left uninitialized.
    bool deleted = false;
    RCTest* original = new RCTest(&deleted);

    Ref<RCTest> source(original);
    original->Release();

    Ref<RCTest> destination;
    destination = source;

    ASSERT_EQ(source.Get(), original);
    ASSERT_EQ(destination.Get(), original);

    source = nullptr;
    // This fails when address sanitizer is turned on
    ASSERT_FALSE(deleted);
    destination = nullptr;
    ASSERT_TRUE(deleted);
}
// Test Ref's move constructor
TEST(Ref, MoveConstructor) {
    // Must start false: |deleted| is only written on destruction, so reading it
    // before then would be undefined behavior if it were left uninitialized.
    bool deleted = false;
    RCTest* original = new RCTest(&deleted);

    Ref<RCTest> source(original);
    Ref<RCTest> destination(std::move(source));
    original->Release();

    // Moving empties the source and transfers ownership to the destination.
    ASSERT_EQ(source.Get(), nullptr);
    ASSERT_EQ(destination.Get(), original);

    ASSERT_FALSE(deleted);
    destination = nullptr;
    ASSERT_TRUE(deleted);
}
// Test Ref's move assignment
TEST(Ref, MoveAssignment) {
    // Must start false: |deleted| is only written on destruction, so reading it
    // before then would be undefined behavior if it were left uninitialized.
    bool deleted = false;
    RCTest* original = new RCTest(&deleted);

    Ref<RCTest> source(original);
    original->Release();

    Ref<RCTest> destination;
    destination = std::move(source);

    // Moving empties the source and transfers ownership to the destination.
    ASSERT_EQ(source.Get(), nullptr);
    ASSERT_EQ(destination.Get(), original);

    ASSERT_FALSE(deleted);
    destination = nullptr;
    ASSERT_TRUE(deleted);
}

View File

@@ -0,0 +1,89 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include "common/RefCounted.h"
#include "common/ToBackend.h"
#include <type_traits>
// Make our own Base - Backend object pair, reusing the CommandBuffer name
namespace backend {
    // Stand-in for a frontend object type; RefCounted so it can be held in a Ref.
    class CommandBufferBase : public RefCounted {
    };
}
using namespace backend;
// The "backend" object type corresponding to CommandBufferBase.
class MyCommandBuffer : public CommandBufferBase {
};
// Minimal traits struct mapping the frontend CommandBuffer type to our backend type.
struct MyBackendTraits {
    using CommandBufferType = MyCommandBuffer;
};
// Instantiate ToBackend for our "backend": forwards any pointer or Ref to the
// shared ToBackendBase with our traits, preserving const-ness and reference kind.
template<typename T>
auto ToBackend(T&& common) -> decltype(ToBackendBase<MyBackendTraits>(common)) {
    return ToBackendBase<MyBackendTraits>(common);
}
// Test that ToBackend correctly converts pointers to base classes.
TEST(ToBackend, Pointers) {
    {
        MyCommandBuffer* cmdBuf = new MyCommandBuffer;
        const CommandBufferBase* base = cmdBuf;

        // const on the frontend pointer is preserved on the backend pointer.
        auto backendCmdBuf = ToBackend(base);
        static_assert(std::is_same<decltype(backendCmdBuf), const MyCommandBuffer*>::value, "");
        ASSERT_EQ(cmdBuf, backendCmdBuf);

        cmdBuf->Release();
    }
    {
        MyCommandBuffer* cmdBuf = new MyCommandBuffer;
        CommandBufferBase* base = cmdBuf;

        // A non-const frontend pointer stays non-const.
        auto backendCmdBuf = ToBackend(base);
        static_assert(std::is_same<decltype(backendCmdBuf), MyCommandBuffer*>::value, "");
        ASSERT_EQ(cmdBuf, backendCmdBuf);

        cmdBuf->Release();
    }
}
// Test that ToBackend correctly converts Refs to base classes.
TEST(ToBackend, Ref) {
    {
        MyCommandBuffer* cmdBuf = new MyCommandBuffer;
        const Ref<CommandBufferBase> base(cmdBuf);

        // A const Ref converts to a const Ref reference; no copy is made.
        const auto& backendCmdBuf = ToBackend(base);
        static_assert(std::is_same<decltype(ToBackend(base)), const Ref<MyCommandBuffer>&>::value, "");
        ASSERT_EQ(cmdBuf, backendCmdBuf.Get());

        cmdBuf->Release();
    }
    {
        MyCommandBuffer* cmdBuf = new MyCommandBuffer;
        Ref<CommandBufferBase> base(cmdBuf);

        // ToBackend yields a non-const Ref reference; `auto` here copies it,
        // which takes an extra internal reference until backendCmdBuf dies.
        auto backendCmdBuf = ToBackend(base);
        static_assert(std::is_same<decltype(ToBackend(base)), Ref<MyCommandBuffer>&>::value, "");
        ASSERT_EQ(cmdBuf, backendCmdBuf.Get());

        cmdBuf->Release();
    }
}

View File

@@ -0,0 +1,20 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
int main(int argc, char** argv) {
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}