Consistent formatting for Dawn/Tint.
This CL updates the clang-format files to have a single shared format between Dawn and Tint. The major changes: indentation is 4 spaces, lines are 100 columns, and namespaces are not indented.

Bug: dawn:1339
Change-Id: I4208742c95643998d9fd14e77a9cc558071ded39
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/87603
Commit-Queue: Dan Sinclair <dsinclair@chromium.org>
Reviewed-by: Corentin Wallez <cwallez@chromium.org>
Kokoro: Kokoro <noreply+kokoro@google.com>
parent 73b1d1dafa
commit 41e4d9a34c
@@ -1,8 +1,5 @@
 # http://clang.llvm.org/docs/ClangFormatStyleOptions.html
 BasedOnStyle: Chromium
 Standard: Cpp11
 
-AllowShortFunctionsOnASingleLine: false
-
 ColumnLimit: 100
-
@@ -11,10 +8,3 @@ IndentWidth: 4
 ObjCBlockIndentWidth: 4
 AccessModifierOffset: -2
 
-CompactNamespaces: true
-
-# This should result in only one indentation level with compacted namespaces
-NamespaceIndentation: All
-
-# Use this option once clang-format 6 is out.
-IndentPPDirectives: AfterHash
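Read together, these two hunks leave a much smaller shared config. A sketch of the resulting .clang-format, assuming the hunks above are this file's only changes in the CL:

    # http://clang.llvm.org/docs/ClangFormatStyleOptions.html
    BasedOnStyle: Chromium
    Standard: Cpp11

    ColumnLimit: 100

    IndentWidth: 4
    ObjCBlockIndentWidth: 4
    AccessModifierOffset: -2

Dropping AllowShortFunctionsOnASingleLine: false drives most of the rest of this diff: the inherited Chromium style allows short functions on a single line, so clang-format collapses the many small accessors seen below.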
@@ -121,7 +121,7 @@ def _NonInclusiveFileFilter(file):
         "third_party/khronos/KHR/khrplatform.h",  # Third party file
         "tools/roll-all",  # Branch name
         "tools/src/container/key.go",  # External URL
-        "tools/src/go.sum",  # External URL
+        "go.sum",  # External URL
     ]
     return file.LocalPath() not in filter_list
 
@@ -1 +0,0 @@
-filter=-runtime/indentation_namespace
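The deleted CPPLINT.cfg above (and an identical one later in the diff) suppressed cpplint's runtime/indentation_namespace check, which was only needed while namespace bodies were indented. A before/after sketch; the class name is illustrative:

    // Old format (NamespaceIndentation: All):
    namespace dawn {
        class Adapter;
    }  // namespace dawn

    // New format (namespace contents not indented), which is what the
    // runtime/indentation_namespace check expects, so the suppression can go:
    namespace dawn {
    class Adapter;
    }  // namespace dawn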
@@ -45,9 +45,7 @@ namespace dawn {
 struct LowerBitmask<T, typename std::enable_if<IsDawnBitmask<T>::enable>::type> {
     static constexpr bool enable = true;
     using type = T;
-    constexpr static T Lower(T t) {
-        return t;
-    }
+    constexpr static T Lower(T t) { return t; }
 };
 
 template <typename T>
@@ -55,14 +53,9 @@ namespace dawn {
     using Integral = typename std::underlying_type<T>::type;
 
     // NOLINTNEXTLINE(runtime/explicit)
-    constexpr BoolConvertible(Integral value) : value(value) {
-    }
-    constexpr operator bool() const {
-        return value != 0;
-    }
-    constexpr operator T() const {
-        return static_cast<T>(value);
-    }
+    constexpr BoolConvertible(Integral value) : value(value) {}
+    constexpr operator bool() const { return value != 0; }
+    constexpr operator T() const { return static_cast<T>(value); }
 
     Integral value;
 };
@@ -71,15 +64,13 @@ namespace dawn {
 struct LowerBitmask<BoolConvertible<T>> {
     static constexpr bool enable = true;
     using type = T;
-    static constexpr type Lower(BoolConvertible<T> t) {
-        return t;
-    }
+    static constexpr type Lower(BoolConvertible<T> t) { return t; }
 };
 
-template <typename T1,
-          typename T2,
-          typename = typename std::enable_if<LowerBitmask<T1>::enable &&
-                                             LowerBitmask<T2>::enable>::type>
+template <
+    typename T1,
+    typename T2,
+    typename = typename std::enable_if<LowerBitmask<T1>::enable && LowerBitmask<T2>::enable>::type>
 constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator|(T1 left, T2 right) {
     using T = typename LowerBitmask<T1>::type;
     using Integral = typename std::underlying_type<T>::type;
@@ -87,10 +78,10 @@ namespace dawn {
         static_cast<Integral>(LowerBitmask<T2>::Lower(right));
 }
 
-template <typename T1,
-          typename T2,
-          typename = typename std::enable_if<LowerBitmask<T1>::enable &&
-                                             LowerBitmask<T2>::enable>::type>
+template <
+    typename T1,
+    typename T2,
+    typename = typename std::enable_if<LowerBitmask<T1>::enable && LowerBitmask<T2>::enable>::type>
 constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator&(T1 left, T2 right) {
     using T = typename LowerBitmask<T1>::type;
     using Integral = typename std::underlying_type<T>::type;
@@ -98,10 +89,10 @@ namespace dawn {
         static_cast<Integral>(LowerBitmask<T2>::Lower(right));
 }
 
-template <typename T1,
-          typename T2,
-          typename = typename std::enable_if<LowerBitmask<T1>::enable &&
-                                             LowerBitmask<T2>::enable>::type>
+template <
+    typename T1,
+    typename T2,
+    typename = typename std::enable_if<LowerBitmask<T1>::enable && LowerBitmask<T2>::enable>::type>
 constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator^(T1 left, T2 right) {
     using T = typename LowerBitmask<T1>::type;
     using Integral = typename std::underlying_type<T>::type;
@@ -116,30 +107,30 @@ namespace dawn {
     return ~static_cast<Integral>(LowerBitmask<T1>::Lower(t));
 }
 
-template <typename T,
-          typename T2,
-          typename = typename std::enable_if<IsDawnBitmask<T>::enable &&
-                                             LowerBitmask<T2>::enable>::type>
+template <
+    typename T,
+    typename T2,
+    typename = typename std::enable_if<IsDawnBitmask<T>::enable && LowerBitmask<T2>::enable>::type>
 constexpr T& operator&=(T& l, T2 right) {
     T r = LowerBitmask<T2>::Lower(right);
     l = l & r;
     return l;
 }
 
-template <typename T,
-          typename T2,
-          typename = typename std::enable_if<IsDawnBitmask<T>::enable &&
-                                             LowerBitmask<T2>::enable>::type>
+template <
+    typename T,
+    typename T2,
+    typename = typename std::enable_if<IsDawnBitmask<T>::enable && LowerBitmask<T2>::enable>::type>
 constexpr T& operator|=(T& l, T2 right) {
     T r = LowerBitmask<T2>::Lower(right);
     l = l | r;
     return l;
 }
 
-template <typename T,
-          typename T2,
-          typename = typename std::enable_if<IsDawnBitmask<T>::enable &&
-                                             LowerBitmask<T2>::enable>::type>
+template <
+    typename T,
+    typename T2,
+    typename = typename std::enable_if<IsDawnBitmask<T>::enable && LowerBitmask<T2>::enable>::type>
 constexpr T& operator^=(T& l, T2 right) {
     T r = LowerBitmask<T2>::Lower(right);
     l = l ^ r;
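To see what these reflowed operator templates provide, here is a hedged usage sketch; Flags and its enumerators are illustrative, not part of the diff:

    enum class Flags : uint32_t {
        None = 0,
        A = 1,
        B = 2,
    };

    template <>
    struct IsDawnBitmask<Flags> {
        static constexpr bool enable = true;
    };

    void Example() {
        Flags f = Flags::A | Flags::B;  // operator| returns a BoolConvertible
        f &= ~Flags::A;                 // compound operators Lower() each operand first
        if (f & Flags::B) {             // BoolConvertible's operator bool()
            // ...
        }
    }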
@@ -48,8 +48,8 @@ namespace dawn::native::metal {
     uint32_t plane;
 };
 
-DAWN_NATIVE_EXPORT WGPUTexture
-WrapIOSurface(WGPUDevice device, const ExternalImageDescriptorIOSurface* descriptor);
+DAWN_NATIVE_EXPORT WGPUTexture WrapIOSurface(WGPUDevice device,
+                                             const ExternalImageDescriptorIOSurface* descriptor);
 
 // When making Metal interop with other APIs, we need to be careful that QueueSubmit doesn't
 // mean that the operations will be visible to other APIs/Metal devices right away. macOS
@@ -35,8 +35,9 @@ namespace dawn::native::opengl {
 };
 
 using PresentCallback = void (*)(void*);
-DAWN_NATIVE_EXPORT DawnSwapChainImplementation
-CreateNativeSwapChainImpl(WGPUDevice device, PresentCallback present, void* presentUserdata);
+DAWN_NATIVE_EXPORT DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
+                                                                         PresentCallback present,
+                                                                         void* presentUserdata);
 DAWN_NATIVE_EXPORT WGPUTextureFormat
 GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain);
 
@@ -28,8 +28,8 @@ namespace dawn::native::vulkan {
 
 DAWN_NATIVE_EXPORT PFN_vkVoidFunction GetInstanceProcAddr(WGPUDevice device, const char* pName);
 
-DAWN_NATIVE_EXPORT DawnSwapChainImplementation
-CreateNativeSwapChainImpl(WGPUDevice device, ::VkSurfaceKHR surface);
+DAWN_NATIVE_EXPORT DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
+                                                                         ::VkSurfaceKHR surface);
 DAWN_NATIVE_EXPORT WGPUTextureFormat
 GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain);
 
@@ -101,8 +101,7 @@ namespace dawn::platform {
     // The |fingerprint| is provided by Dawn to inform the client to discard the Dawn caches
     // when the fingerprint changes. The returned CachingInterface is expected to outlive the
     // device which uses it to persistently cache objects.
-    virtual CachingInterface* GetCachingInterface(const void* fingerprint,
-                                                  size_t fingerprintSize);
+    virtual CachingInterface* GetCachingInterface(const void* fingerprint, size_t fingerprintSize);
     virtual std::unique_ptr<WorkerTaskPool> CreateWorkerTaskPool();
 
   private:
@@ -53,8 +53,7 @@ namespace dawn::wire {
 DAWN_WIRE_EXPORT size_t
 SerializedWGPUDevicePropertiesSize(const WGPUDeviceProperties* deviceProperties);
 
-DAWN_WIRE_EXPORT void SerializeWGPUDeviceProperties(
-    const WGPUDeviceProperties* deviceProperties,
-    char* serializeBuffer);
+DAWN_WIRE_EXPORT void SerializeWGPUDeviceProperties(const WGPUDeviceProperties* deviceProperties,
+                                                    char* serializeBuffer);
 
 DAWN_WIRE_EXPORT bool DeserializeWGPUDeviceProperties(WGPUDeviceProperties* deviceProperties,
@@ -160,9 +160,7 @@ namespace dawn::wire {
     // the subrange (offset, offset + size) of the allocation at buffer unmap
     // This subrange is always the whole mapped region for now
    // There could be nothing to be serialized (if using shared memory)
-    virtual void SerializeDataUpdate(void* serializePointer,
-                                     size_t offset,
-                                     size_t size) = 0;
+    virtual void SerializeDataUpdate(void* serializePointer, size_t offset, size_t size) = 0;
 
   private:
    WriteHandle(const WriteHandle&) = delete;
@@ -1,2 +0,0 @@
-# http://clang.llvm.org/docs/ClangFormatStyleOptions.html
-BasedOnStyle: Chromium
@@ -1 +0,0 @@
-filter=-runtime/indentation_namespace
@@ -62,24 +62,18 @@ class BitSetIterator final {
         uint32_t mOffset;
     };
 
-    Iterator begin() const {
-        return Iterator(mBits);
-    }
-    Iterator end() const {
-        return Iterator(std::bitset<N>(0));
-    }
+    Iterator begin() const { return Iterator(mBits); }
+    Iterator end() const { return Iterator(std::bitset<N>(0)); }
 
   private:
     const std::bitset<N> mBits;
 };
 
 template <size_t N, typename T>
-BitSetIterator<N, T>::BitSetIterator(const std::bitset<N>& bitset) : mBits(bitset) {
-}
+BitSetIterator<N, T>::BitSetIterator(const std::bitset<N>& bitset) : mBits(bitset) {}
 
 template <size_t N, typename T>
-BitSetIterator<N, T>::BitSetIterator(const BitSetIterator& other) : mBits(other.mBits) {
-}
+BitSetIterator<N, T>::BitSetIterator(const BitSetIterator& other) : mBits(other.mBits) {}
 
 template <size_t N, typename T>
 BitSetIterator<N, T>& BitSetIterator<N, T>::operator=(const BitSetIterator& other) {
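A minimal usage sketch for the iterator above, assuming (as its begin()/end() suggest) that iteration yields the indices of set bits as T:

    #include <bitset>

    void Example() {
        std::bitset<8> bits;
        bits.set(1);
        bits.set(5);
        for (uint32_t i : BitSetIterator<8, uint32_t>(bits)) {
            // visits i == 1 and i == 5
        }
    }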
@@ -22,12 +22,8 @@
 template <typename T>
 struct CoreFoundationRefTraits {
     static constexpr T kNullValue = nullptr;
-    static void Reference(T value) {
-        CFRetain(value);
-    }
-    static void Release(T value) {
-        CFRelease(value);
-    }
+    static void Reference(T value) { CFRetain(value); }
+    static void Release(T value) { CFRelease(value); }
 };
 
 template <typename T>
@@ -25,17 +25,17 @@ namespace gpu_info {
 // Referenced from the following Mesa source code:
 // https://github.com/mesa3d/mesa/blob/master/include/pci_ids/i965_pci_ids.h
 // gen9
-const std::array<uint32_t, 25> Skylake = {
-    {0x1902, 0x1906, 0x190A, 0x190B, 0x190E, 0x1912, 0x1913, 0x1915, 0x1916,
-     0x1917, 0x191A, 0x191B, 0x191D, 0x191E, 0x1921, 0x1923, 0x1926, 0x1927,
-     0x192A, 0x192B, 0x192D, 0x1932, 0x193A, 0x193B, 0x193D}};
+const std::array<uint32_t, 25> Skylake = {{0x1902, 0x1906, 0x190A, 0x190B, 0x190E, 0x1912, 0x1913,
+                                           0x1915, 0x1916, 0x1917, 0x191A, 0x191B, 0x191D, 0x191E,
+                                           0x1921, 0x1923, 0x1926, 0x1927, 0x192A, 0x192B, 0x192D,
+                                           0x1932, 0x193A, 0x193B, 0x193D}};
 // gen9p5
-const std::array<uint32_t, 20> Kabylake = {
-    {0x5916, 0x5913, 0x5906, 0x5926, 0x5921, 0x5915, 0x590E, 0x591E, 0x5912, 0x5917,
-     0x5902, 0x591B, 0x593B, 0x590B, 0x591A, 0x590A, 0x591D, 0x5908, 0x5923, 0x5927}};
-const std::array<uint32_t, 17> Coffeelake = {
-    {0x87CA, 0x3E90, 0x3E93, 0x3E99, 0x3E9C, 0x3E91, 0x3E92, 0x3E96, 0x3E98, 0x3E9A, 0x3E9B,
-     0x3E94, 0x3EA9, 0x3EA5, 0x3EA6, 0x3EA7, 0x3EA8}};
+const std::array<uint32_t, 20> Kabylake = {{0x5916, 0x5913, 0x5906, 0x5926, 0x5921, 0x5915, 0x590E,
+                                            0x591E, 0x5912, 0x5917, 0x5902, 0x591B, 0x593B, 0x590B,
+                                            0x591A, 0x590A, 0x591D, 0x5908, 0x5923, 0x5927}};
+const std::array<uint32_t, 17> Coffeelake = {{0x87CA, 0x3E90, 0x3E93, 0x3E99, 0x3E9C, 0x3E91,
+                                              0x3E92, 0x3E96, 0x3E98, 0x3E9A, 0x3E9B, 0x3E94,
+                                              0x3EA9, 0x3EA5, 0x3EA6, 0x3EA7, 0x3EA8}};
 const std::array<uint32_t, 5> Whiskylake = {{0x3EA1, 0x3EA4, 0x3EA0, 0x3EA3, 0x3EA2}};
 const std::array<uint32_t, 21> Cometlake = {
     {0x9B21, 0x9BA0, 0x9BA2, 0x9BA4, 0x9BA5, 0x9BA8, 0x9BAA, 0x9BAB, 0x9BAC, 0x9B41, 0x9BC0,
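These tables are plain std::array constants of PCI device IDs, so a lookup is a linear scan. A hypothetical helper, not part of the diff:

    #include <algorithm>

    bool IsSkylake(uint32_t deviceId) {
        return std::find(Skylake.begin(), Skylake.end(), deviceId) != Skylake.end();
    }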
@@ -22,12 +22,8 @@
 template <typename T>
 struct IOKitRefTraits {
     static constexpr T kNullValue = IO_OBJECT_NULL;
-    static void Reference(T value) {
-        IOObjectRetain(value);
-    }
-    static void Release(T value) {
-        IOObjectRelease(value);
-    }
+    static void Reference(T value) { IOObjectRetain(value); }
+    static void Release(T value) { IOObjectRelease(value); }
 };
 
 template <typename T>
@@ -99,10 +99,8 @@ class LinkedList;
 template <typename T>
 class LinkNode {
   public:
-    LinkNode() : previous_(nullptr), next_(nullptr) {
-    }
-    LinkNode(LinkNode<T>* previous, LinkNode<T>* next) : previous_(previous), next_(next) {
-    }
+    LinkNode() : previous_(nullptr), next_(nullptr) {}
+    LinkNode(LinkNode<T>* previous, LinkNode<T>* next) : previous_(previous), next_(next) {}
 
     LinkNode(LinkNode<T>&& rhs) {
         next_ = rhs.next_;
@@ -154,22 +152,14 @@ class LinkNode {
         return true;
     }
 
-    LinkNode<T>* previous() const {
-        return previous_;
-    }
+    LinkNode<T>* previous() const { return previous_; }
 
-    LinkNode<T>* next() const {
-        return next_;
-    }
+    LinkNode<T>* next() const { return next_; }
 
     // Cast from the node-type to the value type.
-    const T* value() const {
-        return static_cast<const T*>(this);
-    }
+    const T* value() const { return static_cast<const T*>(this); }
 
-    T* value() {
-        return static_cast<T*>(this);
-    }
+    T* value() { return static_cast<T*>(this); }
 
   private:
     friend class LinkedList<T>;
@@ -183,8 +173,7 @@ class LinkedList {
     // The "root" node is self-referential, and forms the basis of a circular
     // list (root_.next() will point back to the start of the list,
     // and root_->previous() wraps around to the end of the list).
-    LinkedList() : root_(&root_, &root_) {
-    }
+    LinkedList() : root_(&root_, &root_) {}
 
     ~LinkedList() {
         // If any LinkNodes still exist in the LinkedList, there will be outstanding references to
@@ -194,9 +183,7 @@ class LinkedList {
     }
 
     // Appends |e| to the end of the linked list.
-    void Append(LinkNode<T>* e) {
-        e->InsertBefore(&root_);
-    }
+    void Append(LinkNode<T>* e) { e->InsertBefore(&root_); }
 
     // Moves all elements (in order) of the list and appends them into |l| leaving the list empty.
     void MoveInto(LinkedList<T>* l) {
@@ -212,21 +199,13 @@ class LinkedList {
         root_.previous_ = &root_;
     }
 
-    LinkNode<T>* head() const {
-        return root_.next();
-    }
+    LinkNode<T>* head() const { return root_.next(); }
 
-    LinkNode<T>* tail() const {
-        return root_.previous();
-    }
+    LinkNode<T>* tail() const { return root_.previous(); }
 
-    const LinkNode<T>* end() const {
-        return &root_;
-    }
+    const LinkNode<T>* end() const { return &root_; }
 
-    bool empty() const {
-        return head() == end();
-    }
+    bool empty() const { return head() == end(); }
 
   private:
     LinkNode<T> root_;
@@ -235,8 +214,7 @@ class LinkedList {
 template <typename T>
 class LinkedListIterator {
   public:
-    explicit LinkedListIterator(LinkNode<T>* node) : current_(node), next_(node->next()) {
-    }
+    explicit LinkedListIterator(LinkNode<T>* node) : current_(node), next_(node->next()) {}
 
     // We keep an early reference to the next node in the list so that even if the current element
     // is modified or removed from the list, we have a valid next node.
@@ -246,13 +224,9 @@ class LinkedListIterator {
         return *this;
     }
 
-    bool operator!=(const LinkedListIterator<T>& other) const {
-        return current_ != other.current_;
-    }
+    bool operator!=(const LinkedListIterator<T>& other) const { return current_ != other.current_; }
 
-    LinkNode<T>* operator*() const {
-        return current_;
-    }
+    LinkNode<T>* operator*() const { return current_; }
 
   private:
     LinkNode<T>* current_;
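For orientation, a hedged sketch of how this intrusive list is used; Task is illustrative, and lifetime management is elided:

    class Task : public LinkNode<Task> {
      public:
        int id = 0;
    };

    void Example() {
        LinkedList<Task> list;
        Task t;
        list.Append(&t);  // InsertBefore(&root_), i.e. push_back
        for (LinkNode<Task>* node = list.head(); node != list.end(); node = node->next()) {
            Task* task = node->value();  // value() downcasts the node to the derived type
        }
    }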
@@ -64,8 +64,7 @@ namespace dawn {
 
 }  // anonymous namespace
 
-LogMessage::LogMessage(LogSeverity severity) : mSeverity(severity) {
-}
+LogMessage::LogMessage(LogSeverity severity) : mSeverity(severity) {}
 
 LogMessage::~LogMessage() {
     std::string fullMessage = mStream.str();
@@ -67,12 +67,8 @@
 template <typename T>
 struct NSRefTraits {
     static constexpr T kNullValue = nullptr;
-    static void Reference(T value) {
-        [value retain];
-    }
-    static void Release(T value) {
-        [value release];
-    }
+    static void Reference(T value) { [value retain]; }
+    static void Release(T value) { [value release]; }
 };
 
 template <typename T>
@@ -80,13 +76,9 @@ class NSRef : public RefBase<T*, NSRefTraits<T*>> {
   public:
     using RefBase<T*, NSRefTraits<T*>>::RefBase;
 
-    const T* operator*() const {
-        return this->Get();
-    }
+    const T* operator*() const { return this->Get(); }
 
-    T* operator*() {
-        return this->Get();
-    }
+    T* operator*() { return this->Get(); }
 };
 
 template <typename T>
@@ -104,13 +96,9 @@ class NSPRef : public RefBase<T, NSRefTraits<T>> {
   public:
     using RefBase<T, NSRefTraits<T>>::RefBase;
 
-    const T operator*() const {
-        return this->Get();
-    }
+    const T operator*() const { return this->Get(); }
 
-    T operator*() {
-        return this->Get();
-    }
+    T operator*() { return this->Get(); }
 };
 
 template <typename T>
@@ -36,17 +36,13 @@ template <typename T, typename Traits>
 class RefBase {
   public:
     // Default constructor and destructor.
-    RefBase() : mValue(Traits::kNullValue) {
-    }
+    RefBase() : mValue(Traits::kNullValue) {}
 
-    ~RefBase() {
-        Release(mValue);
-    }
+    ~RefBase() { Release(mValue); }
 
     // Constructors from nullptr.
     // NOLINTNEXTLINE(runtime/explicit)
-    constexpr RefBase(std::nullptr_t) : RefBase() {
-    }
+    constexpr RefBase(std::nullptr_t) : RefBase() {}
 
     RefBase<T, Traits>& operator=(std::nullptr_t) {
         Set(Traits::kNullValue);
@@ -55,9 +51,7 @@ class RefBase {
 
     // Constructors from a value T.
     // NOLINTNEXTLINE(runtime/explicit)
-    RefBase(T value) : mValue(value) {
-        Reference(value);
-    }
+    RefBase(T value) : mValue(value) { Reference(value); }
 
     RefBase<T, Traits>& operator=(const T& value) {
         Set(value);
@@ -65,18 +59,14 @@ class RefBase {
     }
 
     // Constructors from a RefBase<T>
-    RefBase(const RefBase<T, Traits>& other) : mValue(other.mValue) {
-        Reference(other.mValue);
-    }
+    RefBase(const RefBase<T, Traits>& other) : mValue(other.mValue) { Reference(other.mValue); }
 
     RefBase<T, Traits>& operator=(const RefBase<T, Traits>& other) {
         Set(other.mValue);
         return *this;
     }
 
-    RefBase(RefBase<T, Traits>&& other) {
-        mValue = other.Detach();
-    }
+    RefBase(RefBase<T, Traits>&& other) { mValue = other.Detach(); }
 
     RefBase<T, Traits>& operator=(RefBase<T, Traits>&& other) {
         if (&other != this) {
@@ -113,28 +103,16 @@ class RefBase {
     }
 
     // Comparison operators.
-    bool operator==(const T& other) const {
-        return mValue == other;
-    }
+    bool operator==(const T& other) const { return mValue == other; }
 
-    bool operator!=(const T& other) const {
-        return mValue != other;
-    }
+    bool operator!=(const T& other) const { return mValue != other; }
 
-    const T operator->() const {
-        return mValue;
-    }
-    T operator->() {
-        return mValue;
-    }
+    const T operator->() const { return mValue; }
+    T operator->() { return mValue; }
 
     // Smart pointer methods.
-    const T& Get() const {
-        return mValue;
-    }
-    T& Get() {
-        return mValue;
-    }
+    const T& Get() const { return mValue; }
+    T& Get() { return mValue; }
 
     [[nodiscard]] T Detach() {
         T value{std::move(mValue)};
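RefBase is parameterized on a traits type; the surrounding files (CoreFoundationRefTraits, IOKitRefTraits, NSRefTraits, RefCountedTraits) plug different retain/release calls into the same template. A hypothetical traits instantiation mirroring that shape; all names here are illustrative:

    // A refcounted C-style handle and its acquire/release functions (assumed).
    struct MyHandle;
    void MyHandleAcquire(MyHandle* h);
    void MyHandleRelease(MyHandle* h);

    struct MyHandleTraits {
        static constexpr MyHandle* kNullValue = nullptr;
        static void Reference(MyHandle* value) { MyHandleAcquire(value); }
        static void Release(MyHandle* value) { MyHandleRelease(value); }
    };

    using MyHandleRef = RefBase<MyHandle*, MyHandleTraits>;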
@@ -45,12 +45,8 @@ class RefCounted {
 template <typename T>
 struct RefCountedTraits {
     static constexpr T* kNullValue = nullptr;
-    static void Reference(T* value) {
-        value->Reference();
-    }
-    static void Release(T* value) {
-        value->Release();
-    }
+    static void Reference(T* value) { value->Reference(); }
+    static void Release(T* value) { value->Release(); }
 };
 
 template <typename T>
@@ -237,16 +237,13 @@ class [[nodiscard]] Result {
 
 // Implementation of Result<void, E>
 template <typename E>
-Result<void, E>::Result() {
-}
+Result<void, E>::Result() {}
 
 template <typename E>
-Result<void, E>::Result(std::unique_ptr<E> error) : mError(std::move(error)) {
-}
+Result<void, E>::Result(std::unique_ptr<E> error) : mError(std::move(error)) {}
 
 template <typename E>
-Result<void, E>::Result(Result<void, E>&& other) : mError(std::move(other.mError)) {
-}
+Result<void, E>::Result(Result<void, E>&& other) : mError(std::move(other.mError)) {}
 
 template <typename E>
 Result<void, E>& Result<void, E>::operator=(Result<void, E>&& other) {
@@ -271,8 +268,7 @@ bool Result<void, E>::IsSuccess() const {
 }
 
 template <typename E>
-void Result<void, E>::AcquireSuccess() {
-}
+void Result<void, E>::AcquireSuccess() {}
 
 template <typename E>
 std::unique_ptr<E> Result<void, E>::AcquireError() {
@@ -298,13 +294,11 @@ namespace detail {
 
 // Implementation of Result<T*, E>
 template <typename T, typename E>
-Result<T*, E>::Result(T* success) : mPayload(detail::MakePayload(success, detail::Success)) {
-}
+Result<T*, E>::Result(T* success) : mPayload(detail::MakePayload(success, detail::Success)) {}
 
 template <typename T, typename E>
 Result<T*, E>::Result(std::unique_ptr<E> error)
-    : mPayload(detail::MakePayload(error.release(), detail::Error)) {
-}
+    : mPayload(detail::MakePayload(error.release(), detail::Error)) {}
 
 template <typename T, typename E>
 template <typename TChild>
@@ -355,13 +349,11 @@ std::unique_ptr<E> Result<T*, E>::AcquireError() {
 // Implementation of Result<const T*, E*>
 template <typename T, typename E>
 Result<const T*, E>::Result(const T* success)
-    : mPayload(detail::MakePayload(success, detail::Success)) {
-}
+    : mPayload(detail::MakePayload(success, detail::Success)) {}
 
 template <typename T, typename E>
 Result<const T*, E>::Result(std::unique_ptr<E> error)
-    : mPayload(detail::MakePayload(error.release(), detail::Error)) {
-}
+    : mPayload(detail::MakePayload(error.release(), detail::Error)) {}
 
 template <typename T, typename E>
 Result<const T*, E>::Result(Result<const T*, E>&& other) : mPayload(other.mPayload) {
@@ -415,13 +407,11 @@ Result<Ref<T>, E>::Result(Ref<U>&& success)
 
 template <typename T, typename E>
 template <typename U>
-Result<Ref<T>, E>::Result(const Ref<U>& success) : Result(Ref<U>(success)) {
-}
+Result<Ref<T>, E>::Result(const Ref<U>& success) : Result(Ref<U>(success)) {}
 
 template <typename T, typename E>
 Result<Ref<T>, E>::Result(std::unique_ptr<E> error)
-    : mPayload(detail::MakePayload(error.release(), detail::Error)) {
-}
+    : mPayload(detail::MakePayload(error.release(), detail::Error)) {}
 
 template <typename T, typename E>
 template <typename U>
@@ -473,12 +463,10 @@ std::unique_ptr<E> Result<Ref<T>, E>::AcquireError() {
 
 // Implementation of Result<T, E>
 template <typename T, typename E>
-Result<T, E>::Result(T&& success) : mType(Success), mSuccess(std::move(success)) {
-}
+Result<T, E>::Result(T&& success) : mType(Success), mSuccess(std::move(success)) {}
 
 template <typename T, typename E>
-Result<T, E>::Result(std::unique_ptr<E> error) : mType(Error), mError(std::move(error)) {
-}
+Result<T, E>::Result(std::unique_ptr<E> error) : mType(Error), mError(std::move(error)) {}
 
 template <typename T, typename E>
 Result<T, E>::~Result() {
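A hedged usage sketch of the general Result<T, E> specialization above. Parse and ParseError are illustrative, and IsSuccess()/AcquireError() are assumed to match the accessors the diff shows for the other specializations:

    #include <memory>
    #include <string>

    struct ParseError {
        std::string message;
    };

    Result<int, ParseError> Parse(const std::string& s) {
        if (s.empty()) {
            return std::make_unique<ParseError>(ParseError{"empty input"});  // error path
        }
        return 42;  // success path binds the T&& constructor
    }

    void Example() {
        Result<int, ParseError> r = Parse("");
        if (!r.IsSuccess()) {
            std::unique_ptr<ParseError> err = r.AcquireError();
        }
    }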
@@ -193,8 +193,7 @@ typename SerialStorage<Derived>::StorageIterator SerialStorage<Derived>::FindUpT
 template <typename Derived>
 SerialStorage<Derived>::BeginEnd::BeginEnd(typename SerialStorage<Derived>::StorageIterator start,
                                            typename SerialStorage<Derived>::StorageIterator end)
-    : mStartIt(start), mEndIt(end) {
-}
+    : mStartIt(start), mEndIt(end) {}
 
 template <typename Derived>
 typename SerialStorage<Derived>::Iterator SerialStorage<Derived>::BeginEnd::begin() const {
@@ -210,8 +209,7 @@ typename SerialStorage<Derived>::Iterator SerialStorage<Derived>::BeginEnd::end(
 
 template <typename Derived>
 SerialStorage<Derived>::Iterator::Iterator(typename SerialStorage<Derived>::StorageIterator start)
-    : mStorageIterator(start), mSerialIterator(nullptr) {
-}
+    : mStorageIterator(start), mSerialIterator(nullptr) {}
 
 template <typename Derived>
 typename SerialStorage<Derived>::Iterator& SerialStorage<Derived>::Iterator::operator++() {
@@ -257,8 +255,7 @@ template <typename Derived>
 SerialStorage<Derived>::ConstBeginEnd::ConstBeginEnd(
     typename SerialStorage<Derived>::ConstStorageIterator start,
     typename SerialStorage<Derived>::ConstStorageIterator end)
-    : mStartIt(start), mEndIt(end) {
-}
+    : mStartIt(start), mEndIt(end) {}
 
 template <typename Derived>
 typename SerialStorage<Derived>::ConstIterator SerialStorage<Derived>::ConstBeginEnd::begin()
@@ -276,8 +273,7 @@ typename SerialStorage<Derived>::ConstIterator SerialStorage<Derived>::ConstBegi
 template <typename Derived>
 SerialStorage<Derived>::ConstIterator::ConstIterator(
     typename SerialStorage<Derived>::ConstStorageIterator start)
-    : mStorageIterator(start), mSerialIterator(nullptr) {
-}
+    : mStorageIterator(start), mSerialIterator(nullptr) {}
 
 template <typename Derived>
 typename SerialStorage<Derived>::ConstIterator&
@@ -25,19 +25,16 @@
 // IndexLinkNode
 
 SlabAllocatorImpl::IndexLinkNode::IndexLinkNode(Index index, Index nextIndex)
-    : index(index), nextIndex(nextIndex) {
-}
+    : index(index), nextIndex(nextIndex) {}
 
 // Slab
 
 SlabAllocatorImpl::Slab::Slab(char allocation[], IndexLinkNode* head)
-    : allocation(allocation), freeList(head), prev(nullptr), next(nullptr), blocksInUse(0) {
-}
+    : allocation(allocation), freeList(head), prev(nullptr), next(nullptr), blocksInUse(0) {}
 
 SlabAllocatorImpl::Slab::Slab(Slab&& rhs) = default;
 
-SlabAllocatorImpl::SentinelSlab::SentinelSlab() : Slab(nullptr, nullptr) {
-}
+SlabAllocatorImpl::SentinelSlab::SentinelSlab() : Slab(nullptr, nullptr) {}
 
 SlabAllocatorImpl::SentinelSlab::SentinelSlab(SentinelSlab&& rhs) = default;
 
@@ -83,8 +80,7 @@ SlabAllocatorImpl::SlabAllocatorImpl(SlabAllocatorImpl&& rhs)
       mTotalAllocationSize(rhs.mTotalAllocationSize),
       mAvailableSlabs(std::move(rhs.mAvailableSlabs)),
       mFullSlabs(std::move(rhs.mFullSlabs)),
-      mRecycledSlabs(std::move(rhs.mRecycledSlabs)) {
-}
+      mRecycledSlabs(std::move(rhs.mRecycledSlabs)) {}
 
 SlabAllocatorImpl::~SlabAllocatorImpl() = default;
 
@@ -168,8 +168,7 @@ class SlabAllocator : public SlabAllocatorImpl {
     SlabAllocator(size_t totalObjectBytes,
                   uint32_t objectSize = u32_sizeof<T>,
                   uint32_t objectAlignment = u32_alignof<T>)
-        : SlabAllocatorImpl(totalObjectBytes / objectSize, objectSize, objectAlignment) {
-    }
+        : SlabAllocatorImpl(totalObjectBytes / objectSize, objectSize, objectAlignment) {}
 
     template <typename... Args>
     T* Allocate(Args&&... args) {
@@ -177,9 +176,7 @@ class SlabAllocator : public SlabAllocatorImpl {
         return new (ptr) T(std::forward<Args>(args)...);
     }
 
-    void Deallocate(T* object) {
-        SlabAllocatorImpl::Deallocate(object);
-    }
+    void Deallocate(T* object) { SlabAllocatorImpl::Deallocate(object); }
 };
 
 #endif  // SRC_DAWN_COMMON_SLABALLOCATOR_H_
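An illustrative use of the SlabAllocator shown above; Widget is hypothetical:

    class Widget {
      public:
        explicit Widget(int id) : mId(id) {}

      private:
        int mId;
    };

    void Example() {
        SlabAllocator<Widget> allocator(64 * sizeof(Widget));  // totalObjectBytes
        Widget* w = allocator.Allocate(7);  // placement-new with forwarded args
        allocator.Deallocate(w);            // returns the block to its slab
    }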
@@ -41,16 +41,11 @@ class StackAllocator : public std::allocator<T> {
     // maintaining this for as long as any containers using this allocator are
     // live.
     struct Source {
-        Source() : used_stack_buffer_(false) {
-        }
+        Source() : used_stack_buffer_(false) {}
 
         // Casts the buffer in its right type.
-        T* stack_buffer() {
-            return reinterpret_cast<T*>(stack_buffer_);
-        }
-        const T* stack_buffer() const {
-            return reinterpret_cast<const T*>(&stack_buffer_);
-        }
+        T* stack_buffer() { return reinterpret_cast<T*>(stack_buffer_); }
+        const T* stack_buffer() const { return reinterpret_cast<const T*>(&stack_buffer_); }
 
         // The buffer itself. It is not of type T because we don't want the
         // constructors and destructors to be automatically called. Define a POD
@@ -73,8 +68,7 @@ class StackAllocator : public std::allocator<T> {
 
     // For the straight up copy c-tor, we can share storage.
     StackAllocator(const StackAllocator<T, stack_capacity>& rhs)
-        : std::allocator<T>(), source_(rhs.source_) {
-    }
+        : std::allocator<T>(), source_(rhs.source_) {}
 
     // ISO C++ requires the following constructor to be defined,
     // and std::vector in VC++2008SP1 Release fails with an error
@@ -84,18 +78,15 @@ class StackAllocator : public std::allocator<T> {
     // no guarantee that the Source buffer of Ts is large enough
     // for Us.
     template <typename U, size_t other_capacity>
-    StackAllocator(const StackAllocator<U, other_capacity>& other) : source_(nullptr) {
-    }
+    StackAllocator(const StackAllocator<U, other_capacity>& other) : source_(nullptr) {}
 
     // This constructor must exist. It creates a default allocator that doesn't
     // actually have a stack buffer. glibc's std::string() will compare the
     // current allocator against the default-constructed allocator, so this
     // should be fast.
-    StackAllocator() : source_(nullptr) {
-    }
+    StackAllocator() : source_(nullptr) {}
 
-    explicit StackAllocator(Source* source) : source_(source) {
-    }
+    explicit StackAllocator(Source* source) : source_(source) {}
 
     // Actually do the allocation. Use the stack buffer if nobody has used it yet
     // and the size requested fits. Otherwise, fall through to the standard
@@ -154,28 +145,18 @@ class StackContainer {
     // shorter lifetimes than the source. The copy will share the same allocator
     // and therefore the same stack buffer as the original. Use std::copy to
     // copy into a "real" container for longer-lived objects.
-    ContainerType& container() {
-        return container_;
-    }
-    const ContainerType& container() const {
-        return container_;
-    }
+    ContainerType& container() { return container_; }
+    const ContainerType& container() const { return container_; }
 
     // Support operator-> to get to the container. This allows nicer syntax like:
    //   StackContainer<...> foo;
    //   std::sort(foo->begin(), foo->end());
-    ContainerType* operator->() {
-        return &container_;
-    }
-    const ContainerType* operator->() const {
-        return &container_;
-    }
+    ContainerType* operator->() { return &container_; }
+    const ContainerType* operator->() const { return &container_; }
 
     // Retrieves the stack source so that that unit tests can verify that the
     // buffer is being used properly.
-    const typename Allocator::Source& stack_data() const {
-        return stack_data_;
-    }
+    const typename Allocator::Source& stack_data() const { return stack_data_; }
 
   protected:
     typename Allocator::Source stack_data_;
@@ -225,8 +206,7 @@ class StackVector
     : public StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity> {
   public:
     StackVector()
-        : StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity>() {
-    }
+        : StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity>() {}
 
     // We need to put this in STL containers sometimes, which requires a copy
     // constructor. We can't call the regular copy constructor because that will
@@ -244,12 +224,8 @@ class StackVector
 
     // Vectors are commonly indexed, which isn't very convenient even with
     // operator-> (using "->at()" does exception stuff we don't want).
-    T& operator[](size_t i) {
-        return this->container().operator[](i);
-    }
-    const T& operator[](size_t i) const {
-        return this->container().operator[](i);
-    }
+    T& operator[](size_t i) { return this->container().operator[](i); }
+    const T& operator[](size_t i) const { return this->container().operator[](i); }
 
   private:
     // StackVector(const StackVector& rhs) = delete;
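A small usage sketch of StackVector: the first stack_capacity elements live in the inline buffer, and only growth beyond that touches the heap. Values here are illustrative:

    void Example() {
        StackVector<int, 8> v;
        v->push_back(1);  // operator-> exposes the underlying std::vector
        v[0] = 2;         // operator[] shown above
    }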
@@ -208,8 +208,7 @@ std::optional<std::string> GetModuleDirectory() {
 ScopedEnvironmentVar::ScopedEnvironmentVar(const char* variableName, const char* value)
     : mName(variableName),
       mOriginalValue(GetEnvironmentVar(variableName)),
-      mIsSet(SetEnvironmentVar(variableName, value)) {
-}
+      mIsSet(SetEnvironmentVar(variableName, value)) {}
 
 ScopedEnvironmentVar::~ScopedEnvironmentVar() {
     if (mIsSet) {
@@ -75,25 +75,20 @@ namespace detail {
 
     // Construction from non-narrowing integral types.
     template <typename I,
-              typename = std::enable_if_t<
-                  std::is_integral<I>::value &&
+              typename =
+                  std::enable_if_t<std::is_integral<I>::value &&
                                    std::numeric_limits<I>::max() <= std::numeric_limits<T>::max() &&
                                    std::numeric_limits<I>::min() >= std::numeric_limits<T>::min()>>
-    explicit constexpr TypedIntegerImpl(I rhs) : mValue(static_cast<T>(rhs)) {
-    }
+    explicit constexpr TypedIntegerImpl(I rhs) : mValue(static_cast<T>(rhs)) {}
 
     // Allow explicit casts only to the underlying type. If you're casting out of an
     // TypedInteger, you should know what what you're doing, and exactly what type you
     // expect.
-    explicit constexpr operator T() const {
-        return static_cast<T>(this->mValue);
-    }
+    explicit constexpr operator T() const { return static_cast<T>(this->mValue); }
 
     // Same-tag TypedInteger comparison operators
 #define TYPED_COMPARISON(op) \
-    constexpr bool operator op(const TypedIntegerImpl& rhs) const { \
-        return mValue op rhs.mValue;                                \
-    }
+    constexpr bool operator op(const TypedIntegerImpl& rhs) const { return mValue op rhs.mValue; }
     TYPED_COMPARISON(<)
     TYPED_COMPARISON(<=)
     TYPED_COMPARISON(>)
@@ -132,8 +127,9 @@ namespace detail {
     }
 
     template <typename T2 = T>
-    static constexpr std::enable_if_t<std::is_unsigned<T2>::value, decltype(T(0) + T2(0))>
-    AddImpl(TypedIntegerImpl<Tag, T> lhs, TypedIntegerImpl<Tag, T2> rhs) {
+    static constexpr std::enable_if_t<std::is_unsigned<T2>::value, decltype(T(0) + T2(0))> AddImpl(
+        TypedIntegerImpl<Tag, T> lhs,
+        TypedIntegerImpl<Tag, T2> rhs) {
         static_assert(std::is_same<T, T2>::value);
 
         // Overflow would wrap around
@@ -142,8 +138,9 @@ namespace detail {
     }
 
     template <typename T2 = T>
-    static constexpr std::enable_if_t<std::is_signed<T2>::value, decltype(T(0) + T2(0))>
-    AddImpl(TypedIntegerImpl<Tag, T> lhs, TypedIntegerImpl<Tag, T2> rhs) {
+    static constexpr std::enable_if_t<std::is_signed<T2>::value, decltype(T(0) + T2(0))> AddImpl(
+        TypedIntegerImpl<Tag, T> lhs,
+        TypedIntegerImpl<Tag, T2> rhs) {
         static_assert(std::is_same<T, T2>::value);
 
         if (lhs.mValue > 0) {
@@ -160,8 +157,9 @@ namespace detail {
     }
 
     template <typename T2 = T>
-    static constexpr std::enable_if_t<std::is_unsigned<T>::value, decltype(T(0) - T2(0))>
-    SubImpl(TypedIntegerImpl<Tag, T> lhs, TypedIntegerImpl<Tag, T2> rhs) {
+    static constexpr std::enable_if_t<std::is_unsigned<T>::value, decltype(T(0) - T2(0))> SubImpl(
+        TypedIntegerImpl<Tag, T> lhs,
+        TypedIntegerImpl<Tag, T2> rhs) {
         static_assert(std::is_same<T, T2>::value);
 
         // Overflow would wrap around
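A hedged sketch of what the Tag parameter buys; the alias names are illustrative, and Dawn normally exposes this class through a TypedInteger alias rather than directly:

    using BindingIndex = detail::TypedIntegerImpl<struct BindingIndexTag, uint32_t>;
    using GroupIndex = detail::TypedIntegerImpl<struct GroupIndexTag, uint32_t>;

    void Example() {
        BindingIndex b(3u);
        BindingIndex c(4u);
        bool less = b < c;  // TYPED_COMPARISON: same-tag comparisons only
        // GroupIndex g(3u); b == g;  // would not compile: different tags
    }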
@@ -42,8 +42,7 @@ namespace ityp {
 
     template <typename... Values>
     // NOLINTNEXTLINE(runtime/explicit)
-    constexpr array(Values&&... values) : Base{std::forward<Values>(values)...} {
-    }
+    constexpr array(Values&&... values) : Base{std::forward<Values>(values)...} {}
 
     Value& operator[](Index i) {
         I index = static_cast<I>(i);
@@ -69,25 +68,15 @@ namespace ityp {
         return Base::at(index);
     }
 
-    typename Base::iterator begin() noexcept {
-        return Base::begin();
-    }
+    typename Base::iterator begin() noexcept { return Base::begin(); }
 
-    typename Base::const_iterator begin() const noexcept {
-        return Base::begin();
-    }
+    typename Base::const_iterator begin() const noexcept { return Base::begin(); }
 
-    typename Base::iterator end() noexcept {
-        return Base::end();
-    }
+    typename Base::iterator end() noexcept { return Base::end(); }
 
-    typename Base::const_iterator end() const noexcept {
-        return Base::end();
-    }
+    typename Base::const_iterator end() const noexcept { return Base::end(); }
 
-    constexpr Index size() const {
-        return Index(I(Size));
-    }
+    constexpr Index size() const { return Index(I(Size)); }
 
     using Base::back;
     using Base::data;
@@ -30,30 +30,21 @@ namespace ityp {
 
     static_assert(sizeof(I) <= sizeof(size_t));
 
-    explicit constexpr bitset(const Base& rhs) : Base(rhs) {
-    }
+    explicit constexpr bitset(const Base& rhs) : Base(rhs) {}
 
   public:
     using reference = typename Base::reference;
 
-    constexpr bitset() noexcept : Base() {
-    }
+    constexpr bitset() noexcept : Base() {}
 
     // NOLINTNEXTLINE(runtime/explicit)
-    constexpr bitset(uint64_t value) noexcept : Base(value) {
-    }
+    constexpr bitset(uint64_t value) noexcept : Base(value) {}
 
-    constexpr bool operator[](Index i) const {
-        return Base::operator[](static_cast<I>(i));
-    }
+    constexpr bool operator[](Index i) const { return Base::operator[](static_cast<I>(i)); }
 
-    typename Base::reference operator[](Index i) {
-        return Base::operator[](static_cast<I>(i));
-    }
+    typename Base::reference operator[](Index i) { return Base::operator[](static_cast<I>(i)); }
 
-    bool test(Index i) const {
-        return Base::test(static_cast<I>(i));
-    }
+    bool test(Index i) const { return Base::test(static_cast<I>(i)); }
 
     using Base::all;
     using Base::any;
@@ -81,33 +72,21 @@ namespace ityp {
         return static_cast<bitset&>(Base::operator^=(static_cast<const Base&>(other)));
     }
 
-    bitset operator~() const noexcept {
-        return bitset(*this).flip();
-    }
+    bitset operator~() const noexcept { return bitset(*this).flip(); }
 
-    bitset& set() noexcept {
-        return static_cast<bitset&>(Base::set());
-    }
+    bitset& set() noexcept { return static_cast<bitset&>(Base::set()); }
 
     bitset& set(Index i, bool value = true) {
         return static_cast<bitset&>(Base::set(static_cast<I>(i), value));
     }
 
-    bitset& reset() noexcept {
-        return static_cast<bitset&>(Base::reset());
-    }
+    bitset& reset() noexcept { return static_cast<bitset&>(Base::reset()); }
 
-    bitset& reset(Index i) {
-        return static_cast<bitset&>(Base::reset(static_cast<I>(i)));
-    }
+    bitset& reset(Index i) { return static_cast<bitset&>(Base::reset(static_cast<I>(i))); }
 
-    bitset& flip() noexcept {
-        return static_cast<bitset&>(Base::flip());
-    }
+    bitset& flip() noexcept { return static_cast<bitset&>(Base::flip()); }
 
-    bitset& flip(Index i) {
-        return static_cast<bitset&>(Base::flip(static_cast<I>(i)));
-    }
+    bitset& flip(Index i) { return static_cast<bitset&>(Base::flip(static_cast<I>(i))); }
 
     using Base::to_string;
     using Base::to_ullong;
@@ -31,39 +31,25 @@ namespace ityp {
     using I = UnderlyingType<Index>;
 
   public:
-    constexpr span() : mData(nullptr), mSize(0) {
-    }
-    constexpr span(Value* data, Index size) : mData(data), mSize(size) {
-    }
+    constexpr span() : mData(nullptr), mSize(0) {}
+    constexpr span(Value* data, Index size) : mData(data), mSize(size) {}
 
     constexpr Value& operator[](Index i) const {
         ASSERT(i < mSize);
         return mData[static_cast<I>(i)];
     }
 
-    Value* data() noexcept {
-        return mData;
-    }
+    Value* data() noexcept { return mData; }
 
-    const Value* data() const noexcept {
-        return mData;
-    }
+    const Value* data() const noexcept { return mData; }
 
-    Value* begin() noexcept {
-        return mData;
-    }
+    Value* begin() noexcept { return mData; }
 
-    const Value* begin() const noexcept {
-        return mData;
-    }
+    const Value* begin() const noexcept { return mData; }
 
-    Value* end() noexcept {
-        return mData + static_cast<I>(mSize);
-    }
+    Value* end() noexcept { return mData + static_cast<I>(mSize); }
 
-    const Value* end() const noexcept {
-        return mData + static_cast<I>(mSize);
-    }
+    const Value* end() const noexcept { return mData + static_cast<I>(mSize); }
 
     Value& front() {
         ASSERT(mData != nullptr);
@@ -89,9 +75,7 @@ namespace ityp {
         return *(mData + static_cast<I>(mSize) - 1);
     }
 
-    Index size() const {
-        return mSize;
-    }
+    Index size() const { return mSize; }
 
   private:
     Value* mData;
@@ -32,11 +32,8 @@ namespace ityp {
     static_assert(StaticCapacity <= std::numeric_limits<I>::max());
 
   public:
-    stack_vec() : Base() {
-    }
-    explicit stack_vec(Index size) : Base() {
-        this->container().resize(static_cast<I>(size));
-    }
+    stack_vec() : Base() {}
+    explicit stack_vec(Index size) : Base() { this->container().resize(static_cast<I>(size)); }
 
     Value& operator[](Index i) {
         ASSERT(i < size());
@@ -48,57 +45,31 @@ namespace ityp {
         return Base::operator[](static_cast<I>(i));
     }
 
-    void resize(Index size) {
-        this->container().resize(static_cast<I>(size));
-    }
+    void resize(Index size) { this->container().resize(static_cast<I>(size)); }
 
-    void reserve(Index size) {
-        this->container().reserve(static_cast<I>(size));
-    }
+    void reserve(Index size) { this->container().reserve(static_cast<I>(size)); }
 
-    Value* data() {
-        return this->container().data();
-    }
+    Value* data() { return this->container().data(); }
 
-    const Value* data() const {
-        return this->container().data();
-    }
+    const Value* data() const { return this->container().data(); }
 
-    typename VectorBase::iterator begin() noexcept {
-        return this->container().begin();
-    }
+    typename VectorBase::iterator begin() noexcept { return this->container().begin(); }
 
-    typename VectorBase::const_iterator begin() const noexcept {
-        return this->container().begin();
-    }
+    typename VectorBase::const_iterator begin() const noexcept { return this->container().begin(); }
 
-    typename VectorBase::iterator end() noexcept {
-        return this->container().end();
-    }
+    typename VectorBase::iterator end() noexcept { return this->container().end(); }
 
-    typename VectorBase::const_iterator end() const noexcept {
-        return this->container().end();
-    }
+    typename VectorBase::const_iterator end() const noexcept { return this->container().end(); }
 
-    typename VectorBase::reference front() {
-        return this->container().front();
-    }
+    typename VectorBase::reference front() { return this->container().front(); }
 
-    typename VectorBase::const_reference front() const {
-        return this->container().front();
-    }
+    typename VectorBase::const_reference front() const { return this->container().front(); }
 
-    typename VectorBase::reference back() {
-        return this->container().back();
-    }
+    typename VectorBase::reference back() { return this->container().back(); }
 
-    typename VectorBase::const_reference back() const {
-        return this->container().back();
-    }
+    typename VectorBase::const_reference back() const { return this->container().back(); }
 
-    Index size() const {
-        return Index(static_cast<I>(this->container().size()));
-    }
+    Index size() const { return Index(static_cast<I>(this->container().size())); }
 };
 
 }  // namespace ityp
@@ -42,23 +42,17 @@ namespace ityp {
     using Base::size;
 
   public:
-    vector() : Base() {
-    }
+    vector() : Base() {}
 
-    explicit vector(Index size) : Base(static_cast<I>(size)) {
-    }
+    explicit vector(Index size) : Base(static_cast<I>(size)) {}
 
-    vector(Index size, const Value& init) : Base(static_cast<I>(size), init) {
-    }
+    vector(Index size, const Value& init) : Base(static_cast<I>(size), init) {}
 
-    vector(const vector& rhs) : Base(static_cast<const Base&>(rhs)) {
-    }
+    vector(const vector& rhs) : Base(static_cast<const Base&>(rhs)) {}
 
-    vector(vector&& rhs) : Base(static_cast<Base&&>(rhs)) {
-    }
+    vector(vector&& rhs) : Base(static_cast<Base&&>(rhs)) {}
 
-    vector(std::initializer_list<Value> init) : Base(init) {
-    }
+    vector(std::initializer_list<Value> init) : Base(init) {}
 
     vector& operator=(const vector& rhs) {
         Base::operator=(static_cast<const Base&>(rhs));
@@ -95,13 +89,9 @@ namespace ityp {
         return Index(static_cast<I>(Base::size()));
     }
 
-    void resize(Index size) {
-        Base::resize(static_cast<I>(size));
-    }
+    void resize(Index size) { Base::resize(static_cast<I>(size)); }
 
-    void reserve(Index size) {
-        Base::reserve(static_cast<I>(size));
-    }
+    void reserve(Index size) { Base::reserve(static_cast<I>(size)); }
 };
 
 }  // namespace ityp
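The ityp containers mirror their std counterparts but take a typed Index at the API boundary, so an index from the wrong space is a compile-time error. A hedged sketch reusing ColorAttachmentIndex, which appears elsewhere in this diff; the element type is illustrative:

    void Example() {
        ityp::vector<ColorAttachmentIndex, wgpu::TextureFormat> formats;
        formats.resize(ColorAttachmentIndex(uint8_t(2)));  // typed size
        // formats.resize(2);  // would not compile: plain integers are rejected
    }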
@@ -85,49 +85,33 @@ namespace dawn::native::vulkan {
   public:
     // Default constructor and assigning of VK_NULL_HANDLE
     VkHandle() = default;
-    VkHandle(std::nullptr_t) {
-    }
+    VkHandle(std::nullptr_t) {}
 
     // Use default copy constructor/assignment
     VkHandle(const VkHandle<Tag, HandleType>& other) = default;
     VkHandle& operator=(const VkHandle<Tag, HandleType>&) = default;
 
     // Comparisons between handles
-    bool operator==(VkHandle<Tag, HandleType> other) const {
-        return mHandle == other.mHandle;
-    }
-    bool operator!=(VkHandle<Tag, HandleType> other) const {
-        return mHandle != other.mHandle;
-    }
+    bool operator==(VkHandle<Tag, HandleType> other) const { return mHandle == other.mHandle; }
+    bool operator!=(VkHandle<Tag, HandleType> other) const { return mHandle != other.mHandle; }
 
     // Comparisons between handles and VK_NULL_HANDLE
-    bool operator==(std::nullptr_t) const {
-        return mHandle == 0;
-    }
-    bool operator!=(std::nullptr_t) const {
-        return mHandle != 0;
-    }
+    bool operator==(std::nullptr_t) const { return mHandle == 0; }
+    bool operator!=(std::nullptr_t) const { return mHandle != 0; }
 
     // Implicit conversion to real Vulkan types.
-    operator HandleType() const {
-        return GetHandle();
-    }
+    operator HandleType() const { return GetHandle(); }
 
-    HandleType GetHandle() const {
-        return mHandle;
-    }
+    HandleType GetHandle() const { return mHandle; }
 
-    HandleType& operator*() {
-        return mHandle;
-    }
+    HandleType& operator*() { return mHandle; }
 
     static VkHandle<Tag, HandleType> CreateFromHandle(HandleType handle) {
         return VkHandle{handle};
     }
 
   private:
-    explicit VkHandle(HandleType handle) : mHandle(handle) {
-    }
+    explicit VkHandle(HandleType handle) : mHandle(handle) {}
 
     HandleType mHandle = 0;
 };
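Context for the wrapper above: when non-dispatchable Vulkan handles compile down to the same underlying type (plain uint64_t on non-64-bit targets), distinct Tag parameters keep the aliases from being mixed up. A hypothetical alias and use:

    using VkSamplerHandle = VkHandle<struct VkSamplerTag, ::VkSampler>;

    void Example(::VkSampler rawSampler) {
        VkSamplerHandle sampler = VkSamplerHandle::CreateFromHandle(rawSampler);
        if (sampler != nullptr) {
            ::VkSampler raw = sampler.GetHandle();  // implicit conversion also works
        }
    }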
@@ -41,9 +41,7 @@ namespace {
         }
         return buf.data();
     }
-    bool Flush() override {
-        return true;
-    }
+    bool Flush() override { return true; }
 
   private:
     std::vector<char> buf;
@@ -53,14 +53,14 @@ namespace dawn::native {
         std::min(mLimits.v1.maxVertexBuffers, uint32_t(kMaxVertexBuffers));
     mLimits.v1.maxInterStageShaderComponents =
         std::min(mLimits.v1.maxInterStageShaderComponents, kMaxInterStageShaderComponents);
-    mLimits.v1.maxSampledTexturesPerShaderStage = std::min(
-        mLimits.v1.maxSampledTexturesPerShaderStage, kMaxSampledTexturesPerShaderStage);
+    mLimits.v1.maxSampledTexturesPerShaderStage =
+        std::min(mLimits.v1.maxSampledTexturesPerShaderStage, kMaxSampledTexturesPerShaderStage);
     mLimits.v1.maxSamplersPerShaderStage =
         std::min(mLimits.v1.maxSamplersPerShaderStage, kMaxSamplersPerShaderStage);
     mLimits.v1.maxStorageBuffersPerShaderStage =
         std::min(mLimits.v1.maxStorageBuffersPerShaderStage, kMaxStorageBuffersPerShaderStage);
-    mLimits.v1.maxStorageTexturesPerShaderStage = std::min(
-        mLimits.v1.maxStorageTexturesPerShaderStage, kMaxStorageTexturesPerShaderStage);
+    mLimits.v1.maxStorageTexturesPerShaderStage =
+        std::min(mLimits.v1.maxStorageTexturesPerShaderStage, kMaxStorageTexturesPerShaderStage);
     mLimits.v1.maxUniformBuffersPerShaderStage =
         std::min(mLimits.v1.maxUniformBuffersPerShaderStage, kMaxUniformBuffersPerShaderStage);
     mLimits.v1.maxDynamicUniformBuffersPerPipelineLayout =
@@ -119,8 +119,8 @@ namespace dawn::native {
     if (result.IsError()) {
         std::unique_ptr<ErrorData> errorData = result.AcquireError();
         // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
-        callback(WGPURequestDeviceStatus_Error, nullptr,
-                 errorData->GetFormattedMessage().c_str(), userdata);
+        callback(WGPURequestDeviceStatus_Error, nullptr, errorData->GetFormattedMessage().c_str(),
+                 userdata);
        return;
     }
 
@@ -198,13 +198,12 @@ namespace dawn::native {
     for (uint32_t i = 0; i < descriptor->requiredFeaturesCount; ++i) {
         wgpu::FeatureName f = descriptor->requiredFeatures[i];
         DAWN_TRY(ValidateFeatureName(f));
-        DAWN_INVALID_IF(!mSupportedFeatures.IsEnabled(f),
-                        "Requested feature %s is not supported.", f);
+        DAWN_INVALID_IF(!mSupportedFeatures.IsEnabled(f), "Requested feature %s is not supported.",
+                        f);
     }
 
     if (descriptor->requiredLimits != nullptr) {
-        DAWN_TRY_CONTEXT(
-            ValidateLimits(mUseTieredLimits ? ApplyLimitTiers(mLimits.v1) : mLimits.v1,
+        DAWN_TRY_CONTEXT(ValidateLimits(mUseTieredLimits ? ApplyLimitTiers(mLimits.v1) : mLimits.v1,
                                          descriptor->requiredLimits->limits),
                          "validating required limits");
 
@@ -74,8 +74,7 @@ namespace dawn::native {
     FeaturesSet mSupportedFeatures;
 
   private:
-    virtual ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(
-        const DeviceDescriptor* descriptor) = 0;
+    virtual ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(const DeviceDescriptor* descriptor) = 0;
 
     virtual MaybeError InitializeImpl() = 0;
 
@@ -21,8 +21,7 @@
 namespace dawn::native {
 
 AsyncTaskManager::AsyncTaskManager(dawn::platform::WorkerTaskPool* workerTaskPool)
-    : mWorkerTaskPool(workerTaskPool) {
-}
+    : mWorkerTaskPool(workerTaskPool) {}
 
 void AsyncTaskManager::PostTask(AsyncTask asyncTask) {
     // If these allocations becomes expensive, we can slab-allocate tasks.
@@ -21,8 +21,7 @@
 
 namespace dawn::native {
 
-AttachmentStateBlueprint::AttachmentStateBlueprint(
-    const RenderBundleEncoderDescriptor* descriptor)
+AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderBundleEncoderDescriptor* descriptor)
     : mSampleCount(descriptor->sampleCount) {
     ASSERT(descriptor->colorFormatsCount <= kMaxColorAttachments);
     for (ColorAttachmentIndex i(uint8_t(0));
@@ -58,10 +57,8 @@ namespace dawn::native {
 
 AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderPassDescriptor* descriptor) {
     for (ColorAttachmentIndex i(uint8_t(0));
-         i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->colorAttachmentCount));
-         ++i) {
-        TextureViewBase* attachment =
-            descriptor->colorAttachments[static_cast<uint8_t>(i)].view;
+         i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->colorAttachmentCount)); ++i) {
+        TextureViewBase* attachment = descriptor->colorAttachments[static_cast<uint8_t>(i)].view;
         if (attachment == nullptr) {
             continue;
         }
@@ -85,8 +82,7 @@ namespace dawn::native {
     ASSERT(mSampleCount > 0);
 }
 
-AttachmentStateBlueprint::AttachmentStateBlueprint(const AttachmentStateBlueprint& rhs) =
-    default;
+AttachmentStateBlueprint::AttachmentStateBlueprint(const AttachmentStateBlueprint& rhs) = default;
 
 size_t AttachmentStateBlueprint::HashFunc::operator()(
     const AttachmentStateBlueprint* attachmentState) const {
@ -107,8 +103,7 @@ namespace dawn::native {
|
|||
return hash;
|
||||
}
|
||||
|
||||
bool AttachmentStateBlueprint::EqualityFunc::operator()(
|
||||
const AttachmentStateBlueprint* a,
|
||||
bool AttachmentStateBlueprint::EqualityFunc::operator()(const AttachmentStateBlueprint* a,
|
||||
const AttachmentStateBlueprint* b) const {
|
||||
// Check set attachments
|
||||
if (a->mColorAttachmentsSet != b->mColorAttachmentsSet) {
|
||||
|
@ -136,8 +131,7 @@ namespace dawn::native {
|
|||
}
|
||||
|
||||
AttachmentState::AttachmentState(DeviceBase* device, const AttachmentStateBlueprint& blueprint)
|
||||
: AttachmentStateBlueprint(blueprint), ObjectBase(device) {
|
||||
}
|
||||
: AttachmentStateBlueprint(blueprint), ObjectBase(device) {}
|
||||
|
||||
AttachmentState::~AttachmentState() {
|
||||
GetDevice()->UncacheAttachmentState(this);
|
||||
|
@ -148,13 +142,12 @@ namespace dawn::native {
|
|||
return AttachmentStateBlueprint::HashFunc()(this);
|
||||
}
|
||||
|
||||
ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments>
|
||||
AttachmentState::GetColorAttachmentsMask() const {
|
||||
ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> AttachmentState::GetColorAttachmentsMask()
|
||||
const {
|
||||
return mColorAttachmentsSet;
|
||||
}
|
||||
|
||||
wgpu::TextureFormat AttachmentState::GetColorAttachmentFormat(
|
||||
ColorAttachmentIndex index) const {
|
||||
wgpu::TextureFormat AttachmentState::GetColorAttachmentFormat(ColorAttachmentIndex index) const {
|
||||
ASSERT(mColorAttachmentsSet[index]);
|
||||
return mColorFormats[index];
|
||||
}
|
||||
|
|
|
@@ -48,8 +48,7 @@ namespace dawn::native {
size_t operator()(const AttachmentStateBlueprint* attachmentState) const;
};
struct EqualityFunc {
bool operator()(const AttachmentStateBlueprint* a,
const AttachmentStateBlueprint* b) const;
bool operator()(const AttachmentStateBlueprint* a, const AttachmentStateBlueprint* b) const;
};

protected:
@@ -17,8 +17,7 @@
namespace dawn::native {

BackendConnection::BackendConnection(InstanceBase* instance, wgpu::BackendType type)
: mInstance(instance), mType(type) {
}
: mInstance(instance), mType(type) {}

wgpu::BackendType BackendConnection::GetType() const {
return mType;
@@ -54,21 +54,18 @@ namespace dawn::native {
case wgpu::BufferBindingType::Uniform:
requiredUsage = wgpu::BufferUsage::Uniform;
maxBindingSize = device->GetLimits().v1.maxUniformBufferBindingSize;
requiredBindingAlignment =
device->GetLimits().v1.minUniformBufferOffsetAlignment;
requiredBindingAlignment = device->GetLimits().v1.minUniformBufferOffsetAlignment;
break;
case wgpu::BufferBindingType::Storage:
case wgpu::BufferBindingType::ReadOnlyStorage:
requiredUsage = wgpu::BufferUsage::Storage;
maxBindingSize = device->GetLimits().v1.maxStorageBufferBindingSize;
requiredBindingAlignment =
device->GetLimits().v1.minStorageBufferOffsetAlignment;
requiredBindingAlignment = device->GetLimits().v1.minStorageBufferOffsetAlignment;
break;
case kInternalStorageBufferBinding:
requiredUsage = kInternalStorageBuffer;
maxBindingSize = device->GetLimits().v1.maxStorageBufferBindingSize;
requiredBindingAlignment =
device->GetLimits().v1.minStorageBufferOffsetAlignment;
requiredBindingAlignment = device->GetLimits().v1.minStorageBufferOffsetAlignment;
break;
case wgpu::BufferBindingType::Undefined:
UNREACHABLE();
@@ -92,26 +89,25 @@ namespace dawn::native {

// Note that no overflow can happen because we already checked that
// bufferSize >= bindingSize
DAWN_INVALID_IF(
entry.offset > bufferSize - bindingSize,
DAWN_INVALID_IF(entry.offset > bufferSize - bindingSize,
"Binding range (offset: %u, size: %u) doesn't fit in the size (%u) of %s.",
entry.offset, bufferSize, bindingSize, entry.buffer);

DAWN_INVALID_IF(!IsAligned(entry.offset, requiredBindingAlignment),
"Offset (%u) does not satisfy the minimum %s alignment (%u).",
entry.offset, bindingInfo.buffer.type, requiredBindingAlignment);
"Offset (%u) does not satisfy the minimum %s alignment (%u).", entry.offset,
bindingInfo.buffer.type, requiredBindingAlignment);

DAWN_INVALID_IF(!(entry.buffer->GetUsage() & requiredUsage),
"Binding usage (%s) of %s doesn't match expected usage (%s).",
entry.buffer->GetUsageExternalOnly(), entry.buffer, requiredUsage);

DAWN_INVALID_IF(bindingSize < bindingInfo.buffer.minBindingSize,
"Binding size (%u) is smaller than the minimum binding size (%u).",
bindingSize, bindingInfo.buffer.minBindingSize);
"Binding size (%u) is smaller than the minimum binding size (%u).", bindingSize,
bindingInfo.buffer.minBindingSize);

DAWN_INVALID_IF(bindingSize > maxBindingSize,
"Binding size (%u) is larger than the maximum binding size (%u).",
bindingSize, maxBindingSize);
"Binding size (%u) is larger than the maximum binding size (%u).", bindingSize,
maxBindingSize);

return {};
}
@@ -131,24 +127,20 @@ namespace dawn::native {
TextureViewBase* view = entry.textureView;

Aspect aspect = view->GetAspects();
DAWN_INVALID_IF(!HasOneBit(aspect), "Multiple aspects (%s) selected in %s.", aspect,
view);
DAWN_INVALID_IF(!HasOneBit(aspect), "Multiple aspects (%s) selected in %s.", aspect, view);

TextureBase* texture = view->GetTexture();
switch (bindingInfo.bindingType) {
case BindingInfoType::Texture: {
SampleTypeBit supportedTypes =
texture->GetFormat().GetAspectInfo(aspect).supportedSampleTypes;
SampleTypeBit requiredType =
SampleTypeToSampleTypeBit(bindingInfo.texture.sampleType);
SampleTypeBit requiredType = SampleTypeToSampleTypeBit(bindingInfo.texture.sampleType);

DAWN_INVALID_IF(
!(texture->GetUsage() & wgpu::TextureUsage::TextureBinding),
DAWN_INVALID_IF(!(texture->GetUsage() & wgpu::TextureUsage::TextureBinding),
"Usage (%s) of %s doesn't include TextureUsage::TextureBinding.",
texture->GetUsage(), texture);

DAWN_INVALID_IF(
texture->IsMultisampledTexture() != bindingInfo.texture.multisampled,
DAWN_INVALID_IF(texture->IsMultisampledTexture() != bindingInfo.texture.multisampled,
"Sample count (%u) of %s doesn't match expectation (multisampled: %d).",
texture->GetSampleCount(), texture, bindingInfo.texture.multisampled);
@@ -158,29 +150,25 @@ namespace dawn::native {
"types (%s).",
supportedTypes, texture, requiredType);

DAWN_INVALID_IF(
entry.textureView->GetDimension() != bindingInfo.texture.viewDimension,
DAWN_INVALID_IF(entry.textureView->GetDimension() != bindingInfo.texture.viewDimension,
"Dimension (%s) of %s doesn't match the expected dimension (%s).",
entry.textureView->GetDimension(), entry.textureView,
bindingInfo.texture.viewDimension);
break;
}
case BindingInfoType::StorageTexture: {
DAWN_INVALID_IF(
!(texture->GetUsage() & wgpu::TextureUsage::StorageBinding),
DAWN_INVALID_IF(!(texture->GetUsage() & wgpu::TextureUsage::StorageBinding),
"Usage (%s) of %s doesn't include TextureUsage::StorageBinding.",
texture->GetUsage(), texture);

ASSERT(!texture->IsMultisampledTexture());

DAWN_INVALID_IF(
texture->GetFormat().format != bindingInfo.storageTexture.format,
DAWN_INVALID_IF(texture->GetFormat().format != bindingInfo.storageTexture.format,
"Format (%s) of %s expected to be (%s).", texture->GetFormat().format,
texture, bindingInfo.storageTexture.format);

DAWN_INVALID_IF(
entry.textureView->GetDimension() !=
bindingInfo.storageTexture.viewDimension,
entry.textureView->GetDimension() != bindingInfo.storageTexture.viewDimension,
"Dimension (%s) of %s doesn't match the expected dimension (%s).",
entry.textureView->GetDimension(), entry.textureView,
bindingInfo.storageTexture.viewDimension);
@@ -214,22 +202,19 @@ namespace dawn::native {

switch (bindingInfo.sampler.type) {
case wgpu::SamplerBindingType::NonFiltering:
DAWN_INVALID_IF(
entry.sampler->IsFiltering(),
DAWN_INVALID_IF(entry.sampler->IsFiltering(),
"Filtering sampler %s is incompatible with non-filtering sampler "
"binding.",
entry.sampler);
[[fallthrough]];
case wgpu::SamplerBindingType::Filtering:
DAWN_INVALID_IF(
entry.sampler->IsComparison(),
DAWN_INVALID_IF(entry.sampler->IsComparison(),
"Comparison sampler %s is incompatible with non-comparison sampler "
"binding.",
entry.sampler);
break;
case wgpu::SamplerBindingType::Comparison:
DAWN_INVALID_IF(
!entry.sampler->IsComparison(),
DAWN_INVALID_IF(!entry.sampler->IsComparison(),
"Non-comparison sampler %s is imcompatible with comparison sampler "
"binding.",
entry.sampler);
@@ -254,8 +239,7 @@ namespace dawn::native {
entry.sampler != nullptr || entry.textureView != nullptr || entry.buffer != nullptr,
"Expected only external texture to be set for binding entry.");

DAWN_INVALID_IF(
expansions.find(BindingNumber(entry.binding)) == expansions.end(),
DAWN_INVALID_IF(expansions.find(BindingNumber(entry.binding)) == expansions.end(),
"External texture binding entry %u is not present in the bind group layout.",
entry.binding);
@@ -269,8 +253,7 @@ namespace dawn::native {

} // anonymous namespace

MaybeError ValidateBindGroupDescriptor(DeviceBase* device,
const BindGroupDescriptor* descriptor) {
MaybeError ValidateBindGroupDescriptor(DeviceBase* device, const BindGroupDescriptor* descriptor) {
DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");

DAWN_TRY(device->ValidateObject(descriptor->layout));
@@ -486,8 +469,7 @@ namespace dawn::native {
}

BindGroupBase::BindGroupBase(DeviceBase* device, ObjectBase::ErrorTag tag)
: ApiObjectBase(device, tag), mBindingData() {
}
: ApiObjectBase(device, tag), mBindingData() {}

// static
BindGroupBase* BindGroupBase::MakeError(DeviceBase* device) {
@@ -533,8 +515,7 @@ namespace dawn::native {
ASSERT(!IsError());
ASSERT(bindingIndex < mLayout->GetBindingCount());
ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Texture ||
mLayout->GetBindingInfo(bindingIndex).bindingType ==
BindingInfoType::StorageTexture);
mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::StorageTexture);
return static_cast<TextureViewBase*>(mBindingData.bindings[bindingIndex].Get());
}
@@ -31,8 +31,7 @@ namespace dawn::native {

class DeviceBase;

MaybeError ValidateBindGroupDescriptor(DeviceBase* device,
const BindGroupDescriptor* descriptor);
MaybeError ValidateBindGroupDescriptor(DeviceBase* device, const BindGroupDescriptor* descriptor);

struct BufferBinding {
BufferBase* buffer;
@@ -39,8 +39,7 @@ namespace dawn::native {

ASSERT(format != nullptr);
DAWN_INVALID_IF(!format->supportsStorageUsage,
"Texture format (%s) does not support storage textures.",
storageTextureFormat);
"Texture format (%s) does not support storage textures.", storageTextureFormat);

return {};
}
@@ -111,8 +110,7 @@ namespace dawn::native {
viewDimension = texture.viewDimension;
}

DAWN_INVALID_IF(
texture.multisampled && viewDimension != wgpu::TextureViewDimension::e2D,
DAWN_INVALID_IF(texture.multisampled && viewDimension != wgpu::TextureViewDimension::e2D,
"View dimension (%s) for a multisampled texture bindings was not %s.",
viewDimension, wgpu::TextureViewDimension::e2D);
}
@@ -150,16 +148,14 @@ namespace dawn::native {
"BindGroupLayoutEntry had more than one of buffer, sampler, texture, "
"storageTexture, or externalTexture set");

DAWN_INVALID_IF(
!IsSubset(entry.visibility, allowedStages),
DAWN_INVALID_IF(!IsSubset(entry.visibility, allowedStages),
"%s bindings cannot be used with a visibility of %s. Only %s are allowed.",
bindingType, entry.visibility, allowedStages);

return {};
}

BindGroupLayoutEntry CreateSampledTextureBindingForExternalTexture(
uint32_t binding,
BindGroupLayoutEntry CreateSampledTextureBindingForExternalTexture(uint32_t binding,
wgpu::ShaderStage visibility) {
BindGroupLayoutEntry entry;
entry.binding = binding;
@@ -217,13 +213,11 @@ namespace dawn::native {
dawn_native::ExternalTextureBindingExpansion bindingExpansion;

BindGroupLayoutEntry plane0Entry =
CreateSampledTextureBindingForExternalTexture(entry.binding,
entry.visibility);
CreateSampledTextureBindingForExternalTexture(entry.binding, entry.visibility);
bindingExpansion.plane0 = BindingNumber(plane0Entry.binding);
expandedOutput.push_back(plane0Entry);

BindGroupLayoutEntry plane1Entry =
CreateSampledTextureBindingForExternalTexture(
BindGroupLayoutEntry plane1Entry = CreateSampledTextureBindingForExternalTexture(
nextOpenBindingNumberForNewEntry++, entry.visibility);
bindingExpansion.plane1 = BindingNumber(plane1Entry.binding);
expandedOutput.push_back(plane1Entry);
@@ -260,8 +254,8 @@ namespace dawn::native {
"Binding number (%u) exceeds the maximum binding number (%u).",
uint32_t(bindingNumber), uint32_t(kMaxBindingNumberTyped));
DAWN_INVALID_IF(bindingsSet.count(bindingNumber) != 0,
"On entries[%u]: binding index (%u) was specified by a previous entry.",
i, entry.binding);
"On entries[%u]: binding index (%u) was specified by a previous entry.", i,
entry.binding);

DAWN_TRY_CONTEXT(ValidateBindGroupLayoutEntry(device, entry, allowInternalBinding),
"validating entries[%u]", i);
@@ -419,8 +413,7 @@ namespace dawn::native {
return aInfo.storageTexture.access < bInfo.storageTexture.access;
}
if (aInfo.storageTexture.viewDimension != bInfo.storageTexture.viewDimension) {
return aInfo.storageTexture.viewDimension <
bInfo.storageTexture.viewDimension;
return aInfo.storageTexture.viewDimension < bInfo.storageTexture.viewDimension;
}
if (aInfo.storageTexture.format != bInfo.storageTexture.format) {
return aInfo.storageTexture.format < bInfo.storageTexture.format;
@@ -492,8 +485,7 @@ namespace dawn::native {
}

BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag)
: ApiObjectBase(device, tag) {
}
: ApiObjectBase(device, tag) {}

BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device)
: ApiObjectBase(device, kLabelNotImplemented) {
@@ -624,9 +616,8 @@ namespace dawn::native {
// |-uint64_t[mUnverifiedBufferCount]-|
size_t objectPointerStart = mBindingCounts.bufferCount * sizeof(BufferBindingData);
ASSERT(IsAligned(objectPointerStart, alignof(Ref<ObjectBase>)));
size_t bufferSizeArrayStart =
Align(objectPointerStart + mBindingCounts.totalCount * sizeof(Ref<ObjectBase>),
sizeof(uint64_t));
size_t bufferSizeArrayStart = Align(
objectPointerStart + mBindingCounts.totalCount * sizeof(Ref<ObjectBase>), sizeof(uint64_t));
ASSERT(IsAligned(bufferSizeArrayStart, alignof(uint64_t)));
return bufferSizeArrayStart + mBindingCounts.unverifiedBufferCount * sizeof(uint64_t);
}
@@ -41,8 +41,7 @@ namespace dawn::native {
BindingNumber params;
};

using ExternalTextureBindingExpansionMap =
std::map<BindingNumber, ExternalTextureBindingExpansion>;
using ExternalTextureBindingExpansionMap = std::map<BindingNumber, ExternalTextureBindingExpansion>;

MaybeError ValidateBindGroupLayoutDescriptor(DeviceBase* device,
const BindGroupLayoutDescriptor* descriptor,
@@ -161,8 +160,7 @@ namespace dawn::native {
ExternalTextureBindingExpansionMap mExternalTextureBindingExpansionMap;

// Non-0 if this BindGroupLayout was created as part of a default PipelineLayout.
const PipelineCompatibilityToken mPipelineCompatibilityToken =
PipelineCompatibilityToken(0);
const PipelineCompatibilityToken mPipelineCompatibilityToken = PipelineCompatibilityToken(0);

uint32_t mUnexpandedBindingCount;
};
@@ -58,9 +58,7 @@ namespace dawn::native {
SetDynamicOffsets(mDynamicOffsets[index].data(), dynamicOffsetCount, dynamicOffsets);
}

void OnSetPipeline(PipelineBase* pipeline) {
mPipelineLayout = pipeline->GetLayout();
}
void OnSetPipeline(PipelineBase* pipeline) { mPipelineLayout = pipeline->GetLayout(); }

protected:
// The Derived class should call this before it applies bind groups.
@@ -84,12 +84,10 @@ namespace dawn::native {
bindingCounts->perStage[stage].sampledTextureCount +=
rhs.perStage[stage].sampledTextureCount;
bindingCounts->perStage[stage].samplerCount += rhs.perStage[stage].samplerCount;
bindingCounts->perStage[stage].storageBufferCount +=
rhs.perStage[stage].storageBufferCount;
bindingCounts->perStage[stage].storageBufferCount += rhs.perStage[stage].storageBufferCount;
bindingCounts->perStage[stage].storageTextureCount +=
rhs.perStage[stage].storageTextureCount;
bindingCounts->perStage[stage].uniformBufferCount +=
rhs.perStage[stage].uniformBufferCount;
bindingCounts->perStage[stage].uniformBufferCount += rhs.perStage[stage].uniformBufferCount;
bindingCounts->perStage[stage].externalTextureCount +=
rhs.perStage[stage].externalTextureCount;
}
@@ -110,8 +108,7 @@ namespace dawn::native {

for (SingleShaderStage stage : IterateStages(kAllStages)) {
DAWN_INVALID_IF(
bindingCounts.perStage[stage].sampledTextureCount >
kMaxSampledTexturesPerShaderStage,
bindingCounts.perStage[stage].sampledTextureCount > kMaxSampledTexturesPerShaderStage,
"The number of sampled textures (%u) in the %s stage exceeds the maximum "
"per-stage limit (%u).",
bindingCounts.perStage[stage].sampledTextureCount, stage,
@@ -119,8 +116,7 @@ namespace dawn::native {

// The per-stage number of external textures is bound by the maximum sampled textures
// per stage.
DAWN_INVALID_IF(
bindingCounts.perStage[stage].externalTextureCount >
DAWN_INVALID_IF(bindingCounts.perStage[stage].externalTextureCount >
kMaxSampledTexturesPerShaderStage / kSampledTexturesPerExternalTexture,
"The number of external textures (%u) in the %s stage exceeds the maximum "
"per-stage limit (%u).",
@@ -152,8 +148,7 @@ namespace dawn::native {
"The combination of samplers (%u) and external textures (%u) in the %s stage "
"exceeds the maximum per-stage limit (%u).",
bindingCounts.perStage[stage].samplerCount,
bindingCounts.perStage[stage].externalTextureCount, stage,
kMaxSamplersPerShaderStage);
bindingCounts.perStage[stage].externalTextureCount, stage, kMaxSamplersPerShaderStage);

DAWN_INVALID_IF(
bindingCounts.perStage[stage].storageBufferCount > kMaxStorageBuffersPerShaderStage,
@@ -163,8 +158,7 @@ namespace dawn::native {
kMaxStorageBuffersPerShaderStage);

DAWN_INVALID_IF(
bindingCounts.perStage[stage].storageTextureCount >
kMaxStorageTexturesPerShaderStage,
bindingCounts.perStage[stage].storageTextureCount > kMaxStorageTexturesPerShaderStage,
"The number of storage textures (%u) in the %s stage exceeds the maximum per-stage "
"limit (%u).",
bindingCounts.perStage[stage].storageTextureCount, stage,
@@ -49,8 +49,7 @@ namespace dawn::native {
}

BlobCache::BlobCache(dawn::platform::CachingInterface* cachingInterface)
: mCache(cachingInterface) {
}
: mCache(cachingInterface) {}

CachedBlob BlobCache::Load(const CacheKey& key) {
std::lock_guard<std::mutex> lock(mMutex);
@@ -83,8 +83,8 @@ namespace dawn::native {
// Allocation offset is always local to the memory.
const uint64_t memoryOffset = blockOffset % mMemoryBlockSize;

return ResourceMemoryAllocation{
info, memoryOffset, mTrackedSubAllocations[memoryIndex].mMemoryAllocation.get()};
return ResourceMemoryAllocation{info, memoryOffset,
mTrackedSubAllocations[memoryIndex].mMemoryAllocation.get()};
}

void BuddyMemoryAllocator::Deallocate(const ResourceMemoryAllocation& allocation) {
@@ -44,8 +44,7 @@ namespace dawn::native {
ResourceHeapAllocator* heapAllocator);
~BuddyMemoryAllocator() = default;

ResultOrError<ResourceMemoryAllocation> Allocate(uint64_t allocationSize,
uint64_t alignment);
ResultOrError<ResourceMemoryAllocation> Allocate(uint64_t allocationSize, uint64_t alignment);
void Deallocate(const ResourceMemoryAllocation& allocation);

uint64_t GetMemoryBlockSize() const;
@@ -35,9 +35,7 @@ namespace dawn::native {

namespace {
struct MapRequestTask : QueueBase::TaskInFlight {
MapRequestTask(Ref<BufferBase> buffer, MapRequestID id)
: buffer(std::move(buffer)), id(id) {
}
MapRequestTask(Ref<BufferBase> buffer, MapRequestID id) : buffer(std::move(buffer)), id(id) {}
void Finish(dawn::platform::Platform* platform, ExecutionSerial serial) override {
TRACE_EVENT1(platform, General, "Buffer::TaskInFlight::Finished", "serial",
uint64_t(serial));
@@ -61,8 +59,7 @@ namespace dawn::native {
// Check that the size can be used to allocate an mFakeMappedData. A malloc(0)
// is invalid, and on 32bit systems we should avoid a narrowing conversion that
// would make size = 1 << 32 + 1 allocate one byte.
bool isValidSize =
descriptor->size != 0 &&
bool isValidSize = descriptor->size != 0 &&
descriptor->size < uint64_t(std::numeric_limits<size_t>::max());

if (isValidSize) {
@@ -76,25 +73,17 @@ namespace dawn::native {
}

private:
bool IsCPUWritableAtCreation() const override {
UNREACHABLE();
}
bool IsCPUWritableAtCreation() const override { UNREACHABLE(); }

MaybeError MapAtCreationImpl() override {
UNREACHABLE();
}
MaybeError MapAtCreationImpl() override { UNREACHABLE(); }

MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override {
UNREACHABLE();
}

void* GetMappedPointerImpl() override {
return mFakeMappedData.get();
}
void* GetMappedPointerImpl() override { return mFakeMappedData.get(); }

void UnmapImpl() override {
mFakeMappedData.reset();
}
void UnmapImpl() override { mFakeMappedData.reset(); }

std::unique_ptr<uint8_t[]> mFakeMappedData;
};
@@ -279,8 +268,7 @@ namespace dawn::native {
// is initialized.
// TODO(crbug.com/dawn/828): Suballocate and reuse memory from a larger staging
// buffer so we don't create many small buffers.
DAWN_TRY_ASSIGN(mStagingBuffer,
GetDevice()->CreateStagingBuffer(GetAllocatedSize()));
DAWN_TRY_ASSIGN(mStagingBuffer, GetDevice()->CreateStagingBuffer(GetAllocatedSize()));
}
}
@@ -357,12 +345,10 @@ namespace dawn::native {
CallMapCallback(mLastMapID, WGPUBufferMapAsyncStatus_DeviceLost);
return;
}
std::unique_ptr<MapRequestTask> request =
std::make_unique<MapRequestTask>(this, mLastMapID);
std::unique_ptr<MapRequestTask> request = std::make_unique<MapRequestTask>(this, mLastMapID);
TRACE_EVENT1(GetDevice()->GetPlatform(), General, "Buffer::APIMapAsync", "serial",
uint64_t(GetDevice()->GetPendingCommandSerial()));
GetDevice()->GetQueue()->TrackTask(std::move(request),
GetDevice()->GetPendingCommandSerial());
GetDevice()->GetQueue()->TrackTask(std::move(request), GetDevice()->GetPendingCommandSerial());
}

void* BufferBase::APIGetMappedRange(size_t offset, size_t size) {
@@ -400,8 +386,8 @@ namespace dawn::native {
return {};
}

DAWN_TRY(GetDevice()->CopyFromStagingToBuffer(mStagingBuffer.get(), 0, this, 0,
GetAllocatedSize()));
DAWN_TRY(
GetDevice()->CopyFromStagingToBuffer(mStagingBuffer.get(), 0, this, 0, GetAllocatedSize()));

DynamicUploader* uploader = GetDevice()->GetDynamicUploader();
uploader->ReleaseStagingBuffer(std::move(mStagingBuffer));
@@ -453,8 +439,7 @@ namespace dawn::native {
DAWN_TRY(GetDevice()->ValidateObject(this));

DAWN_INVALID_IF(uint64_t(offset) > mSize,
"Mapping offset (%u) is larger than the size (%u) of %s.", offset, mSize,
this);
"Mapping offset (%u) is larger than the size (%u) of %s.", offset, mSize, this);

DAWN_INVALID_IF(offset % 8 != 0, "Offset (%u) must be a multiple of 8.", offset);
DAWN_INVALID_IF(size % 4 != 0, "Size (%u) must be a multiple of 4.", size);
@@ -523,8 +508,7 @@ namespace dawn::native {
return true;

case BufferState::Mapped:
ASSERT(bool{mMapMode & wgpu::MapMode::Read} ^
bool{mMapMode & wgpu::MapMode::Write});
ASSERT(bool{mMapMode & wgpu::MapMode::Read} ^ bool{mMapMode & wgpu::MapMode::Write});
return !writable || (mMapMode & wgpu::MapMode::Write);

case BufferState::Unmapped:
@@ -556,8 +540,7 @@ namespace dawn::native {
}

bool BufferBase::NeedsInitialization() const {
return !mIsDataInitialized &&
GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse);
return !mIsDataInitialized && GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse);
}

bool BufferBase::IsDataInitialized() const {
@@ -88,9 +88,7 @@ namespace dawn::native {
void APIDestroy();

protected:
BufferBase(DeviceBase* device,
const BufferDescriptor* descriptor,
ObjectBase::ErrorTag tag);
BufferBase(DeviceBase* device, const BufferDescriptor* descriptor, ObjectBase::ErrorTag tag);

// Constructor used only for mocking and testing.
BufferBase(DeviceBase* device, BufferState state);
@@ -103,9 +103,7 @@ namespace dawn::native {
template <size_t N>
class CacheKeySerializer<std::bitset<N>, std::enable_if_t<(N <= 64)>> {
public:
static void Serialize(CacheKey* key, const std::bitset<N>& t) {
key->Record(t.to_ullong());
}
static void Serialize(CacheKey* key, const std::bitset<N>& t) { key->Record(t.to_ullong()); }
};

// Specialized overload for bitsets since using the built-in to_ullong have a size limit.
@@ -196,9 +194,7 @@ namespace dawn::native {
template <typename T>
class CacheKeySerializer<T, std::enable_if_t<std::is_base_of_v<CachedObject, T>>> {
public:
static void Serialize(CacheKey* key, const T& t) {
key->Record(t.GetCacheKey());
}
static void Serialize(CacheKey* key, const T& t) { key->Record(t.GetCacheKey()); }
};

} // namespace dawn::native
@@ -52,8 +52,7 @@ namespace dawn::native {
return *this;
}

CommandIterator::CommandIterator(CommandAllocator allocator)
: mBlocks(allocator.AcquireBlocks()) {
CommandIterator::CommandIterator(CommandAllocator allocator) : mBlocks(allocator.AcquireBlocks()) {
Reset();
}
@@ -206,8 +205,7 @@ namespace dawn::native {

bool CommandAllocator::GetNewBlock(size_t minimumSize) {
// Allocate blocks doubling sizes each time, to a maximum of 16k (or at least minimumSize).
mLastAllocationSize =
std::max(minimumSize, std::min(mLastAllocationSize * 2, size_t(16384)));
mLastAllocationSize = std::max(minimumSize, std::min(mLastAllocationSize * 2, size_t(16384)));

uint8_t* block = static_cast<uint8_t*>(malloc(mLastAllocationSize));
if (DAWN_UNLIKELY(block == nullptr)) {
@@ -166,8 +166,8 @@ namespace dawn::native {
static_assert(sizeof(E) == sizeof(uint32_t));
static_assert(alignof(E) == alignof(uint32_t));
static_assert(alignof(T) <= kMaxSupportedAlignment);
T* result = reinterpret_cast<T*>(
Allocate(static_cast<uint32_t>(commandId), sizeof(T), alignof(T)));
T* result =
reinterpret_cast<T*>(Allocate(static_cast<uint32_t>(commandId), sizeof(T), alignof(T)));
if (!result) {
return nullptr;
}
@@ -242,9 +242,7 @@ namespace dawn::native {
return AllocateInNewBlock(commandId, commandSize, commandAlignment);
}

uint8_t* AllocateInNewBlock(uint32_t commandId,
size_t commandSize,
size_t commandAlignment);
uint8_t* AllocateInNewBlock(uint32_t commandId, size_t commandSize, size_t commandAlignment);

DAWN_FORCE_INLINE uint8_t* AllocateData(size_t commandSize, size_t commandAlignment) {
return Allocate(detail::kAdditionalData, commandSize, commandAlignment);
@@ -39,8 +39,7 @@ namespace dawn::native {
}

CommandBufferBase::CommandBufferBase(DeviceBase* device, ObjectBase::ErrorTag tag)
: ApiObjectBase(device, tag) {
}
: ApiObjectBase(device, tag) {}

// static
CommandBufferBase* CommandBufferBase::MakeError(DeviceBase* device) {
@@ -89,16 +88,14 @@ namespace dawn::native {
UNREACHABLE();
}

SubresourceRange GetSubresourcesAffectedByCopy(const TextureCopy& copy,
const Extent3D& copySize) {
SubresourceRange GetSubresourcesAffectedByCopy(const TextureCopy& copy, const Extent3D& copySize) {
switch (copy.texture->GetDimension()) {
case wgpu::TextureDimension::e1D:
ASSERT(copy.origin.z == 0 && copySize.depthOrArrayLayers == 1);
ASSERT(copy.mipLevel == 0);
return {copy.aspect, {0, 1}, {0, 1}};
case wgpu::TextureDimension::e2D:
return {
copy.aspect, {copy.origin.z, copySize.depthOrArrayLayers}, {copy.mipLevel, 1}};
return {copy.aspect, {copy.origin.z, copySize.depthOrArrayLayers}, {copy.mipLevel, 1}};
case wgpu::TextureDimension::e3D:
return {copy.aspect, {0, 1}, {copy.mipLevel, 1}};
}
@@ -194,8 +191,7 @@ namespace dawn::native {
}

const TextureBase* texture = copy->source.texture.Get();
const TexelBlockInfo& blockInfo =
texture->GetFormat().GetAspectInfo(copy->source.aspect).block;
const TexelBlockInfo& blockInfo = texture->GetFormat().GetAspectInfo(copy->source.aspect).block;
const uint64_t widthInBlocks = copy->copySize.width / blockInfo.width;
const uint64_t heightInBlocks = copy->copySize.height / blockInfo.height;
const bool multiSlice = copy->copySize.depthOrArrayLayers > 1;
@@ -234,8 +230,8 @@ namespace dawn::native {
}
std::array<int32_t, 4> ConvertToSignedIntegerColor(dawn::native::Color color) {
const std::array<int32_t, 4> outputValue = {
static_cast<int32_t>(color.r), static_cast<int32_t>(color.g),
static_cast<int32_t>(color.b), static_cast<int32_t>(color.a)};
static_cast<int32_t>(color.r), static_cast<int32_t>(color.g), static_cast<int32_t>(color.b),
static_cast<int32_t>(color.a)};
return outputValue;
}
@@ -60,8 +60,7 @@ namespace dawn::native {
bool IsCompleteSubresourceCopiedTo(const TextureBase* texture,
const Extent3D copySize,
const uint32_t mipLevel);
SubresourceRange GetSubresourcesAffectedByCopy(const TextureCopy& copy,
const Extent3D& copySize);
SubresourceRange GetSubresourcesAffectedByCopy(const TextureCopy& copy, const Extent3D& copySize);

void LazyClearRenderPassAttachments(BeginRenderPassCmd* renderPass);
@@ -82,18 +82,15 @@ namespace dawn::native {
return ValidateOperation(kDrawIndexedAspects);
}

MaybeError CommandBufferStateTracker::ValidateBufferInRangeForVertexBuffer(
uint32_t vertexCount,
MaybeError CommandBufferStateTracker::ValidateBufferInRangeForVertexBuffer(uint32_t vertexCount,
uint32_t firstVertex) {
RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();

const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
vertexBufferSlotsUsedAsVertexBuffer =
const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>& vertexBufferSlotsUsedAsVertexBuffer =
lastRenderPipeline->GetVertexBufferSlotsUsedAsVertexBuffer();

for (auto usedSlotVertex : IterateBitSet(vertexBufferSlotsUsedAsVertexBuffer)) {
const VertexBufferInfo& vertexBuffer =
lastRenderPipeline->GetVertexBuffer(usedSlotVertex);
const VertexBufferInfo& vertexBuffer = lastRenderPipeline->GetVertexBuffer(usedSlotVertex);
uint64_t arrayStride = vertexBuffer.arrayStride;
uint64_t bufferSize = mVertexBufferSizes[usedSlotVertex];
@@ -106,8 +103,7 @@ namespace dawn::native {
} else {
uint64_t strideCount = static_cast<uint64_t>(firstVertex) + vertexCount;
if (strideCount != 0u) {
uint64_t requiredSize =
(strideCount - 1u) * arrayStride + vertexBuffer.lastStride;
uint64_t requiredSize = (strideCount - 1u) * arrayStride + vertexBuffer.lastStride;
// firstVertex and vertexCount are in uint32_t,
// arrayStride must not be larger than kMaxVertexBufferArrayStride, which is
// currently 2048, and vertexBuffer.lastStride = max(attribute.offset +
@@ -133,8 +129,7 @@ namespace dawn::native {
uint32_t firstInstance) {
RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();

const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
vertexBufferSlotsUsedAsInstanceBuffer =
const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>& vertexBufferSlotsUsedAsInstanceBuffer =
lastRenderPipeline->GetVertexBufferSlotsUsedAsInstanceBuffer();

for (auto usedSlotInstance : IterateBitSet(vertexBufferSlotsUsedAsInstanceBuffer)) {
@@ -151,8 +146,7 @@ namespace dawn::native {
} else {
uint64_t strideCount = static_cast<uint64_t>(firstInstance) + instanceCount;
if (strideCount != 0u) {
uint64_t requiredSize =
(strideCount - 1u) * arrayStride + vertexBuffer.lastStride;
uint64_t requiredSize = (strideCount - 1u) * arrayStride + vertexBuffer.lastStride;
// firstInstance and instanceCount are in uint32_t,
// arrayStride must not be larger than kMaxVertexBufferArrayStride, which is
// currently 2048, and vertexBuffer.lastStride = max(attribute.offset +
@@ -310,8 +304,7 @@ namespace dawn::native {

DAWN_INVALID_IF(
requiredBGL->GetPipelineCompatibilityToken() == PipelineCompatibilityToken(0) &&
currentBGL->GetPipelineCompatibilityToken() !=
PipelineCompatibilityToken(0),
currentBGL->GetPipelineCompatibilityToken() != PipelineCompatibilityToken(0),
"%s at index %u uses a %s which was created as part of the default layout for "
"a different pipeline than the current one (%s), and as a result is not "
"compatible. Use an explicit bind group layout when creating bind groups and "
@@ -48,15 +48,12 @@ namespace dawn::native {
!std::isnan(attachment.clearColor.b) || !std::isnan(attachment.clearColor.a);
}

MaybeError ValidateB2BCopyAlignment(uint64_t dataSize,
uint64_t srcOffset,
uint64_t dstOffset) {
MaybeError ValidateB2BCopyAlignment(uint64_t dataSize, uint64_t srcOffset, uint64_t dstOffset) {
// Copy size must be a multiple of 4 bytes on macOS.
DAWN_INVALID_IF(dataSize % 4 != 0, "Copy size (%u) is not a multiple of 4.", dataSize);

// SourceOffset and destinationOffset must be multiples of 4 bytes on macOS.
DAWN_INVALID_IF(
srcOffset % 4 != 0 || dstOffset % 4 != 0,
DAWN_INVALID_IF(srcOffset % 4 != 0 || dstOffset % 4 != 0,
"Source offset (%u) or destination offset (%u) is not a multiple of 4 bytes,",
srcOffset, dstOffset);
@@ -65,8 +62,8 @@ namespace dawn::native {

MaybeError ValidateTextureSampleCountInBufferCopyCommands(const TextureBase* texture) {
DAWN_INVALID_IF(texture->GetSampleCount() > 1,
"%s sample count (%u) is not 1 when copying to or from a buffer.",
texture, texture->GetSampleCount());
"%s sample count (%u) is not 1 when copying to or from a buffer.", texture,
texture->GetSampleCount());

return {};
}
@@ -87,8 +84,7 @@ namespace dawn::native {
return {};
}

MaybeError ValidateTextureDepthStencilToBufferCopyRestrictions(
const ImageCopyTexture& src) {
MaybeError ValidateTextureDepthStencilToBufferCopyRestrictions(const ImageCopyTexture& src) {
Aspect aspectUsed;
DAWN_TRY_ASSIGN(aspectUsed, SingleAspectUsedByImageCopyTexture(src));
if (aspectUsed == Aspect::Depth) {
@@ -138,8 +134,7 @@ namespace dawn::native {
*height = attachmentSize.height;
DAWN_ASSERT(*width != 0 && *height != 0);
} else {
DAWN_INVALID_IF(
*width != attachmentSize.width || *height != attachmentSize.height,
DAWN_INVALID_IF(*width != attachmentSize.width || *height != attachmentSize.height,
"Attachment %s size (width: %u, height: %u) does not match the size of the "
"other attachments (width: %u, height: %u).",
attachment, attachmentSize.width, attachmentSize.height, *width, *height);
@@ -177,8 +172,7 @@ namespace dawn::native {
DAWN_TRY(ValidateCanUseAs(colorAttachment.resolveTarget->GetTexture(),
wgpu::TextureUsage::RenderAttachment, usageValidationMode));

DAWN_INVALID_IF(
!attachment->GetTexture()->IsMultisampledTexture(),
DAWN_INVALID_IF(!attachment->GetTexture()->IsMultisampledTexture(),
"Cannot set %s as a resolve target when the color attachment %s has a sample "
"count of 1.",
resolveTarget, attachment);
@@ -198,10 +192,8 @@ namespace dawn::native {
const Extent3D& colorTextureSize =
attachment->GetTexture()->GetMipLevelVirtualSize(attachment->GetBaseMipLevel());
const Extent3D& resolveTextureSize =
resolveTarget->GetTexture()->GetMipLevelVirtualSize(
resolveTarget->GetBaseMipLevel());
DAWN_INVALID_IF(
colorTextureSize.width != resolveTextureSize.width ||
resolveTarget->GetTexture()->GetMipLevelVirtualSize(resolveTarget->GetBaseMipLevel());
DAWN_INVALID_IF(colorTextureSize.width != resolveTextureSize.width ||
colorTextureSize.height != resolveTextureSize.height,
"The Resolve target %s size (width: %u, height: %u) does not match the color "
"attachment %s size (width: %u, height: %u).",
@@ -222,8 +214,7 @@ namespace dawn::native {
return {};
}

MaybeError ValidateRenderPassColorAttachment(
DeviceBase* device,
MaybeError ValidateRenderPassColorAttachment(DeviceBase* device,
const RenderPassColorAttachment& colorAttachment,
uint32_t* width,
uint32_t* height,
@@ -234,20 +225,18 @@ namespace dawn::native {
return {};
}
DAWN_TRY(device->ValidateObject(attachment));
DAWN_TRY(ValidateCanUseAs(attachment->GetTexture(),
wgpu::TextureUsage::RenderAttachment, usageValidationMode));
DAWN_TRY(ValidateCanUseAs(attachment->GetTexture(), wgpu::TextureUsage::RenderAttachment,
usageValidationMode));

DAWN_INVALID_IF(!(attachment->GetAspects() & Aspect::Color) ||
!attachment->GetFormat().isRenderable,
"The color attachment %s format (%s) is not color renderable.",
attachment, attachment->GetFormat().format);
DAWN_INVALID_IF(
!(attachment->GetAspects() & Aspect::Color) || !attachment->GetFormat().isRenderable,
"The color attachment %s format (%s) is not color renderable.", attachment,
attachment->GetFormat().format);

DAWN_TRY(ValidateLoadOp(colorAttachment.loadOp));
DAWN_TRY(ValidateStoreOp(colorAttachment.storeOp));
DAWN_INVALID_IF(colorAttachment.loadOp == wgpu::LoadOp::Undefined,
"loadOp must be set.");
DAWN_INVALID_IF(colorAttachment.storeOp == wgpu::StoreOp::Undefined,
"storeOp must be set.");
DAWN_INVALID_IF(colorAttachment.loadOp == wgpu::LoadOp::Undefined, "loadOp must be set.");
DAWN_INVALID_IF(colorAttachment.storeOp == wgpu::StoreOp::Undefined, "storeOp must be set.");

// TODO(dawn:1269): Remove after the deprecation period.
bool useClearColor = HasDeprecatedColor(colorAttachment);
@@ -285,27 +274,24 @@ namespace dawn::native {

TextureViewBase* attachment = depthStencilAttachment->view;
DAWN_TRY(device->ValidateObject(attachment));
DAWN_TRY(ValidateCanUseAs(attachment->GetTexture(),
wgpu::TextureUsage::RenderAttachment, usageValidationMode));
DAWN_TRY(ValidateCanUseAs(attachment->GetTexture(), wgpu::TextureUsage::RenderAttachment,
usageValidationMode));

const Format& format = attachment->GetFormat();
DAWN_INVALID_IF(
!format.HasDepthOrStencil(),
DAWN_INVALID_IF(!format.HasDepthOrStencil(),
"The depth stencil attachment %s format (%s) is not a depth stencil format.",
attachment, format.format);

DAWN_INVALID_IF(!format.isRenderable,
"The depth stencil attachment %s format (%s) is not renderable.",
attachment, format.format);
"The depth stencil attachment %s format (%s) is not renderable.", attachment,
format.format);

DAWN_INVALID_IF(attachment->GetAspects() != format.aspects,
"The depth stencil attachment %s must encompass all aspects.",
attachment);
"The depth stencil attachment %s must encompass all aspects.", attachment);

DAWN_INVALID_IF(
attachment->GetAspects() == (Aspect::Depth | Aspect::Stencil) &&
depthStencilAttachment->depthReadOnly !=
depthStencilAttachment->stencilReadOnly,
depthStencilAttachment->depthReadOnly != depthStencilAttachment->stencilReadOnly,
"depthReadOnly (%u) and stencilReadOnly (%u) must be the same when texture aspect "
"is 'all'.",
depthStencilAttachment->depthReadOnly, depthStencilAttachment->stencilReadOnly);
@@ -326,8 +312,7 @@ namespace dawn::native {
"no depth aspect or depthReadOnly (%u) is true.",
depthStencilAttachment->depthLoadOp, attachment,
depthStencilAttachment->depthReadOnly);
DAWN_INVALID_IF(
depthStencilAttachment->depthStoreOp != wgpu::StoreOp::Undefined,
DAWN_INVALID_IF(depthStencilAttachment->depthStoreOp != wgpu::StoreOp::Undefined,
"depthStoreOp (%s) must not be set if the attachment (%s) has no depth "
"aspect or depthReadOnly (%u) is true.",
depthStencilAttachment->depthStoreOp, attachment,
@@ -372,15 +357,13 @@ namespace dawn::native {
}
} else {
DAWN_TRY(ValidateLoadOp(depthStencilAttachment->stencilLoadOp));
DAWN_INVALID_IF(
depthStencilAttachment->stencilLoadOp == wgpu::LoadOp::Undefined,
DAWN_INVALID_IF(depthStencilAttachment->stencilLoadOp == wgpu::LoadOp::Undefined,
"stencilLoadOp (%s) must be set if the attachment (%s) has a stencil "
"aspect and stencilReadOnly (%u) is false.",
depthStencilAttachment->stencilLoadOp, attachment,
depthStencilAttachment->stencilReadOnly);
DAWN_TRY(ValidateStoreOp(depthStencilAttachment->stencilStoreOp));
DAWN_INVALID_IF(
depthStencilAttachment->stencilStoreOp == wgpu::StoreOp::Undefined,
DAWN_INVALID_IF(depthStencilAttachment->stencilStoreOp == wgpu::StoreOp::Undefined,
"stencilStoreOp (%s) must be set if the attachment (%s) has a stencil "
"aspect and stencilReadOnly (%u) is false.",
depthStencilAttachment->stencilStoreOp, attachment,
@@ -389,8 +372,7 @@ namespace dawn::native {

if (!std::isnan(depthStencilAttachment->clearDepth)) {
// TODO(dawn:1269): Remove this branch after the deprecation period.
device->EmitDeprecationWarning(
"clearDepth is deprecated, prefer depthClearValue instead.");
device->EmitDeprecationWarning("clearDepth is deprecated, prefer depthClearValue instead.");
} else {
DAWN_INVALID_IF(depthStencilAttachment->depthLoadOp == wgpu::LoadOp::Clear &&
std::isnan(depthStencilAttachment->depthClearValue),
@@ -436,9 +418,9 @@ namespace dawn::native {

bool isAllColorAttachmentNull = true;
for (uint32_t i = 0; i < descriptor->colorAttachmentCount; ++i) {
DAWN_TRY_CONTEXT(ValidateRenderPassColorAttachment(
device, descriptor->colorAttachments[i], width, height,
sampleCount, usageValidationMode),
DAWN_TRY_CONTEXT(
ValidateRenderPassColorAttachment(device, descriptor->colorAttachments[i], width,
height, sampleCount, usageValidationMode),
"validating colorAttachments[%u].", i);
if (descriptor->colorAttachments[i].view) {
isAllColorAttachmentNull = false;
@@ -447,8 +429,8 @@ namespace dawn::native {

if (descriptor->depthStencilAttachment != nullptr) {
DAWN_TRY_CONTEXT(ValidateRenderPassDepthStencilAttachment(
device, descriptor->depthStencilAttachment, width, height,
sampleCount, usageValidationMode),
device, descriptor->depthStencilAttachment, width, height, sampleCount,
usageValidationMode),
"validating depthStencilAttachment.");
} else {
DAWN_INVALID_IF(
@@ -459,9 +441,9 @@ namespace dawn::native {
if (descriptor->occlusionQuerySet != nullptr) {
DAWN_TRY(device->ValidateObject(descriptor->occlusionQuerySet));

DAWN_INVALID_IF(
descriptor->occlusionQuerySet->GetQueryType() != wgpu::QueryType::Occlusion,
"The occlusionQuerySet %s type (%s) is not %s.", descriptor->occlusionQuerySet,
DAWN_INVALID_IF(descriptor->occlusionQuerySet->GetQueryType() != wgpu::QueryType::Occlusion,
"The occlusionQuerySet %s type (%s) is not %s.",
descriptor->occlusionQuerySet,
descriptor->occlusionQuerySet->GetQueryType(), wgpu::QueryType::Occlusion);
}
@@ -479,10 +461,9 @@ namespace dawn::native {
DAWN_ASSERT(querySet != nullptr);
uint32_t queryIndex = descriptor->timestampWrites[i].queryIndex;
DAWN_TRY_CONTEXT(ValidateTimestampQuery(device, querySet, queryIndex),
"validating querySet and queryIndex of timestampWrites[%u].",
i);
DAWN_TRY_CONTEXT(ValidateRenderPassTimestampLocation(
descriptor->timestampWrites[i].location),
"validating querySet and queryIndex of timestampWrites[%u].", i);
DAWN_TRY_CONTEXT(
ValidateRenderPassTimestampLocation(descriptor->timestampWrites[i].location),
"validating location of timestampWrites[%u].", i);

auto checkIt = usedQueries.find(querySet);
@@ -497,8 +478,8 @@ namespace dawn::native {
}
}

DAWN_INVALID_IF(descriptor->colorAttachmentCount == 0 &&
descriptor->depthStencilAttachment == nullptr,
DAWN_INVALID_IF(
descriptor->colorAttachmentCount == 0 && descriptor->depthStencilAttachment == nullptr,
"Render pass has no attachments.");

return {};
@@ -515,12 +496,11 @@ namespace dawn::native {

for (uint32_t i = 0; i < descriptor->timestampWriteCount; ++i) {
DAWN_ASSERT(descriptor->timestampWrites[i].querySet != nullptr);
DAWN_TRY_CONTEXT(
ValidateTimestampQuery(device, descriptor->timestampWrites[i].querySet,
DAWN_TRY_CONTEXT(ValidateTimestampQuery(device, descriptor->timestampWrites[i].querySet,
descriptor->timestampWrites[i].queryIndex),
"validating querySet and queryIndex of timestampWrites[%u].", i);
DAWN_TRY_CONTEXT(ValidateComputePassTimestampLocation(
descriptor->timestampWrites[i].location),
DAWN_TRY_CONTEXT(
ValidateComputePassTimestampLocation(descriptor->timestampWrites[i].location),
"validating location of timestampWrites[%u].", i);
}
}
@@ -534,8 +514,8 @@ namespace dawn::native {
const BufferBase* destination,
uint64_t destinationOffset) {
DAWN_INVALID_IF(firstQuery >= querySet->GetQueryCount(),
"First query (%u) exceeds the number of queries (%u) in %s.",
firstQuery, querySet->GetQueryCount(), querySet);
"First query (%u) exceeds the number of queries (%u) in %s.", firstQuery,
querySet->GetQueryCount(), querySet);

DAWN_INVALID_IF(
queryCount > querySet->GetQueryCount() - firstQuery,
@@ -544,20 +524,20 @@ namespace dawn::native {
firstQuery, queryCount, querySet->GetQueryCount(), querySet);

DAWN_INVALID_IF(destinationOffset % 256 != 0,
"The destination buffer %s offset (%u) is not a multiple of 256.",
destination, destinationOffset);
"The destination buffer %s offset (%u) is not a multiple of 256.", destination,
destinationOffset);

uint64_t bufferSize = destination->GetSize();
// The destination buffer must have enough storage, from destination offset, to contain
// the result of resolved queries
bool fitsInBuffer = destinationOffset <= bufferSize &&
(static_cast<uint64_t>(queryCount) * sizeof(uint64_t) <=
(bufferSize - destinationOffset));
bool fitsInBuffer =
destinationOffset <= bufferSize &&
(static_cast<uint64_t>(queryCount) * sizeof(uint64_t) <= (bufferSize - destinationOffset));
DAWN_INVALID_IF(
!fitsInBuffer,
"The resolved %s data size (%u) would not fit in %s with size %u at the offset %u.",
querySet, static_cast<uint64_t>(queryCount) * sizeof(uint64_t), destination,
bufferSize, destinationOffset);
querySet, static_cast<uint64_t>(queryCount) * sizeof(uint64_t), destination, bufferSize,
destinationOffset);

return {};
}
@@ -583,8 +563,7 @@ namespace dawn::native {
Ref<BufferBase> availabilityBuffer;
DAWN_TRY_ASSIGN(availabilityBuffer, device->CreateBuffer(&availabilityDesc));

DAWN_TRY(device->GetQueue()->WriteBuffer(availabilityBuffer.Get(), 0,
availability.data(),
DAWN_TRY(device->GetQueue()->WriteBuffer(availabilityBuffer.Get(), 0, availability.data(),
availability.size() * sizeof(uint32_t)));

// Timestamp params uniform buffer
@@ -597,11 +576,10 @@ namespace dawn::native {
Ref<BufferBase> paramsBuffer;
DAWN_TRY_ASSIGN(paramsBuffer, device->CreateBuffer(&parmsDesc));

DAWN_TRY(
device->GetQueue()->WriteBuffer(paramsBuffer.Get(), 0, &params, sizeof(params)));
DAWN_TRY(device->GetQueue()->WriteBuffer(paramsBuffer.Get(), 0, &params, sizeof(params)));

return EncodeConvertTimestampsToNanoseconds(
encoder, destination, availabilityBuffer.Get(), paramsBuffer.Get());
return EncodeConvertTimestampsToNanoseconds(encoder, destination, availabilityBuffer.Get(),
paramsBuffer.Get());
}

bool IsReadOnlyDepthStencilAttachment(
@@ -702,13 +680,11 @@ namespace dawn::native {

// Implementation of the API's command recording methods

ComputePassEncoder* CommandEncoder::APIBeginComputePass(
const ComputePassDescriptor* descriptor) {
ComputePassEncoder* CommandEncoder::APIBeginComputePass(const ComputePassDescriptor* descriptor) {
return BeginComputePass(descriptor).Detach();
}

Ref<ComputePassEncoder> CommandEncoder::BeginComputePass(
const ComputePassDescriptor* descriptor) {
Ref<ComputePassEncoder> CommandEncoder::BeginComputePass(const ComputePassDescriptor* descriptor) {
DeviceBase* device = GetDevice();

std::vector<TimestampWrite> timestampWritesAtBeginning;
@@ -786,8 +762,8 @@ namespace dawn::native {
[&](CommandAllocator* allocator) -> MaybeError {
uint32_t sampleCount = 0;

DAWN_TRY(ValidateRenderPassDescriptor(device, descriptor, &width, &height,
&sampleCount, mUsageValidationMode));
DAWN_TRY(ValidateRenderPassDescriptor(device, descriptor, &width, &height, &sampleCount,
mUsageValidationMode));

ASSERT(width > 0 && height > 0 && sampleCount > 0);
@@ -947,8 +923,7 @@ namespace dawn::native {

DAWN_TRY_CONTEXT(ValidateCopySizeFitsInBuffer(source, sourceOffset, size),
"validating source %s copy size.", source);
DAWN_TRY_CONTEXT(
ValidateCopySizeFitsInBuffer(destination, destinationOffset, size),
DAWN_TRY_CONTEXT(ValidateCopySizeFitsInBuffer(destination, destinationOffset, size),
"validating destination %s copy size.", destination);
DAWN_TRY(ValidateB2BCopyAlignment(size, sourceOffset, destinationOffset));
@@ -987,8 +962,7 @@ namespace dawn::native {
"validating source %s usage.", source->buffer);

DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *destination, *copySize));
DAWN_TRY_CONTEXT(
ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst,
DAWN_TRY_CONTEXT(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst,
mUsageValidationMode),
"validating destination %s usage.", destination->texture);
DAWN_TRY(ValidateTextureSampleCountInBufferCopyCommands(destination->texture));
@@ -1031,8 +1005,8 @@ namespace dawn::native {

return {};
},
"encoding %s.CopyBufferToTexture(%s, %s, %s).", this, source->buffer,
destination->texture, copySize);
"encoding %s.CopyBufferToTexture(%s, %s, %s).", this, source->buffer, destination->texture,
copySize);
}

void CommandEncoder::APICopyTextureToBuffer(const ImageCopyTexture* source,
@@ -1050,8 +1024,7 @@ namespace dawn::native {
DAWN_TRY(ValidateTextureDepthStencilToBufferCopyRestrictions(*source));

DAWN_TRY(ValidateImageCopyBuffer(GetDevice(), *destination));
DAWN_TRY_CONTEXT(
ValidateCanUseAs(destination->buffer, wgpu::BufferUsage::CopyDst),
DAWN_TRY_CONTEXT(ValidateCanUseAs(destination->buffer, wgpu::BufferUsage::CopyDst),
"validating destination %s usage.", destination->buffer);

// We validate texture copy range before validating linear texture data,
@@ -1090,8 +1063,8 @@ namespace dawn::native {

return {};
},
"encoding %s.CopyTextureToBuffer(%s, %s, %s).", this, source->texture,
destination->buffer, copySize);
"encoding %s.CopyTextureToBuffer(%s, %s, %s).", this, source->texture, destination->buffer,
copySize);
}

void CommandEncoder::APICopyTextureToTexture(const ImageCopyTexture* source,
@@ -1176,8 +1149,8 @@ namespace dawn::native {

uint64_t bufferSize = buffer->GetSize();
DAWN_INVALID_IF(offset > bufferSize,
"Buffer offset (%u) is larger than the size (%u) of %s.",
offset, bufferSize, buffer);
"Buffer offset (%u) is larger than the size (%u) of %s.", offset,
bufferSize, buffer);

uint64_t remainingSize = bufferSize - offset;
if (size == wgpu::kWholeSize) {
@ -1245,8 +1218,7 @@ namespace dawn::native {
|
|||
this,
|
||||
[&](CommandAllocator* allocator) -> MaybeError {
|
||||
if (GetDevice()->IsValidationEnabled()) {
|
||||
DAWN_INVALID_IF(
|
||||
mDebugGroupStackSize == 0,
|
||||
DAWN_INVALID_IF(mDebugGroupStackSize == 0,
|
||||
"PopDebugGroup called when no debug groups are currently pushed.");
|
||||
}
|
||||
allocator->Allocate<PopDebugGroupCmd>(Command::PopDebugGroup);
|
||||
|
@ -1315,8 +1287,8 @@ namespace dawn::native {
|
|||
|
||||
return {};
|
||||
},
|
||||
"encoding %s.ResolveQuerySet(%s, %u, %u, %s, %u).", this, querySet, firstQuery,
|
||||
queryCount, destination, destinationOffset);
|
||||
"encoding %s.ResolveQuerySet(%s, %u, %u, %s, %u).", this, querySet, firstQuery, queryCount,
|
||||
destination, destinationOffset);
|
||||
}
|
||||
|
||||
void CommandEncoder::APIWriteBuffer(BufferBase* buffer,
|
||||
|
|
|
@ -162,8 +162,8 @@ namespace dawn::native {
|
|||
uint64_t bufferSize = buffer->GetSize();
|
||||
bool fitsInBuffer = offset <= bufferSize && (size <= (bufferSize - offset));
|
||||
DAWN_INVALID_IF(!fitsInBuffer,
|
||||
"Copy range (offset: %u, size: %u) does not fit in %s size (%u).", offset,
|
||||
size, buffer.Get(), bufferSize);
|
||||
"Copy range (offset: %u, size: %u) does not fit in %s size (%u).", offset, size,
|
||||
buffer.Get(), bufferSize);
|
||||
|
||||
return {};
|
||||
}
|
||||
|
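Aside: the fitsInBuffer expression in the hunk above is an overflow-safe range check. Comparing size against bufferSize - offset only after establishing offset <= bufferSize avoids the wraparound a naive offset + size <= bufferSize could hit in unsigned arithmetic. A minimal standalone sketch of the same idea (hypothetical helper, not Dawn source):

    #include <cstdint>

    // Overflow-safe: offset + size is never computed directly.
    bool FitsInBuffer(uint64_t offset, uint64_t size, uint64_t bufferSize) {
        return offset <= bufferSize && size <= bufferSize - offset;
    }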
@@ -201,8 +201,7 @@ namespace dawn::native {
     // TODO(dawn:563): Right now kCopyStrideUndefined will be formatted as a large value in the
     // validation message. Investigate ways to make it print as a more readable symbol.
     DAWN_INVALID_IF(
-        copyExtent.depthOrArrayLayers > 1 &&
-            (layout.bytesPerRow == wgpu::kCopyStrideUndefined ||
-             layout.rowsPerImage == wgpu::kCopyStrideUndefined),
+        copyExtent.depthOrArrayLayers > 1 && (layout.bytesPerRow == wgpu::kCopyStrideUndefined ||
+                                              layout.rowsPerImage == wgpu::kCopyStrideUndefined),
         "Copy depth (%u) is > 1, but bytesPerRow (%u) or rowsPerImage (%u) are not specified.",
         copyExtent.depthOrArrayLayers, layout.bytesPerRow, layout.rowsPerImage);
@@ -214,8 +213,7 @@ namespace dawn::native {
     // Validation for other members in layout:
     ASSERT(copyExtent.width % blockInfo.width == 0);
     uint32_t widthInBlocks = copyExtent.width / blockInfo.width;
-    ASSERT(Safe32x32(widthInBlocks, blockInfo.byteSize) <=
-           std::numeric_limits<uint32_t>::max());
+    ASSERT(Safe32x32(widthInBlocks, blockInfo.byteSize) <= std::numeric_limits<uint32_t>::max());
     uint32_t bytesInLastRow = widthInBlocks * blockInfo.byteSize;
 
     // These != wgpu::kCopyStrideUndefined checks are technically redundant with the > checks,
@@ -225,18 +223,18 @@ namespace dawn::native {
                     "The byte size of each row (%u) is > bytesPerRow (%u).", bytesInLastRow,
                     layout.bytesPerRow);
 
-    DAWN_INVALID_IF(layout.rowsPerImage != wgpu::kCopyStrideUndefined &&
-                        heightInBlocks > layout.rowsPerImage,
-                    "The height of each image in blocks (%u) is > rowsPerImage (%u).",
-                    heightInBlocks, layout.rowsPerImage);
+    DAWN_INVALID_IF(
+        layout.rowsPerImage != wgpu::kCopyStrideUndefined && heightInBlocks > layout.rowsPerImage,
+        "The height of each image in blocks (%u) is > rowsPerImage (%u).", heightInBlocks,
+        layout.rowsPerImage);
 
     // We compute required bytes in copy after validating texel block alignments
     // because the divisibility conditions are necessary for the algorithm to be valid,
     // also the bytesPerRow bound is necessary to avoid overflows.
     uint64_t requiredBytesInCopy;
-    DAWN_TRY_ASSIGN(requiredBytesInCopy,
-                    ComputeRequiredBytesInCopy(blockInfo, copyExtent, layout.bytesPerRow,
-                                               layout.rowsPerImage));
+    DAWN_TRY_ASSIGN(
+        requiredBytesInCopy,
+        ComputeRequiredBytesInCopy(blockInfo, copyExtent, layout.bytesPerRow, layout.rowsPerImage));
 
     bool fitsInData =
         layout.offset <= byteSize && (requiredBytesInCopy <= (byteSize - layout.offset));
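Aside: the comment in the hunk above explains why ComputeRequiredBytesInCopy runs only after the block-alignment checks. A rough sketch of how such a bound is typically derived for a bytesPerRow/rowsPerImage layout (hypothetical helper; Dawn's real implementation lives elsewhere and takes blockInfo/copyExtent directly):

    #include <cstdint>

    uint64_t RequiredBytesInCopy(uint64_t bytesPerRow, uint64_t rowsPerImage,
                                 uint64_t heightInBlocks, uint64_t depth,
                                 uint64_t bytesInLastRow) {
        if (depth == 0) {
            return 0;  // empty copies need no data
        }
        // All layers but the last occupy full rowsPerImage-sized slices; the
        // last layer only needs its rows, and its last row only the bytes
        // actually covered by blocks.
        uint64_t bytesPerImage = bytesPerRow * rowsPerImage;
        uint64_t required = bytesPerImage * (depth - 1);
        if (heightInBlocks > 0) {
            required += bytesPerRow * (heightInBlocks - 1) + bytesInLastRow;
        }
        return required;
    }

The preceding divisibility checks make heightInBlocks and bytesInLastRow well defined, and bounding bytesInLastRow by bytesPerRow is what keeps the multiplications from overflowing once the widths were validated to fit in 32 bits.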
@@ -272,8 +270,7 @@ namespace dawn::native {
                     textureCopy.mipLevel, texture->GetNumMipLevels(), texture);
 
     DAWN_TRY(ValidateTextureAspect(textureCopy.aspect));
-    DAWN_INVALID_IF(
-        SelectFormatAspects(texture->GetFormat(), textureCopy.aspect) == Aspect::None,
-        "%s format (%s) does not have the selected aspect (%s).", texture,
-        texture->GetFormat().format, textureCopy.aspect);
+    DAWN_INVALID_IF(SelectFormatAspects(texture->GetFormat(), textureCopy.aspect) == Aspect::None,
+                    "%s format (%s) does not have the selected aspect (%s).", texture,
+                    texture->GetFormat().format, textureCopy.aspect);
 
@@ -287,8 +284,8 @@ namespace dawn::native {
             "Copy origin (%s) and size (%s) does not cover the entire subresource (origin: "
             "[x: 0, y: 0], size: %s) of %s. The entire subresource must be copied when the "
             "format (%s) is a depth/stencil format or the sample count (%u) is > 1.",
-            &textureCopy.origin, &copySize, &subresourceSize, texture,
-            texture->GetFormat().format, texture->GetSampleCount());
+            &textureCopy.origin, &copySize, &subresourceSize, texture, texture->GetFormat().format,
+            texture->GetSampleCount());
     }
 
     return {};
@@ -311,8 +308,7 @@ namespace dawn::native {
     DAWN_INVALID_IF(
         static_cast<uint64_t>(textureCopy.origin.x) + static_cast<uint64_t>(copySize.width) >
             static_cast<uint64_t>(mipSize.width) ||
-            static_cast<uint64_t>(textureCopy.origin.y) +
-                    static_cast<uint64_t>(copySize.height) >
+            static_cast<uint64_t>(textureCopy.origin.y) + static_cast<uint64_t>(copySize.height) >
                 static_cast<uint64_t>(mipSize.height) ||
             static_cast<uint64_t>(textureCopy.origin.z) +
                     static_cast<uint64_t>(copySize.depthOrArrayLayers) >
@@ -340,8 +336,7 @@ namespace dawn::native {
         "copySize.width (%u) is not a multiple of compressed texture format block width "
         "(%u).",
         copySize.width, blockInfo.width);
-    DAWN_INVALID_IF(
-        copySize.height % blockInfo.height != 0,
-        "copySize.height (%u) is not a multiple of compressed texture format block "
-        "height (%u).",
-        copySize.height, blockInfo.height);
+    DAWN_INVALID_IF(copySize.height % blockInfo.height != 0,
+                    "copySize.height (%u) is not a multiple of compressed texture format block "
+                    "height (%u).",
+                    copySize.height, blockInfo.height);
@@ -388,8 +383,8 @@ namespace dawn::native {
                 return {};
             default:
                 DAWN_INVALID_IF(aspectUsed == Aspect::Depth,
-                                "Cannot copy into the depth aspect of %s with format %s.",
-                                dst.texture, format.format);
+                                "Cannot copy into the depth aspect of %s with format %s.", dst.texture,
+                                format.format);
                 break;
         }
 
@@ -427,14 +422,13 @@ namespace dawn::native {
             return DAWN_FORMAT_VALIDATION_ERROR("Copy is from %s to itself.", src.texture);
 
         case wgpu::TextureDimension::e2D:
-            DAWN_INVALID_IF(src.mipLevel == dst.mipLevel &&
-                                IsRangeOverlapped(src.origin.z, dst.origin.z,
-                                                  copySize.depthOrArrayLayers),
-                            "Copy source and destination are overlapping layer ranges "
-                            "([%u, %u) and [%u, %u)) of %s mip level %u",
-                            src.origin.z, src.origin.z + copySize.depthOrArrayLayers,
-                            dst.origin.z, dst.origin.z + copySize.depthOrArrayLayers,
-                            src.texture, src.mipLevel);
+            DAWN_INVALID_IF(
+                src.mipLevel == dst.mipLevel &&
+                    IsRangeOverlapped(src.origin.z, dst.origin.z, copySize.depthOrArrayLayers),
+                "Copy source and destination are overlapping layer ranges "
+                "([%u, %u) and [%u, %u)) of %s mip level %u",
+                src.origin.z, src.origin.z + copySize.depthOrArrayLayers, dst.origin.z,
+                dst.origin.z + copySize.depthOrArrayLayers, src.texture, src.mipLevel);
             break;
 
         case wgpu::TextureDimension::e3D:
@@ -453,8 +447,7 @@ namespace dawn::native {
                                                  const Extent3D& copySize) {
     // Metal requires texture-to-texture copies happens between texture formats that equal to
     // each other or only have diff on srgb-ness.
-    DAWN_INVALID_IF(
-        !src.texture->GetFormat().CopyCompatibleWith(dst.texture->GetFormat()),
-        "Source %s format (%s) and destination %s format (%s) are not copy compatible.",
-        src.texture, src.texture->GetFormat().format, dst.texture,
-        dst.texture->GetFormat().format);
+    DAWN_INVALID_IF(!src.texture->GetFormat().CopyCompatibleWith(dst.texture->GetFormat()),
+                    "Source %s format (%s) and destination %s format (%s) are not copy compatible.",
+                    src.texture, src.texture->GetFormat().format, dst.texture,
+                    dst.texture->GetFormat().format);
@@ -483,9 +476,8 @@ namespace dawn::native {
 
 MaybeError ValidateCanUseAs(const BufferBase* buffer, wgpu::BufferUsage usage) {
     ASSERT(wgpu::HasZeroOrOneBits(usage));
-    DAWN_INVALID_IF(!(buffer->GetUsageExternalOnly() & usage),
-                    "%s usage (%s) doesn't include %s.", buffer, buffer->GetUsageExternalOnly(),
-                    usage);
+    DAWN_INVALID_IF(!(buffer->GetUsageExternalOnly() & usage), "%s usage (%s) doesn't include %s.",
+                    buffer, buffer->GetUsageExternalOnly(), usage);
     return {};
 }
 

@@ -62,8 +62,7 @@ namespace dawn::native {
                 break;
             }
             case Command::CopyTextureToTexture: {
-                CopyTextureToTextureCmd* copy =
-                    commands->NextCommand<CopyTextureToTextureCmd>();
+                CopyTextureToTextureCmd* copy = commands->NextCommand<CopyTextureToTextureCmd>();
                 copy->~CopyTextureToTextureCmd();
                 break;
             }

@@ -52,8 +52,8 @@ namespace dawn::native {
     ASSERT(mCompilationInfo.messages == nullptr);
 
     mMessageStrings.push_back(message);
-    mMessages.push_back({nullptr, nullptr, static_cast<WGPUCompilationMessageType>(type),
-                         lineNum, linePos, offset, length});
+    mMessages.push_back({nullptr, nullptr, static_cast<WGPUCompilationMessageType>(type), lineNum,
+                         linePos, offset, length});
 }
 
 void OwnedCompilationMessages::AddMessage(const tint::diag::Diagnostic& diagnostic) {
@@ -114,8 +114,8 @@ namespace dawn::native {
         mMessageStrings.push_back(diagnostic.message);
     }
 
-    mMessages.push_back({nullptr, nullptr, tintSeverityToMessageType(diagnostic.severity),
-                         lineNum, linePos, offset, length});
+    mMessages.push_back({nullptr, nullptr, tintSeverityToMessageType(diagnostic.severity), lineNum,
+                         linePos, offset, length});
 }
 
 void OwnedCompilationMessages::AddMessages(const tint::diag::List& diagnostics) {

@@ -82,8 +82,7 @@ namespace dawn::native {
         )"));
 
         Ref<BindGroupLayoutBase> bindGroupLayout;
-        DAWN_TRY_ASSIGN(
-            bindGroupLayout,
-            utils::MakeBindGroupLayout(
-                device,
-                {
+        DAWN_TRY_ASSIGN(bindGroupLayout,
+                        utils::MakeBindGroupLayout(
+                            device,
+                            {
@@ -94,8 +93,7 @@ namespace dawn::native {
                             /* allowInternalBinding */ true));
 
         Ref<PipelineLayoutBase> pipelineLayout;
-        DAWN_TRY_ASSIGN(pipelineLayout,
-                        utils::MakeBasicPipelineLayout(device, bindGroupLayout));
+        DAWN_TRY_ASSIGN(pipelineLayout, utils::MakeBasicPipelineLayout(device, bindGroupLayout));
 
         ComputePipelineDescriptor computePipelineDescriptor = {};
         computePipelineDescriptor.layout = pipelineLayout.Get();
@@ -128,16 +126,15 @@ namespace dawn::native {
                                                  CommandEncoder* commandEncoder,
                                                  EncodingContext* encodingContext,
                                                  std::vector<TimestampWrite> timestampWritesAtEnd) {
-    return AcquireRef(new ComputePassEncoder(device, descriptor, commandEncoder,
-                                             encodingContext, std::move(timestampWritesAtEnd)));
+    return AcquireRef(new ComputePassEncoder(device, descriptor, commandEncoder, encodingContext,
+                                             std::move(timestampWritesAtEnd)));
 }
 
 ComputePassEncoder::ComputePassEncoder(DeviceBase* device,
                                        CommandEncoder* commandEncoder,
                                        EncodingContext* encodingContext,
                                        ErrorTag errorTag)
-    : ProgrammableEncoder(device, encodingContext, errorTag), mCommandEncoder(commandEncoder) {
-}
+    : ProgrammableEncoder(device, encodingContext, errorTag), mCommandEncoder(commandEncoder) {}
 
 // static
 Ref<ComputePassEncoder> ComputePassEncoder::MakeError(DeviceBase* device,
@@ -258,12 +255,10 @@ namespace dawn::native {
     Ref<BindGroupLayoutBase> layout;
     DAWN_TRY_ASSIGN(layout, validationPipeline->GetBindGroupLayout(0));
 
-    uint32_t storageBufferOffsetAlignment =
-        device->GetLimits().v1.minStorageBufferOffsetAlignment;
+    uint32_t storageBufferOffsetAlignment = device->GetLimits().v1.minStorageBufferOffsetAlignment;
 
     // Let the offset be the indirectOffset, aligned down to |storageBufferOffsetAlignment|.
-    const uint32_t clientOffsetFromAlignedBoundary =
-        indirectOffset % storageBufferOffsetAlignment;
+    const uint32_t clientOffsetFromAlignedBoundary = indirectOffset % storageBufferOffsetAlignment;
     const uint64_t clientOffsetAlignedDown = indirectOffset - clientOffsetFromAlignedBoundary;
     const uint64_t clientIndirectBindingOffset = clientOffsetAlignedDown;
 
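Aside: the hunk above splits indirectOffset into an aligned base plus a small remainder so the indirect buffer can be bound at a valid storage-buffer offset. The arithmetic is the standard align-down idiom (sketch with hypothetical names, not Dawn source):

    #include <cstdint>
    #include <cassert>

    // Align-down: the largest multiple of alignment that is <= offset.
    uint64_t AlignDown(uint64_t offset, uint64_t alignment) {
        assert(alignment != 0);
        return offset - (offset % alignment);
    }

For example, an indirectOffset of 300 with a 256-byte alignment binds at 256, and the validation shader reads the params at the relative offset 300 % 256 == 44.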
@@ -291,8 +286,8 @@ namespace dawn::native {
         params.enableValidation = static_cast<uint32_t>(IsValidationEnabled());
         params.duplicateNumWorkgroups = static_cast<uint32_t>(shouldDuplicateNumWorkgroups);
 
-        DAWN_TRY_ASSIGN(uniformBuffer, utils::CreateBufferFromData(
-                                           device, wgpu::BufferUsage::Uniform, {params}));
+        DAWN_TRY_ASSIGN(uniformBuffer,
+                        utils::CreateBufferFromData(device, wgpu::BufferUsage::Uniform, {params}));
     }
 
     // Reserve space in the scratch buffer to hold the validated indirect params.
@@ -325,8 +320,7 @@ namespace dawn::native {
     return std::make_pair(std::move(validatedIndirectBuffer), uint64_t(0));
 }
 
-void ComputePassEncoder::APIDispatchIndirect(BufferBase* indirectBuffer,
-                                             uint64_t indirectOffset) {
+void ComputePassEncoder::APIDispatchIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset) {
     GetDevice()->EmitDeprecationWarning(
         "dispatchIndirect() has been deprecated. Use dispatchWorkgroupsIndirect() instead.");
     APIDispatchWorkgroupsIndirect(indirectBuffer, indirectOffset);
@@ -396,8 +390,7 @@ namespace dawn::native {
             dispatch->indirectOffset = indirectOffset;
             return {};
         },
-        "encoding %s.DispatchWorkgroupsIndirect(%s, %u).", this, indirectBuffer,
-        indirectOffset);
+        "encoding %s.DispatchWorkgroupsIndirect(%s, %u).", this, indirectBuffer, indirectOffset);
 }
 
 void ComputePassEncoder::APISetPipeline(ComputePipelineBase* pipeline) {
@@ -429,15 +422,13 @@ namespace dawn::native {
             BindGroupIndex groupIndex(groupIndexIn);
 
             if (IsValidationEnabled()) {
-                DAWN_TRY(ValidateSetBindGroup(groupIndex, group, dynamicOffsetCount,
-                                              dynamicOffsets));
+                DAWN_TRY(
+                    ValidateSetBindGroup(groupIndex, group, dynamicOffsetCount, dynamicOffsets));
             }
 
             mUsageTracker.AddResourcesReferencedByBindGroup(group);
-            RecordSetBindGroup(allocator, groupIndex, group, dynamicOffsetCount,
-                               dynamicOffsets);
-            mCommandBufferState.SetBindGroup(groupIndex, group, dynamicOffsetCount,
-                                             dynamicOffsets);
+            RecordSetBindGroup(allocator, groupIndex, group, dynamicOffsetCount, dynamicOffsets);
+            mCommandBufferState.SetBindGroup(groupIndex, group, dynamicOffsetCount, dynamicOffsets);
 
             return {};
         },

@@ -40,12 +40,12 @@ namespace dawn::native {
 
 ComputePipelineBase::ComputePipelineBase(DeviceBase* device,
                                          const ComputePipelineDescriptor* descriptor)
-    : PipelineBase(device,
-                   descriptor->layout,
-                   descriptor->label,
-                   {{SingleShaderStage::Compute, descriptor->compute.module,
-                     descriptor->compute.entryPoint, descriptor->compute.constantCount,
-                     descriptor->compute.constants}}) {
+    : PipelineBase(
+          device,
+          descriptor->layout,
+          descriptor->label,
+          {{SingleShaderStage::Compute, descriptor->compute.module, descriptor->compute.entryPoint,
+            descriptor->compute.constantCount, descriptor->compute.constants}}) {
     SetContentHash(ComputeContentHash());
     TrackInDevice();
 
@@ -58,8 +58,7 @@ namespace dawn::native {
 }
 
 ComputePipelineBase::ComputePipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)
-    : PipelineBase(device, tag) {
-}
+    : PipelineBase(device, tag) {}
 
 ComputePipelineBase::~ComputePipelineBase() = default;
 
@@ -75,8 +74,7 @@ namespace dawn::native {
 class ErrorComputePipeline final : public ComputePipelineBase {
   public:
     explicit ErrorComputePipeline(DeviceBase* device)
-        : ComputePipelineBase(device, ObjectBase::kError) {
-    }
+        : ComputePipelineBase(device, ObjectBase::kError) {}
 
     MaybeError Initialize() override {
         UNREACHABLE();

@@ -231,8 +231,8 @@ namespace dawn::native {
             case wgpu::TextureFormat::RGBA8Unorm:
                 break;
             default:
-                return DAWN_FORMAT_VALIDATION_ERROR(
-                    "Source texture format (%s) is not supported.", srcFormat);
+                return DAWN_FORMAT_VALIDATION_ERROR("Source texture format (%s) is not supported.",
+                                                    srcFormat);
         }
 
         switch (dstFormat) {
@@ -251,15 +251,14 @@ namespace dawn::native {
             case wgpu::TextureFormat::RGBA32Float:
                 break;
             default:
-                return DAWN_FORMAT_VALIDATION_ERROR(
-                    "Destination texture format (%s) is not supported.", dstFormat);
+                return DAWN_FORMAT_VALIDATION_ERROR("Destination texture format (%s) is not supported.",
+                                                    dstFormat);
         }
 
         return {};
     }
 
-    RenderPipelineBase* GetCachedPipeline(InternalPipelineStore* store,
-                                          wgpu::TextureFormat dstFormat) {
+RenderPipelineBase* GetCachedPipeline(InternalPipelineStore* store, wgpu::TextureFormat dstFormat) {
     auto pipeline = store->copyTextureForBrowserPipelines.find(dstFormat);
     if (pipeline != store->copyTextureForBrowserPipelines.end()) {
         return pipeline->second.Get();
@@ -275,8 +274,7 @@ namespace dawn::native {
     if (GetCachedPipeline(store, dstFormat) == nullptr) {
         // Create vertex shader module if not cached before.
        if (store->copyTextureForBrowser == nullptr) {
-            DAWN_TRY_ASSIGN(
-                store->copyTextureForBrowser,
-                utils::CreateShaderModule(device, sCopyTextureForBrowserShader));
+            DAWN_TRY_ASSIGN(store->copyTextureForBrowser,
+                            utils::CreateShaderModule(device, sCopyTextureForBrowserShader));
        }
 
@@ -330,8 +328,7 @@ namespace dawn::native {
     DAWN_INVALID_IF(source->texture->GetTextureState() == TextureBase::TextureState::Destroyed,
                     "Source texture %s is destroyed.", source->texture);
 
-    DAWN_INVALID_IF(
-        destination->texture->GetTextureState() == TextureBase::TextureState::Destroyed,
-        "Destination texture %s is destroyed.", destination->texture);
+    DAWN_INVALID_IF(destination->texture->GetTextureState() == TextureBase::TextureState::Destroyed,
+                    "Destination texture %s is destroyed.", destination->texture);
 
     DAWN_TRY_CONTEXT(ValidateImageCopyTexture(device, *source, *copySize),
@@ -346,10 +343,9 @@ namespace dawn::native {
 
     DAWN_TRY(ValidateTextureToTextureCopyCommonRestrictions(*source, *destination, *copySize));
 
-    DAWN_INVALID_IF(source->origin.z > 0, "Source has a non-zero z origin (%u).",
-                    source->origin.z);
-    DAWN_INVALID_IF(copySize->depthOrArrayLayers > 1,
-                    "Copy is for more than one array layer (%u)", copySize->depthOrArrayLayers);
+    DAWN_INVALID_IF(source->origin.z > 0, "Source has a non-zero z origin (%u).", source->origin.z);
+    DAWN_INVALID_IF(copySize->depthOrArrayLayers > 1, "Copy is for more than one array layer (%u)",
+                    copySize->depthOrArrayLayers);
 
     DAWN_INVALID_IF(
         source->texture->GetSampleCount() > 1 || destination->texture->GetSampleCount() > 1,
@@ -461,8 +457,7 @@ namespace dawn::native {
     constexpr uint32_t kDecodeForSrgbDstFormat = 0x20;
 
     if (options->srcAlphaMode == wgpu::AlphaMode::Premultiplied) {
-        if (options->needsColorSpaceConversion ||
-            options->srcAlphaMode != options->dstAlphaMode) {
+        if (options->needsColorSpaceConversion || options->srcAlphaMode != options->dstAlphaMode) {
             stepsMask |= kUnpremultiplyStep;
         }
     }
@@ -471,9 +466,9 @@ namespace dawn::native {
         stepsMask |= kDecodeToLinearStep;
         const float* decodingParams = options->srcTransferFunctionParameters;
 
-        uniformData.gammaDecodingParams = {
-            decodingParams[0], decodingParams[1], decodingParams[2], decodingParams[3],
-            decodingParams[4], decodingParams[5], decodingParams[6]};
+        uniformData.gammaDecodingParams = {decodingParams[0], decodingParams[1], decodingParams[2],
+                                           decodingParams[3], decodingParams[4], decodingParams[5],
+                                           decodingParams[6]};
 
         stepsMask |= kConvertToDstGamutStep;
         const float* matrix = options->conversionMatrix;
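Aside: the seven packed floats above parameterize a piecewise gamma transfer function. A plausible reading, matching the {G, A, B, C, D, E, F} parameterization used by color-management libraries such as skcms (whether Dawn packs them in exactly this order is an assumption here):

    #include <cmath>

    // Piecewise transfer function: linear segment below the crossover D,
    // power-law segment above it.
    float ApplyTransferFunction(float x, const float p[7]) {
        const float g = p[0], a = p[1], b = p[2], c = p[3];
        const float d = p[4], e = p[5], f = p[6];
        return (x < d) ? (c * x + f) : (std::pow(a * x + b, g) + e);
    }

Decoding to linear and re-encoding to the destination gamma correspond to the kDecodeToLinearStep and kEncodeToGammaStep bits toggled in these hunks.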
@@ -495,14 +490,13 @@ namespace dawn::native {
         stepsMask |= kEncodeToGammaStep;
         const float* encodingParams = options->dstTransferFunctionParameters;
 
-        uniformData.gammaEncodingParams = {
-            encodingParams[0], encodingParams[1], encodingParams[2], encodingParams[3],
-            encodingParams[4], encodingParams[5], encodingParams[6]};
+        uniformData.gammaEncodingParams = {encodingParams[0], encodingParams[1], encodingParams[2],
+                                           encodingParams[3], encodingParams[4], encodingParams[5],
+                                           encodingParams[6]};
     }
 
     if (options->dstAlphaMode == wgpu::AlphaMode::Premultiplied) {
-        if (options->needsColorSpaceConversion ||
-            options->srcAlphaMode != options->dstAlphaMode) {
+        if (options->needsColorSpaceConversion || options->srcAlphaMode != options->dstAlphaMode) {
             stepsMask |= kPremultiplyStep;
         }
     }
@@ -531,8 +525,8 @@ namespace dawn::native {
     Ref<BufferBase> uniformBuffer;
     DAWN_TRY_ASSIGN(
         uniformBuffer,
-        utils::CreateBufferFromData(
-            device, wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Uniform, {uniformData}));
+        utils::CreateBufferFromData(device, wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Uniform,
+                                    {uniformData}));
 
     // Prepare binding 1 resource: sampler
     // Use default configuration, filterMode set to Nearest for min and mag.
@@ -551,8 +545,8 @@ namespace dawn::native {
 
     // Create bind group after all binding entries are set.
     Ref<BindGroupBase> bindGroup;
-    DAWN_TRY_ASSIGN(bindGroup, utils::MakeBindGroup(
-                                   device, layout,
-                                   {{0, uniformBuffer}, {1, sampler}, {2, srcTextureView}}));
+    DAWN_TRY_ASSIGN(bindGroup,
+                    utils::MakeBindGroup(device, layout,
+                                         {{0, uniformBuffer}, {1, sampler}, {2, srcTextureView}}));
 
     // Create command encoder.
@@ -567,8 +561,7 @@ namespace dawn::native {
     dstTextureViewDesc.arrayLayerCount = 1;
     Ref<TextureViewBase> dstView;
 
-    DAWN_TRY_ASSIGN(dstView,
-                    device->CreateTextureView(destination->texture, &dstTextureViewDesc));
+    DAWN_TRY_ASSIGN(dstView, device->CreateTextureView(destination->texture, &dstTextureViewDesc));
     // Prepare render pass color attachment descriptor.
     RenderPassColorAttachment colorAttachmentDesc;
 

@@ -26,11 +26,9 @@
 
 namespace dawn::native {
 
-CreatePipelineAsyncCallbackTaskBase::CreatePipelineAsyncCallbackTaskBase(
-    std::string errorMessage,
-    void* userdata)
-    : mErrorMessage(errorMessage), mUserData(userdata) {
-}
+CreatePipelineAsyncCallbackTaskBase::CreatePipelineAsyncCallbackTaskBase(std::string errorMessage,
+                                                                         void* userdata)
+    : mErrorMessage(errorMessage), mUserData(userdata) {}
 
 CreateComputePipelineAsyncCallbackTask::CreateComputePipelineAsyncCallbackTask(
     Ref<ComputePipelineBase> pipeline,
@@ -39,8 +37,7 @@ namespace dawn::native {
     void* userdata)
     : CreatePipelineAsyncCallbackTaskBase(errorMessage, userdata),
       mPipeline(std::move(pipeline)),
-      mCreateComputePipelineAsyncCallback(callback) {
-}
+      mCreateComputePipelineAsyncCallback(callback) {}
 
 void CreateComputePipelineAsyncCallbackTask::Finish() {
     ASSERT(mCreateComputePipelineAsyncCallback != nullptr);
@@ -75,8 +72,7 @@ namespace dawn::native {
     void* userdata)
     : CreatePipelineAsyncCallbackTaskBase(errorMessage, userdata),
       mPipeline(std::move(pipeline)),
-      mCreateRenderPipelineAsyncCallback(callback) {
-}
+      mCreateRenderPipelineAsyncCallback(callback) {}
 
 void CreateRenderPipelineAsyncCallbackTask::Finish() {
     ASSERT(mCreateRenderPipelineAsyncCallback != nullptr);
@@ -119,8 +115,7 @@ namespace dawn::native {
 
     DeviceBase* device = mComputePipeline->GetDevice();
     TRACE_EVENT_FLOW_END1(device->GetPlatform(), General,
-                          "CreateComputePipelineAsyncTask::RunAsync", this, "label",
-                          eventLabel);
+                          "CreateComputePipelineAsyncTask::RunAsync", this, "label", eventLabel);
     TRACE_EVENT1(device->GetPlatform(), General, "CreateComputePipelineAsyncTask::Run", "label",
                  eventLabel);
 
@@ -139,8 +134,7 @@ namespace dawn::native {
     std::unique_ptr<CreateComputePipelineAsyncTask> task) {
     DeviceBase* device = task->mComputePipeline->GetDevice();
 
-    const char* eventLabel =
-        utils::GetLabelForTrace(task->mComputePipeline->GetLabel().c_str());
+    const char* eventLabel = utils::GetLabelForTrace(task->mComputePipeline->GetLabel().c_str());
 
     // Using "taskPtr = std::move(task)" causes compilation error while it should be supported
     // since C++14:
@@ -170,8 +164,8 @@ namespace dawn::native {
     const char* eventLabel = utils::GetLabelForTrace(mRenderPipeline->GetLabel().c_str());
 
     DeviceBase* device = mRenderPipeline->GetDevice();
-    TRACE_EVENT_FLOW_END1(device->GetPlatform(), General,
-                          "CreateRenderPipelineAsyncTask::RunAsync", this, "label", eventLabel);
+    TRACE_EVENT_FLOW_END1(device->GetPlatform(), General, "CreateRenderPipelineAsyncTask::RunAsync",
+                          this, "label", eventLabel);
     TRACE_EVENT1(device->GetPlatform(), General, "CreateRenderPipelineAsyncTask::Run", "label",
                  eventLabel);
 
@@ -182,12 +176,10 @@ namespace dawn::native {
         errorMessage = maybeError.AcquireError()->GetMessage();
     }
 
-    device->AddRenderPipelineAsyncCallbackTask(mRenderPipeline, errorMessage, mCallback,
-                                               mUserdata);
+    device->AddRenderPipelineAsyncCallbackTask(mRenderPipeline, errorMessage, mCallback, mUserdata);
 }
 
-void CreateRenderPipelineAsyncTask::RunAsync(
-    std::unique_ptr<CreateRenderPipelineAsyncTask> task) {
+void CreateRenderPipelineAsyncTask::RunAsync(std::unique_ptr<CreateRenderPipelineAsyncTask> task) {
     DeviceBase* device = task->mRenderPipeline->GetDevice();
 
     const char* eventLabel = utils::GetLabelForTrace(task->mRenderPipeline->GetLabel().c_str());

@@ -30,8 +30,7 @@ namespace dawn::native {
 
 namespace {
 struct ComboDeprecatedDawnDeviceDescriptor : DeviceDescriptor {
-    explicit ComboDeprecatedDawnDeviceDescriptor(
-        const DawnDeviceDescriptor* deviceDescriptor) {
+    explicit ComboDeprecatedDawnDeviceDescriptor(const DawnDeviceDescriptor* deviceDescriptor) {
         dawn::WarningLog() << "DawnDeviceDescriptor is deprecated. Please use "
                               "WGPUDeviceDescriptor instead.";
 
@@ -40,12 +39,9 @@ namespace dawn::native {
         if (deviceDescriptor != nullptr) {
             desc->nextInChain = &mTogglesDesc;
             mTogglesDesc.forceEnabledToggles = deviceDescriptor->forceEnabledToggles.data();
-            mTogglesDesc.forceEnabledTogglesCount =
-                deviceDescriptor->forceEnabledToggles.size();
-            mTogglesDesc.forceDisabledToggles =
-                deviceDescriptor->forceDisabledToggles.data();
-            mTogglesDesc.forceDisabledTogglesCount =
-                deviceDescriptor->forceDisabledToggles.size();
+            mTogglesDesc.forceEnabledTogglesCount = deviceDescriptor->forceEnabledToggles.size();
+            mTogglesDesc.forceDisabledToggles = deviceDescriptor->forceDisabledToggles.data();
+            mTogglesDesc.forceDisabledTogglesCount = deviceDescriptor->forceDisabledToggles.size();
 
             desc->requiredLimits =
                 reinterpret_cast<const RequiredLimits*>(deviceDescriptor->requiredLimits);
@@ -91,8 +87,7 @@ namespace dawn::native {
     mImpl = nullptr;
 }
 
-Adapter::Adapter(const Adapter& other) : Adapter(other.mImpl) {
-}
+Adapter::Adapter(const Adapter& other) : Adapter(other.mImpl) {}
 
 Adapter& Adapter::operator=(const Adapter& other) {
     if (this != &other) {
@@ -185,14 +180,12 @@ namespace dawn::native {
 // AdapterDiscoverOptionsBase
 
 AdapterDiscoveryOptionsBase::AdapterDiscoveryOptionsBase(WGPUBackendType type)
-    : backendType(type) {
-}
+    : backendType(type) {}
 
 // Instance
 
 Instance::Instance(const WGPUInstanceDescriptor* desc)
-    : mImpl(APICreateInstance(reinterpret_cast<const InstanceDescriptor*>(desc))) {
-}
+    : mImpl(APICreateInstance(reinterpret_cast<const InstanceDescriptor*>(desc))) {}
 
 Instance::~Instance() {
     if (mImpl != nullptr) {
@@ -283,8 +276,7 @@ namespace dawn::native {
 
 // ExternalImageDescriptor
 
-ExternalImageDescriptor::ExternalImageDescriptor(ExternalImageType type) : mType(type) {
-}
+ExternalImageDescriptor::ExternalImageDescriptor(ExternalImageType type) : mType(type) {}
 
 ExternalImageType ExternalImageDescriptor::GetType() const {
     return mType;
@@ -292,8 +284,7 @@ namespace dawn::native {
 
 // ExternalImageExportInfo
 
-ExternalImageExportInfo::ExternalImageExportInfo(ExternalImageType type) : mType(type) {
-}
+ExternalImageExportInfo::ExternalImageExportInfo(ExternalImageType type) : mType(type) {}
 
 ExternalImageType ExternalImageExportInfo::GetType() const {
     return mType;

@@ -104,18 +104,14 @@ namespace dawn::native {
         // may already disposed, we must keep a local copy in the CallbackTask.
     }
 
-    void Finish() override {
-        mCallback(mLoggingType, mMessage.c_str(), mUserdata);
-    }
+    void Finish() override { mCallback(mLoggingType, mMessage.c_str(), mUserdata); }
 
     void HandleShutDown() override {
         // Do the logging anyway
         mCallback(mLoggingType, mMessage.c_str(), mUserdata);
     }
 
-    void HandleDeviceLoss() override {
-        mCallback(mLoggingType, mMessage.c_str(), mUserdata);
-    }
+    void HandleDeviceLoss() override { mCallback(mLoggingType, mMessage.c_str(), mUserdata); }
 
   private:
     // As all deferred callback tasks will be triggered before modifying the registered
@@ -127,8 +123,7 @@ namespace dawn::native {
     void* mUserdata;
 };
 
-ResultOrError<Ref<PipelineLayoutBase>>
-ValidateLayoutAndGetComputePipelineDescriptorWithDefaults(
+ResultOrError<Ref<PipelineLayoutBase>> ValidateLayoutAndGetComputePipelineDescriptorWithDefaults(
     DeviceBase* device,
     const ComputePipelineDescriptor& descriptor,
     ComputePipelineDescriptor* outDescriptor) {
@@ -150,8 +145,7 @@ namespace dawn::native {
     return layoutRef;
 }
 
-ResultOrError<Ref<PipelineLayoutBase>>
-ValidateLayoutAndGetRenderPipelineDescriptorWithDefaults(
+ResultOrError<Ref<PipelineLayoutBase>> ValidateLayoutAndGetRenderPipelineDescriptorWithDefaults(
     DeviceBase* device,
     const RenderPipelineDescriptor& descriptor,
     RenderPipelineDescriptor* outDescriptor) {
@@ -161,9 +155,9 @@ namespace dawn::native {
     if (descriptor.layout == nullptr) {
         // Ref will keep the pipeline layout alive until the end of the function where
        // the pipeline will take another reference.
-        DAWN_TRY_ASSIGN(layoutRef, PipelineLayoutBase::CreateDefault(
-                                       device, GetRenderStagesAndSetPlaceholderShader(
-                                                   device, &descriptor)));
+        DAWN_TRY_ASSIGN(layoutRef,
+                        PipelineLayoutBase::CreateDefault(
+                            device, GetRenderStagesAndSetPlaceholderShader(device, &descriptor)));
         outDescriptor->layout = layoutRef.Get();
     }
 
@@ -568,8 +562,7 @@ namespace dawn::native {
         return returnValue;
     }
     ErrorScope scope = mErrorScopeStack->Pop();
-    callback(static_cast<WGPUErrorType>(scope.GetErrorType()), scope.GetErrorMessage(),
-             userdata);
+    callback(static_cast<WGPUErrorType>(scope.GetErrorType()), scope.GetErrorMessage(), userdata);
     return returnValue;
 }
 
@@ -731,8 +724,7 @@ namespace dawn::native {
     if (iter != mCaches->bindGroupLayouts.end()) {
         result = *iter;
     } else {
-        DAWN_TRY_ASSIGN(result,
-                        CreateBindGroupLayoutImpl(descriptor, pipelineCompatibilityToken));
+        DAWN_TRY_ASSIGN(result, CreateBindGroupLayoutImpl(descriptor, pipelineCompatibilityToken));
         result->SetIsCachedReference();
         result->SetContentHash(blueprintHash);
         mCaches->bindGroupLayouts.insert(result.Get());
@@ -921,8 +913,8 @@ namespace dawn::native {
         // now, so call validate. Most of |ValidateShaderModuleDescriptor| is parsing, but
         // we can consider splitting it if additional validation is added.
         ASSERT(!IsValidationEnabled());
-        DAWN_TRY(ValidateShaderModuleDescriptor(this, descriptor, parseResult,
-                                                compilationMessages));
+        DAWN_TRY(
+            ValidateShaderModuleDescriptor(this, descriptor, parseResult, compilationMessages));
     }
     DAWN_TRY_ASSIGN(result, CreateShaderModuleImpl(descriptor, parseResult));
     result->SetIsCachedReference();
@@ -939,8 +931,7 @@ namespace dawn::native {
     ASSERT(removedCount == 1);
 }
 
-Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(
-    AttachmentStateBlueprint* blueprint) {
+Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(AttachmentStateBlueprint* blueprint) {
     auto iter = mCaches->attachmentStates.find(blueprint);
     if (iter != mCaches->attachmentStates.end()) {
         return static_cast<AttachmentState*>(*iter);
@@ -981,8 +972,8 @@ namespace dawn::native {
 
 BindGroupBase* DeviceBase::APICreateBindGroup(const BindGroupDescriptor* descriptor) {
     Ref<BindGroupBase> result;
-    if (ConsumedError(CreateBindGroup(descriptor), &result, "calling %s.CreateBindGroup(%s).",
-                      this, descriptor)) {
+    if (ConsumedError(CreateBindGroup(descriptor), &result, "calling %s.CreateBindGroup(%s).", this,
+                      descriptor)) {
         return BindGroupBase::MakeError(this);
     }
     return result.Detach();
@@ -1005,8 +996,7 @@ namespace dawn::native {
     }
     return result.Detach();
 }
-CommandEncoder* DeviceBase::APICreateCommandEncoder(
-    const CommandEncoderDescriptor* descriptor) {
+CommandEncoder* DeviceBase::APICreateCommandEncoder(const CommandEncoderDescriptor* descriptor) {
     Ref<CommandEncoder> result;
     if (ConsumedError(CreateCommandEncoder(descriptor), &result,
                       "calling %s.CreateCommandEncoder(%s).", this, descriptor)) {
@@ -1055,8 +1045,8 @@ namespace dawn::native {
 }
 QuerySetBase* DeviceBase::APICreateQuerySet(const QuerySetDescriptor* descriptor) {
     Ref<QuerySetBase> result;
-    if (ConsumedError(CreateQuerySet(descriptor), &result, "calling %s.CreateQuerySet(%s).",
-                      this, descriptor)) {
+    if (ConsumedError(CreateQuerySet(descriptor), &result, "calling %s.CreateQuerySet(%s).", this,
+                      descriptor)) {
         return QuerySetBase::MakeError(this);
     }
     return result.Detach();
@@ -1265,9 +1255,8 @@ namespace dawn::native {
 void DeviceBase::EmitLog(WGPULoggingType loggingType, const char* message) {
     if (mLoggingCallback != nullptr) {
         // Use the thread-safe CallbackTaskManager routine
-        std::unique_ptr<LoggingCallbackTask> callbackTask =
-            std::make_unique<LoggingCallbackTask>(mLoggingCallback, loggingType, message,
-                                                  mLoggingUserdata);
+        std::unique_ptr<LoggingCallbackTask> callbackTask = std::make_unique<LoggingCallbackTask>(
+            mLoggingCallback, loggingType, message, mLoggingUserdata);
         mCallbackTaskManager->AddCallbackTask(std::move(callbackTask));
     }
 }
@@ -1315,8 +1304,8 @@ namespace dawn::native {
     const BindGroupDescriptor* descriptor) {
     DAWN_TRY(ValidateIsAlive());
     if (IsValidationEnabled()) {
-        DAWN_TRY_CONTEXT(ValidateBindGroupDescriptor(this, descriptor),
-                         "validating %s against %s", descriptor, descriptor->layout);
+        DAWN_TRY_CONTEXT(ValidateBindGroupDescriptor(this, descriptor), "validating %s against %s",
+                         descriptor, descriptor->layout);
     }
     return CreateBindGroupImpl(descriptor);
 }
@@ -1326,8 +1315,7 @@ namespace dawn::native {
     bool allowInternalBinding) {
     DAWN_TRY(ValidateIsAlive());
     if (IsValidationEnabled()) {
-        DAWN_TRY_CONTEXT(
-            ValidateBindGroupLayoutDescriptor(this, descriptor, allowInternalBinding),
-            "validating %s", descriptor);
+        DAWN_TRY_CONTEXT(ValidateBindGroupLayoutDescriptor(this, descriptor, allowInternalBinding),
+                         "validating %s", descriptor);
     }
     return GetOrCreateBindGroupLayout(descriptor);
@@ -1336,8 +1324,7 @@ namespace dawn::native {
 ResultOrError<Ref<BufferBase>> DeviceBase::CreateBuffer(const BufferDescriptor* descriptor) {
     DAWN_TRY(ValidateIsAlive());
     if (IsValidationEnabled()) {
-        DAWN_TRY_CONTEXT(ValidateBufferDescriptor(this, descriptor), "validating %s",
-                         descriptor);
+        DAWN_TRY_CONTEXT(ValidateBufferDescriptor(this, descriptor), "validating %s", descriptor);
     }
 
     Ref<BufferBase> buffer;
@@ -1390,8 +1377,7 @@ namespace dawn::native {
     return CommandEncoder::Create(this, descriptor);
 }
 
-MaybeError DeviceBase::CreateComputePipelineAsync(
-    const ComputePipelineDescriptor* descriptor,
+MaybeError DeviceBase::CreateComputePipelineAsync(const ComputePipelineDescriptor* descriptor,
                                                   WGPUCreateComputePipelineAsyncCallback callback,
                                                   void* userdata) {
     DAWN_TRY(ValidateIsAlive());
@@ -1412,8 +1398,8 @@ namespace dawn::native {
         GetCachedComputePipeline(uninitializedComputePipeline.Get());
     if (cachedComputePipeline.Get() != nullptr) {
         // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
-        callback(WGPUCreatePipelineAsyncStatus_Success, ToAPI(cachedComputePipeline.Detach()),
-                 "", userdata);
+        callback(WGPUCreatePipelineAsyncStatus_Success, ToAPI(cachedComputePipeline.Detach()), "",
+                 userdata);
     } else {
         // Otherwise we will create the pipeline object in InitializeComputePipelineAsyncImpl(),
         // where the pipeline object may be initialized asynchronously and the result will be
@@ -1427,8 +1413,7 @@ namespace dawn::native {
 
 // This function is overwritten with the async version on the backends that supports
 // initializing compute pipelines asynchronously.
-void DeviceBase::InitializeComputePipelineAsyncImpl(
-    Ref<ComputePipelineBase> computePipeline,
+void DeviceBase::InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
                                                     WGPUCreateComputePipelineAsyncCallback callback,
                                                     void* userdata) {
     Ref<ComputePipelineBase> result;
@@ -1443,15 +1428,14 @@ namespace dawn::native {
     }
 
     std::unique_ptr<CreateComputePipelineAsyncCallbackTask> callbackTask =
-        std::make_unique<CreateComputePipelineAsyncCallbackTask>(
-            std::move(result), errorMessage, callback, userdata);
+        std::make_unique<CreateComputePipelineAsyncCallbackTask>(std::move(result), errorMessage,
+                                                                 callback, userdata);
     mCallbackTaskManager->AddCallbackTask(std::move(callbackTask));
 }
 
 // This function is overwritten with the async version on the backends
 // that supports initializing render pipeline asynchronously
-void DeviceBase::InitializeRenderPipelineAsyncImpl(
-    Ref<RenderPipelineBase> renderPipeline,
+void DeviceBase::InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
                                                    WGPUCreateRenderPipelineAsyncCallback callback,
                                                    void* userdata) {
     Ref<RenderPipelineBase> result;
@@ -1490,12 +1474,10 @@ namespace dawn::native {
     return ExternalTextureBase::Create(this, descriptor);
 }
 
-ResultOrError<Ref<QuerySetBase>> DeviceBase::CreateQuerySet(
-    const QuerySetDescriptor* descriptor) {
+ResultOrError<Ref<QuerySetBase>> DeviceBase::CreateQuerySet(const QuerySetDescriptor* descriptor) {
     DAWN_TRY(ValidateIsAlive());
     if (IsValidationEnabled()) {
-        DAWN_TRY_CONTEXT(ValidateQuerySetDescriptor(this, descriptor), "validating %s",
-                         descriptor);
+        DAWN_TRY_CONTEXT(ValidateQuerySetDescriptor(this, descriptor), "validating %s", descriptor);
     }
     return CreateQuerySetImpl(descriptor);
 }
@@ -1559,8 +1541,8 @@ namespace dawn::native {
         GetCachedRenderPipeline(uninitializedRenderPipeline.Get());
     if (cachedRenderPipeline != nullptr) {
         // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
-        callback(WGPUCreatePipelineAsyncStatus_Success, ToAPI(cachedRenderPipeline.Detach()),
-                 "", userdata);
+        callback(WGPUCreatePipelineAsyncStatus_Success, ToAPI(cachedRenderPipeline.Detach()), "",
+                 userdata);
     } else {
         // Otherwise we will create the pipeline object in InitializeRenderPipelineAsyncImpl(),
        // where the pipeline object may be initialized asynchronously and the result will be
@@ -1577,8 +1559,7 @@ namespace dawn::native {
     DAWN_TRY(ValidateIsAlive());
     descriptor = descriptor != nullptr ? descriptor : &defaultDescriptor;
     if (IsValidationEnabled()) {
-        DAWN_TRY_CONTEXT(ValidateSamplerDescriptor(this, descriptor), "validating %s",
-                         descriptor);
+        DAWN_TRY_CONTEXT(ValidateSamplerDescriptor(this, descriptor), "validating %s", descriptor);
     }
     return GetOrCreateSampler(descriptor);
 }
@@ -1607,8 +1588,8 @@ namespace dawn::native {
     const SwapChainDescriptor* descriptor) {
     DAWN_TRY(ValidateIsAlive());
     if (IsValidationEnabled()) {
-        DAWN_TRY_CONTEXT(ValidateSwapChainDescriptor(this, surface, descriptor),
-                         "validating %s", descriptor);
+        DAWN_TRY_CONTEXT(ValidateSwapChainDescriptor(this, surface, descriptor), "validating %s",
+                         descriptor);
     }
 
     // TODO(dawn:269): Remove this code path once implementation-based swapchains are removed.
@@ -1637,8 +1618,7 @@ namespace dawn::native {
 ResultOrError<Ref<TextureBase>> DeviceBase::CreateTexture(const TextureDescriptor* descriptor) {
     DAWN_TRY(ValidateIsAlive());
     if (IsValidationEnabled()) {
-        DAWN_TRY_CONTEXT(ValidateTextureDescriptor(this, descriptor), "validating %s.",
-                         descriptor);
+        DAWN_TRY_CONTEXT(ValidateTextureDescriptor(this, descriptor), "validating %s.", descriptor);
     }
     return CreateTextureImpl(descriptor);
 }
@@ -1771,8 +1751,7 @@ namespace dawn::native {
         std::move(pipeline), errorMessage, callback, userdata));
 }
 
-void DeviceBase::AddRenderPipelineAsyncCallbackTask(
-    Ref<RenderPipelineBase> pipeline,
+void DeviceBase::AddRenderPipelineAsyncCallbackTask(Ref<RenderPipelineBase> pipeline,
                                                     std::string errorMessage,
                                                     WGPUCreateRenderPipelineAsyncCallback callback,
                                                     void* userdata) {
@@ -1816,8 +1795,7 @@ namespace dawn::native {
     SetLabelImpl();
 }
 
-void DeviceBase::SetLabelImpl() {
-}
+void DeviceBase::SetLabelImpl() {}
 
 bool DeviceBase::ShouldDuplicateNumWorkgroupsForDispatchIndirect(
     ComputePipelineBase* computePipeline) const {

@@ -219,16 +219,14 @@ namespace dawn::native {
     MaybeError CreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
                                          WGPUCreateRenderPipelineAsyncCallback callback,
                                          void* userdata);
-    ResultOrError<Ref<SamplerBase>> CreateSampler(
-        const SamplerDescriptor* descriptor = nullptr);
+    ResultOrError<Ref<SamplerBase>> CreateSampler(const SamplerDescriptor* descriptor = nullptr);
     ResultOrError<Ref<ShaderModuleBase>> CreateShaderModule(
         const ShaderModuleDescriptor* descriptor,
         OwnedCompilationMessages* compilationMessages = nullptr);
     ResultOrError<Ref<SwapChainBase>> CreateSwapChain(Surface* surface,
                                                       const SwapChainDescriptor* descriptor);
     ResultOrError<Ref<TextureBase>> CreateTexture(const TextureDescriptor* descriptor);
-    ResultOrError<Ref<TextureViewBase>> CreateTextureView(
-        TextureBase* texture,
+    ResultOrError<Ref<TextureViewBase>> CreateTextureView(TextureBase* texture,
                                                           const TextureViewDescriptor* descriptor);
 
     // Implementation of API object creation methods. DO NOT use them in a reentrant manner.
@@ -277,8 +275,7 @@ namespace dawn::native {
 
     BlobCache* GetBlobCache();
 
-    virtual ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(
-        size_t size) = 0;
+    virtual ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) = 0;
     virtual MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
                                                uint64_t sourceOffset,
                                                BufferBase* destination,
@@ -380,8 +377,7 @@ namespace dawn::native {
     void APISetLabel(const char* label);
     void APIDestroy();
 
-    virtual void AppendDebugLayerMessages(ErrorData* error) {
-    }
+    virtual void AppendDebugLayerMessages(ErrorData* error) {}
 
   protected:
     // Constructor used only for mocking and testing.
@@ -403,8 +399,7 @@ namespace dawn::native {
     virtual ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
         const BindGroupLayoutDescriptor* descriptor,
         PipelineCompatibilityToken pipelineCompatibilityToken) = 0;
-    virtual ResultOrError<Ref<BufferBase>> CreateBufferImpl(
-        const BufferDescriptor* descriptor) = 0;
+    virtual ResultOrError<Ref<BufferBase>> CreateBufferImpl(const BufferDescriptor* descriptor) = 0;
     virtual ResultOrError<Ref<ExternalTextureBase>> CreateExternalTextureImpl(
         const ExternalTextureDescriptor* descriptor);
     virtual ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
@@ -445,14 +440,11 @@ namespace dawn::native {
         RenderPipelineBase* uninitializedRenderPipeline);
     Ref<ComputePipelineBase> AddOrGetCachedComputePipeline(
         Ref<ComputePipelineBase> computePipeline);
-    Ref<RenderPipelineBase> AddOrGetCachedRenderPipeline(
-        Ref<RenderPipelineBase> renderPipeline);
-    virtual void InitializeComputePipelineAsyncImpl(
-        Ref<ComputePipelineBase> computePipeline,
+    Ref<RenderPipelineBase> AddOrGetCachedRenderPipeline(Ref<RenderPipelineBase> renderPipeline);
+    virtual void InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
                                                     WGPUCreateComputePipelineAsyncCallback callback,
                                                     void* userdata);
-    virtual void InitializeRenderPipelineAsyncImpl(
-        Ref<RenderPipelineBase> renderPipeline,
+    virtual void InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
                                                    WGPUCreateRenderPipelineAsyncCallback callback,
                                                    void* userdata);
 

@@ -22,13 +22,12 @@
 namespace dawn::native {
 
 DynamicUploader::DynamicUploader(DeviceBase* device) : mDevice(device) {
-    mRingBuffers.emplace_back(std::unique_ptr<RingBuffer>(
-        new RingBuffer{nullptr, RingBufferAllocator(kRingBufferSize)}));
+    mRingBuffers.emplace_back(
+        std::unique_ptr<RingBuffer>(new RingBuffer{nullptr, RingBufferAllocator(kRingBufferSize)}));
 }
 
 void DynamicUploader::ReleaseStagingBuffer(std::unique_ptr<StagingBufferBase> stagingBuffer) {
-    mReleasedStagingBuffers.Enqueue(std::move(stagingBuffer),
-                                    mDevice->GetPendingCommandSerial());
+    mReleasedStagingBuffers.Enqueue(std::move(stagingBuffer), mDevice->GetPendingCommandSerial());
 }
 
 ResultOrError<UploadHandle> DynamicUploader::AllocateInternal(uint64_t allocationSize,
@@ -120,12 +119,10 @@ namespace dawn::native {
                                                     uint64_t offsetAlignment) {
     ASSERT(offsetAlignment > 0);
     UploadHandle uploadHandle;
-    DAWN_TRY_ASSIGN(uploadHandle,
-                    AllocateInternal(allocationSize + offsetAlignment - 1, serial));
+    DAWN_TRY_ASSIGN(uploadHandle, AllocateInternal(allocationSize + offsetAlignment - 1, serial));
     uint64_t additionalOffset =
         Align(uploadHandle.startOffset, offsetAlignment) - uploadHandle.startOffset;
-    uploadHandle.mappedBuffer =
-        static_cast<uint8_t*>(uploadHandle.mappedBuffer) + additionalOffset;
+    uploadHandle.mappedBuffer = static_cast<uint8_t*>(uploadHandle.mappedBuffer) + additionalOffset;
     uploadHandle.startOffset += additionalOffset;
     return uploadHandle;
 }
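Aside: AllocateInternal(allocationSize + offsetAlignment - 1, serial) above is the over-allocate-then-align pattern: requesting alignment - 1 extra bytes guarantees an aligned start offset exists somewhere inside the allocation. A sketch of the align-up step (Align here is a hypothetical stand-in for Dawn's helper):

    #include <cstdint>

    // Align-up: the smallest multiple of alignment that is >= offset.
    uint64_t AlignUp(uint64_t offset, uint64_t alignment) {
        return (offset + alignment - 1) / alignment * alignment;
    }

For example, a suballocation starting at offset 1000 with a 256-byte alignment is bumped to 1024; the 24 skipped bytes are covered by the 255 extra bytes requested up front.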
@ -57,8 +57,7 @@ namespace dawn::native {
|
|||
RingBufferAllocator mAllocator;
|
||||
};
|
||||
|
||||
ResultOrError<UploadHandle> AllocateInternal(uint64_t allocationSize,
|
||||
ExecutionSerial serial);
|
||||
ResultOrError<UploadHandle> AllocateInternal(uint64_t allocationSize, ExecutionSerial serial);
|
||||
|
||||
std::vector<std::unique_ptr<RingBuffer>> mRingBuffers;
|
||||
SerialQueue<ExecutionSerial, std::unique_ptr<StagingBufferBase>> mReleasedStagingBuffers;
|
||||
|
|
|
@ -25,8 +25,7 @@
|
|||
namespace dawn::native {
|
||||
|
||||
EncodingContext::EncodingContext(DeviceBase* device, const ApiObjectBase* initialEncoder)
|
||||
: mDevice(device), mTopLevelEncoder(initialEncoder), mCurrentEncoder(initialEncoder) {
|
||||
}
|
||||
: mDevice(device), mTopLevelEncoder(initialEncoder), mCurrentEncoder(initialEncoder) {}
|
||||
|
||||
EncodingContext::~EncodingContext() {
|
||||
Destroy();
|
||||
|
@ -91,8 +90,7 @@ namespace dawn::native {
|
|||
|
||||
void EncodingContext::WillBeginRenderPass() {
|
||||
ASSERT(mCurrentEncoder == mTopLevelEncoder);
|
||||
if (mDevice->IsValidationEnabled() ||
|
||||
mDevice->MayRequireDuplicationOfIndirectParameters()) {
|
||||
if (mDevice->IsValidationEnabled() || mDevice->MayRequireDuplicationOfIndirectParameters()) {
|
||||
// When validation is enabled or indirect parameters require duplication, we are going
|
||||
// to want to capture all commands encoded between and including BeginRenderPassCmd and
|
||||
// EndRenderPassCmd, and defer their sequencing util after we have a chance to insert
|
||||
|
@ -120,8 +118,7 @@ namespace dawn::native {
|
|||
|
||||
mCurrentEncoder = mTopLevelEncoder;
|
||||
|
||||
if (mDevice->IsValidationEnabled() ||
|
||||
mDevice->MayRequireDuplicationOfIndirectParameters()) {
|
||||
if (mDevice->IsValidationEnabled() || mDevice->MayRequireDuplicationOfIndirectParameters()) {
|
||||
// With validation enabled, commands were committed just before BeginRenderPassCmd was
|
||||
// encoded by our RenderPassEncoder (see WillBeginRenderPass above). This means
|
||||
// mPendingCommands contains only the commands from BeginRenderPassCmd to
|
||||
|
|
|
@ -59,9 +59,7 @@ namespace dawn::native {
|
|||
}
|
||||
|
||||
template <typename... Args>
|
||||
inline bool ConsumedError(MaybeError maybeError,
|
||||
const char* formatStr,
|
||||
const Args&... args) {
|
||||
inline bool ConsumedError(MaybeError maybeError, const char* formatStr, const Args&... args) {
|
||||
if (DAWN_UNLIKELY(maybeError.IsError())) {
|
||||
std::unique_ptr<ErrorData> error = maybeError.AcquireError();
|
||||
if (error->GetType() == InternalErrorType::Validation) {
|
||||
|
@ -70,8 +68,8 @@ namespace dawn::native {
|
|||
if (absl::FormatUntyped(&out, format, {absl::FormatArg(args)...})) {
|
||||
error->AppendContext(std::move(out));
|
||||
} else {
|
||||
error->AppendContext(absl::StrFormat(
|
||||
"[Failed to format error message: \"%s\"].", formatStr));
|
||||
error->AppendContext(
|
||||
absl::StrFormat("[Failed to format error message: \"%s\"].", formatStr));
|
||||
}
|
||||
}
|
||||
HandleError(std::move(error));
|
||||
|
@ -83,8 +81,7 @@ namespace dawn::native {
|
|||
inline bool CheckCurrentEncoder(const ApiObjectBase* encoder) {
|
||||
if (DAWN_UNLIKELY(encoder != mCurrentEncoder)) {
|
||||
if (mDestroyed) {
|
||||
HandleError(
|
||||
DAWN_FORMAT_VALIDATION_ERROR("Recording in a destroyed %s.", encoder));
|
||||
HandleError(DAWN_FORMAT_VALIDATION_ERROR("Recording in a destroyed %s.", encoder));
|
||||
} else if (mCurrentEncoder != mTopLevelEncoder) {
|
||||
// The top level encoder was used when a pass encoder was current.
|
||||
HandleError(DAWN_FORMAT_VALIDATION_ERROR(
|
||||
|
|
|
@ -36,21 +36,16 @@ namespace dawn::native {
|
|||
|
||||
class Iterator final {
|
||||
public:
|
||||
explicit Iterator(const typename BitSetIterator<N, U>::Iterator& iter) : mIter(iter) {
|
||||
}
|
||||
explicit Iterator(const typename BitSetIterator<N, U>::Iterator& iter) : mIter(iter) {}
|
||||
|
||||
Iterator& operator++() {
|
||||
++mIter;
|
||||
return *this;
|
||||
}
|
||||
|
||||
bool operator==(const Iterator& other) const {
|
||||
return mIter == other.mIter;
|
||||
}
|
||||
bool operator==(const Iterator& other) const { return mIter == other.mIter; }
|
||||
|
||||
bool operator!=(const Iterator& other) const {
|
||||
return mIter != other.mIter;
|
||||
}
|
||||
bool operator!=(const Iterator& other) const { return mIter != other.mIter; }
|
||||
|
||||
T operator*() const {
|
||||
U value = *mIter;
|
||||
|
@ -61,13 +56,9 @@ namespace dawn::native {
|
|||
typename BitSetIterator<N, U>::Iterator mIter;
|
||||
};
|
||||
|
||||
Iterator begin() const {
|
||||
return Iterator(mBitSetIterator.begin());
|
||||
}
|
||||
Iterator begin() const { return Iterator(mBitSetIterator.begin()); }
|
||||
|
||||
Iterator end() const {
|
||||
return Iterator(mBitSetIterator.end());
|
||||
}
|
||||
Iterator end() const { return Iterator(mBitSetIterator.end()); }
|
||||
|
||||
private:
|
||||
BitSetIterator<N, U> mBitSetIterator;
|
||||
|
|
|
@ -33,8 +33,7 @@ namespace dawn::native {
|
|||
}
|
||||
|
||||
ErrorData::ErrorData(InternalErrorType type, std::string message)
|
||||
: mType(type), mMessage(std::move(message)) {
|
||||
}
|
||||
: mType(type), mMessage(std::move(message)) {}
|
||||
|
||||
void ErrorData::AppendBacktrace(const char* file, const char* function, int line) {
|
||||
BacktraceRecord record;
|
||||
|
@ -95,8 +94,8 @@ namespace dawn::native {
|
|||
// stack trace for debugging purposes.
|
||||
if (mContexts.empty() || mType != InternalErrorType::Validation) {
|
||||
for (const auto& callsite : mBacktrace) {
|
||||
ss << " at " << callsite.function << " (" << callsite.file << ":"
|
||||
<< callsite.line << ")\n";
|
||||
ss << " at " << callsite.function << " (" << callsite.file << ":" << callsite.line
|
||||
<< ")\n";
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -35,8 +35,10 @@ namespace dawn::native {
|
|||
|
||||
class [[nodiscard]] ErrorData {
|
||||
public:
|
||||
[[nodiscard]] static std::unique_ptr<ErrorData> Create(
|
||||
InternalErrorType type, std::string message, const char* file, const char* function,
|
||||
[[nodiscard]] static std::unique_ptr<ErrorData> Create(InternalErrorType type,
|
||||
std::string message,
|
||||
const char* file,
|
||||
const char* function,
|
||||
int line);
|
||||
ErrorData(InternalErrorType type, std::string message);
|
||||
|
||||
|
|
|
@ -35,8 +35,7 @@ namespace dawn::native {
|
|||
} // namespace
|
||||
|
||||
ErrorScope::ErrorScope(wgpu::ErrorFilter errorFilter)
|
||||
: mMatchedErrorType(ErrorFilterToErrorType(errorFilter)) {
|
||||
}
|
||||
: mMatchedErrorType(ErrorFilterToErrorType(errorFilter)) {}
|
||||
|
||||
wgpu::ErrorType ErrorScope::GetErrorType() const {
|
||||
return mCapturedError;
|
||||
|
|
|
@@ -37,12 +37,12 @@ namespace dawn::native {
                     textureView->GetDimension());

     DAWN_INVALID_IF(textureView->GetLevelCount() > 1,
-                    "The external texture plane (%s) mip level count (%u) is not 1.",
-                    textureView, textureView->GetLevelCount());
+                    "The external texture plane (%s) mip level count (%u) is not 1.", textureView,
+                    textureView->GetLevelCount());

     DAWN_INVALID_IF(textureView->GetTexture()->GetSampleCount() != 1,
-                    "The external texture plane (%s) sample count (%u) is not one.",
-                    textureView, textureView->GetTexture()->GetSampleCount());
+                    "The external texture plane (%s) sample count (%u) is not one.", textureView,
+                    textureView->GetTexture()->GetSampleCount());

     return {};
 }
@@ -118,8 +118,7 @@ namespace dawn::native {
 }

 ExternalTextureBase::ExternalTextureBase(DeviceBase* device, ObjectBase::ErrorTag tag)
-    : ApiObjectBase(device, tag) {
-}
+    : ApiObjectBase(device, tag) {}

 ExternalTextureBase::~ExternalTextureBase() = default;
@@ -191,8 +190,8 @@ namespace dawn::native {
     return {};
 }

-const std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat>&
-ExternalTextureBase::GetTextureViews() const {
+const std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat>& ExternalTextureBase::GetTextureViews()
+    const {
     return mTextureViews;
 }
@@ -82,8 +82,7 @@ namespace dawn::native {
       "dawn_internal_usages.md"},
      &WGPUDeviceProperties::dawnInternalUsages},
     {Feature::MultiPlanarFormats,
-     {"multiplanar-formats",
-      "Import and use multi-planar texture formats with per plane views",
+     {"multiplanar-formats", "Import and use multi-planar texture formats with per plane views",
       "https://bugs.chromium.org/p/dawn/issues/detail?id=551"},
      &WGPUDeviceProperties::multiPlanarFormats},
     {Feature::DawnNative,
@@ -250,8 +249,8 @@ namespace dawn::native {
 }

 // TODO(dawn:550): Remove this fallback logic when Chromium is updated.
-constexpr std::array<std::pair<const char*, const char*>, 6>
-    kReplacementsForDeprecatedNames = {{
+constexpr std::array<std::pair<const char*, const char*>, 6> kReplacementsForDeprecatedNames = {
+    {
      {"texture_compression_bc", "texture-compression-bc"},
      {"depth_clamping", "depth-clamping"},
      {"pipeline_statistics_query", "pipeline-statistics-query"},
@@ -142,8 +142,7 @@ namespace dawn::native {
 FormatIndex ComputeFormatIndex(wgpu::TextureFormat format) {
     // This takes advantage of overflows to make the index of TextureFormat::Undefined outside
     // of the range of the FormatTable.
-    static_assert(static_cast<uint32_t>(wgpu::TextureFormat::Undefined) - 1 >
-                  kKnownFormatCount);
+    static_assert(static_cast<uint32_t>(wgpu::TextureFormat::Undefined) - 1 > kKnownFormatCount);
     return static_cast<FormatIndex>(static_cast<uint32_t>(format) - 1);
 }
@@ -284,8 +283,8 @@ namespace dawn::native {
     };

     auto AddCompressedFormat =
-        [&AddFormat](wgpu::TextureFormat format, uint32_t byteSize, uint32_t width,
-                     uint32_t height, bool isSupported, uint8_t componentCount,
+        [&AddFormat](wgpu::TextureFormat format, uint32_t byteSize, uint32_t width, uint32_t height,
+                     bool isSupported, uint8_t componentCount,
                      wgpu::TextureFormat baseFormat = wgpu::TextureFormat::Undefined) {
             Format internalFormat;
             internalFormat.format = format;
@@ -315,11 +314,10 @@ namespace dawn::native {
             AddFormat(internalFormat);
         };

-    auto AddMultiAspectFormat = [&AddFormat, &table](wgpu::TextureFormat format, Aspect aspects,
-                                                     wgpu::TextureFormat firstFormat,
-                                                     wgpu::TextureFormat secondFormat,
-                                                     bool isRenderable, bool isSupported,
-                                                     bool supportsMultisample,
+    auto AddMultiAspectFormat =
+        [&AddFormat, &table](wgpu::TextureFormat format, Aspect aspects,
+                             wgpu::TextureFormat firstFormat, wgpu::TextureFormat secondFormat,
+                             bool isRenderable, bool isSupported, bool supportsMultisample,
                              uint8_t componentCount) {
             Format internalFormat;
             internalFormat.format = format;
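The AddMultiAspectFormat reflow above illustrates how the formatter treats wide lambdas: once the capture list and signature cannot fit after the assignment, it breaks after the '=' and continues on indented lines. A standalone sketch of the same shape (all names here are illustrative):

    #include <cstdint>

    int main() {
        uint32_t total = 0;
        // Breaks after '=' because the signature will not fit in 100 columns.
        auto addFormatSize =
            [&total](uint32_t byteSize, uint32_t width, uint32_t height, bool isSupported,
                     uint8_t componentCount) {
                if (isSupported) {
                    total += byteSize * width * height + componentCount;
                }
            };
        addFormatSize(8, 4, 4, true, 1);
        return total == 129 ? 0 : 1;
    }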
@@ -32,8 +32,7 @@ namespace dawn::native {

 IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::IndexedIndirectBufferValidationInfo(
     BufferBase* indirectBuffer)
-    : mIndirectBuffer(indirectBuffer) {
-}
+    : mIndirectBuffer(indirectBuffer) {}

 void IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::AddIndirectDraw(
     uint32_t maxDrawCallsPerIndirectValidationBatch,
@@ -99,8 +98,8 @@ namespace dawn::native {
         IndirectValidationBatch& batch = *it;
         uint64_t min = std::min(newBatch.minOffset, batch.minOffset);
         uint64_t max = std::max(newBatch.maxOffset, batch.maxOffset);
-        if (max - min <= maxBatchOffsetRange && batch.draws.size() + newBatch.draws.size() <=
-                                                    maxDrawCallsPerIndirectValidationBatch) {
+        if (max - min <= maxBatchOffsetRange &&
+            batch.draws.size() + newBatch.draws.size() <= maxDrawCallsPerIndirectValidationBatch) {
             // This batch fits within the limits of an existing batch. Merge it.
             batch.minOffset = min;
             batch.maxOffset = max;
@@ -124,8 +123,7 @@ namespace dawn::native {

 IndirectDrawMetadata::IndirectDrawMetadata(const CombinedLimits& limits)
     : mMaxBatchOffsetRange(ComputeMaxIndirectValidationBatchOffsetRange(limits)),
-      mMaxDrawCallsPerBatch(ComputeMaxDrawCallsPerIndirectValidationBatch(limits)) {
-}
+      mMaxDrawCallsPerBatch(ComputeMaxDrawCallsPerIndirectValidationBatch(limits)) {}

 IndirectDrawMetadata::~IndirectDrawMetadata() = default;
@@ -213,16 +211,14 @@ namespace dawn::native {
 bool IndirectDrawMetadata::IndexedIndirectConfig::operator<(
     const IndexedIndirectConfig& other) const {
     return std::tie(inputIndirectBuffer, numIndexBufferElements, duplicateBaseVertexInstance,
-                    drawType) < std::tie(other.inputIndirectBuffer,
-                                         other.numIndexBufferElements,
-                                         other.duplicateBaseVertexInstance, other.drawType);
+                    drawType) < std::tie(other.inputIndirectBuffer, other.numIndexBufferElements,
+                                         other.duplicateBaseVertexInstance, other.drawType);
 }

 bool IndirectDrawMetadata::IndexedIndirectConfig::operator==(
     const IndexedIndirectConfig& other) const {
     return std::tie(inputIndirectBuffer, numIndexBufferElements, duplicateBaseVertexInstance,
-                    drawType) == std::tie(other.inputIndirectBuffer,
-                                          other.numIndexBufferElements,
-                                          other.duplicateBaseVertexInstance, other.drawType);
+                    drawType) == std::tie(other.inputIndirectBuffer, other.numIndexBufferElements,
+                                          other.duplicateBaseVertexInstance, other.drawType);
 }
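The merge condition reformatted above is worth spelling out: an incoming validation batch is folded into an existing one only if the union of their offset ranges stays within the maximum batch offset range and the combined draw count stays under the per-batch cap. A self-contained sketch of that rule (simplified stand-in types, not Dawn's):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct Batch {
        uint64_t minOffset;
        uint64_t maxOffset;
        std::vector<int> draws;  // Stand-in for Dawn's per-draw records.
    };

    // Merges 'incoming' into 'existing' and returns true only when both limits
    // hold, mirroring the condition in the hunk above.
    bool TryMerge(Batch& existing, const Batch& incoming, uint64_t maxOffsetRange,
                  size_t maxDraws) {
        uint64_t min = std::min(existing.minOffset, incoming.minOffset);
        uint64_t max = std::max(existing.maxOffset, incoming.maxOffset);
        if (max - min <= maxOffsetRange &&
            existing.draws.size() + incoming.draws.size() <= maxDraws) {
            existing.minOffset = min;
            existing.maxOffset = max;
            existing.draws.insert(existing.draws.end(), incoming.draws.begin(),
                                  incoming.draws.end());
            return true;
        }
        return false;
    }

    int main() {
        Batch a{0, 100, {1, 2}};
        Batch b{50, 120, {3}};
        return TryMerge(a, b, 256, 4) ? 0 : 1;
    }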
@@ -178,15 +178,13 @@ namespace dawn::native {
         }
     )";

-    ResultOrError<ComputePipelineBase*> GetOrCreateRenderValidationPipeline(
-        DeviceBase* device) {
+    ResultOrError<ComputePipelineBase*> GetOrCreateRenderValidationPipeline(DeviceBase* device) {
         InternalPipelineStore* store = device->GetInternalPipelineStore();

         if (store->renderValidationPipeline == nullptr) {
             // Create compute shader module if not cached before.
             if (store->renderValidationShader == nullptr) {
-                DAWN_TRY_ASSIGN(
-                    store->renderValidationShader,
-                    utils::CreateShaderModule(device, sRenderValidationShaderSource));
+                DAWN_TRY_ASSIGN(store->renderValidationShader,
+                                utils::CreateShaderModule(device, sRenderValidationShaderSource));
             }
@@ -196,16 +194,14 @@ namespace dawn::native {
                 utils::MakeBindGroupLayout(
                     device,
                     {
-                        {0, wgpu::ShaderStage::Compute,
-                         wgpu::BufferBindingType::ReadOnlyStorage},
+                        {0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage},
                         {1, wgpu::ShaderStage::Compute, kInternalStorageBufferBinding},
                         {2, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage},
                     },
                     /* allowInternalBinding */ true));

             Ref<PipelineLayoutBase> pipelineLayout;
-            DAWN_TRY_ASSIGN(pipelineLayout,
-                            utils::MakeBasicPipelineLayout(device, bindGroupLayout));
+            DAWN_TRY_ASSIGN(pipelineLayout, utils::MakeBasicPipelineLayout(device, bindGroupLayout));

             ComputePipelineDescriptor computePipelineDescriptor = {};
             computePipelineDescriptor.layout = pipelineLayout.Get();
@@ -272,15 +268,13 @@ namespace dawn::native {
         return {};
     }

-    const uint64_t maxStorageBufferBindingSize =
-        device->GetLimits().v1.maxStorageBufferBindingSize;
+    const uint64_t maxStorageBufferBindingSize = device->GetLimits().v1.maxStorageBufferBindingSize;
     const uint32_t minStorageBufferOffsetAlignment =
         device->GetLimits().v1.minStorageBufferOffsetAlignment;

     for (auto& [config, validationInfo] : bufferInfoMap) {
         const uint64_t indirectDrawCommandSize =
-            config.drawType == IndirectDrawMetadata::DrawType::Indexed
-                ? kDrawIndexedIndirectSize
-                : kDrawIndirectSize;
+            config.drawType == IndirectDrawMetadata::DrawType::Indexed ? kDrawIndexedIndirectSize
+                                                                       : kDrawIndirectSize;

         uint64_t outputIndirectSize = indirectDrawCommandSize;
@@ -292,8 +286,7 @@ namespace dawn::native {
              validationInfo.GetBatches()) {
             const uint64_t minOffsetFromAlignedBoundary =
                 batch.minOffset % minStorageBufferOffsetAlignment;
-            const uint64_t minOffsetAlignedDown =
-                batch.minOffset - minOffsetFromAlignedBoundary;
+            const uint64_t minOffsetAlignedDown = batch.minOffset - minOffsetFromAlignedBoundary;

             Batch newBatch;
             newBatch.metadata = &batch;
@@ -304,8 +297,7 @@ namespace dawn::native {
                 batch.maxOffset + indirectDrawCommandSize - minOffsetAlignedDown;

             newBatch.outputParamsSize = batch.draws.size() * outputIndirectSize;
-            newBatch.outputParamsOffset =
-                Align(outputParamsSize, minStorageBufferOffsetAlignment);
+            newBatch.outputParamsOffset = Align(outputParamsSize, minStorageBufferOffsetAlignment);
             outputParamsSize = newBatch.outputParamsOffset + newBatch.outputParamsSize;
             if (outputParamsSize > maxStorageBufferBindingSize) {
                 return DAWN_INTERNAL_ERROR("Too many drawIndexedIndirect calls to validate");
@@ -376,8 +368,8 @@ namespace dawn::native {
         uint64_t outputParamsOffset = batch.outputParamsOffset;
         for (auto& draw : batch.metadata->draws) {
             // The shader uses this to index an array of u32, hence the division by 4 bytes.
-            *indirectOffsets++ = static_cast<uint32_t>(
-                (draw.inputBufferOffset - batch.inputIndirectOffset) / 4);
+            *indirectOffsets++ =
+                static_cast<uint32_t>((draw.inputBufferOffset - batch.inputIndirectOffset) / 4);

             draw.cmd->indirectBuffer = outputParamsBuffer.GetBuffer();
             draw.cmd->indirectOffset = outputParamsOffset;
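The outputParamsOffset computation above packs each batch's scratch output at the next offset aligned to minStorageBufferOffsetAlignment, where Align() rounds a value up to the next multiple of a power-of-two alignment. A small demonstration of that packing (the Align here mirrors the contract the helper is used for above):

    #include <cassert>
    #include <cstdint>

    // Round 'value' up to the next multiple of 'alignment' (a power of two).
    uint64_t Align(uint64_t value, uint64_t alignment) {
        assert(alignment != 0 && (alignment & (alignment - 1)) == 0);
        return (value + alignment - 1) & ~(alignment - 1);
    }

    int main() {
        // Packing two batches of output params with a 256-byte alignment:
        uint64_t offset = 0;
        uint64_t first = Align(offset, 256);   // 0
        offset = first + 300;                  // batch one occupies 300 bytes
        uint64_t second = Align(offset, 256);  // 512, the next aligned slot
        return (first == 0 && second == 512) ? 0 : 1;
    }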
@@ -332,8 +332,7 @@ namespace dawn::native {

 #if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
         case wgpu::BackendType::OpenGL:
-            Register(opengl::Connect(this, wgpu::BackendType::OpenGL),
-                     wgpu::BackendType::OpenGL);
+            Register(opengl::Connect(this, wgpu::BackendType::OpenGL), wgpu::BackendType::OpenGL);
             break;
 #endif  // defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
@@ -106,8 +106,7 @@ namespace dawn::native {
     MaybeError DiscoverAdaptersInternal(const AdapterDiscoveryOptionsBase* options);

-    ResultOrError<Ref<AdapterBase>> RequestAdapterInternal(
-        const RequestAdapterOptions* options);
+    ResultOrError<Ref<AdapterBase>> RequestAdapterInternal(const RequestAdapterOptions* options);

     std::vector<std::string> mRuntimeSearchPaths;
@@ -28,10 +28,9 @@ namespace dawn::native {

 InternalPipelineStore::InternalPipelineStore(DeviceBase* device)
     : scratchStorage(device, wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Storage),
-      scratchIndirectStorage(device,
-                             wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Indirect |
-                                 wgpu::BufferUsage::Storage) {
-}
+      scratchIndirectStorage(
+          device,
+          wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Indirect | wgpu::BufferUsage::Storage) {}

 InternalPipelineStore::~InternalPipelineStore() = default;
@@ -33,8 +33,7 @@ namespace dawn::native {
     explicit InternalPipelineStore(DeviceBase* device);
     ~InternalPipelineStore();

-    std::unordered_map<wgpu::TextureFormat, Ref<RenderPipelineBase>>
-        copyTextureForBrowserPipelines;
+    std::unordered_map<wgpu::TextureFormat, Ref<RenderPipelineBase>> copyTextureForBrowserPipelines;

     Ref<ShaderModuleBase> copyTextureForBrowser;
@@ -100,10 +100,10 @@ namespace dawn::native {
     template <typename T>
     static MaybeError Validate(T supported, T required) {
         DAWN_INVALID_IF(IsBetter(required, supported),
-                        "Required limit (%u) is lower than the supported limit (%u).",
-                        required, supported);
-        DAWN_INVALID_IF(!IsPowerOfTwo(required),
-                        "Required limit (%u) is not a power of two.", required);
+                        "Required limit (%u) is lower than the supported limit (%u).", required,
+                        supported);
+        DAWN_INVALID_IF(!IsPowerOfTwo(required), "Required limit (%u) is not a power of two.",
+                        required);
         return {};
     }
 };
@@ -118,8 +118,8 @@ namespace dawn::native {
     template <typename T>
     static MaybeError Validate(T supported, T required) {
         DAWN_INVALID_IF(IsBetter(required, supported),
-                        "Required limit (%u) is greater than the supported limit (%u).",
-                        required, supported);
+                        "Required limit (%u) is greater than the supported limit (%u).", required,
+                        supported);
         return {};
     }
 };
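The alignment-class limits validated above must be powers of two. The standard check, which a helper like the IsPowerOfTwo() call above presumably relies on, is the single-set-bit trick:

    #include <cstdint>

    // A power of two has exactly one set bit, so value & (value - 1) == 0.
    bool IsPowerOfTwo(uint64_t value) {
        return value != 0 && (value & (value - 1)) == 0;
    }

    int main() {
        return (IsPowerOfTwo(256) && !IsPowerOfTwo(300)) ? 0 : 1;
    }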
@@ -14,20 +14,17 @@

 #include <mutex>

-#include "dawn/native/ObjectBase.h"
 #include "dawn/native/Device.h"
+#include "dawn/native/ObjectBase.h"

 namespace dawn::native {

 static constexpr uint64_t kErrorPayload = 0;
 static constexpr uint64_t kNotErrorPayload = 1;

-ObjectBase::ObjectBase(DeviceBase* device) : RefCounted(kNotErrorPayload), mDevice(device) {
-}
+ObjectBase::ObjectBase(DeviceBase* device) : RefCounted(kNotErrorPayload), mDevice(device) {}

-ObjectBase::ObjectBase(DeviceBase* device, ErrorTag)
-    : RefCounted(kErrorPayload), mDevice(device) {
-}
+ObjectBase::ObjectBase(DeviceBase* device, ErrorTag) : RefCounted(kErrorPayload), mDevice(device) {}

 DeviceBase* ObjectBase::GetDevice() const {
     return mDevice;
@@ -43,12 +40,9 @@ namespace dawn::native {
     }
 }

-ApiObjectBase::ApiObjectBase(DeviceBase* device, ErrorTag tag) : ObjectBase(device, tag) {
-}
+ApiObjectBase::ApiObjectBase(DeviceBase* device, ErrorTag tag) : ObjectBase(device, tag) {}

-ApiObjectBase::ApiObjectBase(DeviceBase* device, LabelNotImplementedTag tag)
-    : ObjectBase(device) {
-}
+ApiObjectBase::ApiObjectBase(DeviceBase* device, LabelNotImplementedTag tag) : ObjectBase(device) {}

 ApiObjectBase::~ApiObjectBase() {
     ASSERT(!IsAlive());
@@ -63,8 +57,7 @@ namespace dawn::native {
     return mLabel;
 }

-void ApiObjectBase::SetLabelImpl() {
-}
+void ApiObjectBase::SetLabelImpl() {}

 bool ApiObjectBase::IsAlive() const {
     return IsInList();
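Note that the include hunk at the top of this file is a reorder, not just a re-wrap: "dawn/native/Device.h" now sorts ahead of "dawn/native/ObjectBase.h", consistent with the formatter keeping each include block alphabetized.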
@@ -44,8 +44,7 @@ namespace dawn::native {
                              texture->GetNumMipLevels(), wgpu::TextureUsage::None));
     TextureSubresourceUsage& textureUsage = it.first->second;

-    textureUsage.Update(range,
-                        [usage](const SubresourceRange&, wgpu::TextureUsage* storedUsage) {
+    textureUsage.Update(range, [usage](const SubresourceRange&, wgpu::TextureUsage* storedUsage) {
         // TODO(crbug.com/dawn/1001): Consider optimizing to have fewer
         // branches.
         if ((*storedUsage & wgpu::TextureUsage::RenderAttachment) != 0 &&
@@ -71,8 +70,8 @@ namespace dawn::native {
                              texture->GetNumMipLevels(), wgpu::TextureUsage::None));
     TextureSubresourceUsage* passTextureUsage = &it.first->second;

-    passTextureUsage->Merge(
-        textureUsage, [](const SubresourceRange&, wgpu::TextureUsage* storedUsage,
+    passTextureUsage->Merge(textureUsage,
+                            [](const SubresourceRange&, wgpu::TextureUsage* storedUsage,
                                const wgpu::TextureUsage& addedUsage) {
                                 ASSERT((addedUsage & wgpu::TextureUsage::RenderAttachment) == 0);
                                 *storedUsage |= addedUsage;
@@ -49,9 +49,7 @@ namespace dawn::native {
 class PerStage {
   public:
     PerStage() = default;
-    explicit PerStage(const T& initialValue) {
-        mData.fill(initialValue);
-    }
+    explicit PerStage(const T& initialValue) { mData.fill(initialValue); }

     T& operator[](SingleShaderStage stage) {
         DAWN_ASSERT(static_cast<uint32_t>(stage) < kNumStages);
@@ -71,8 +71,8 @@ namespace dawn::native {
     std::unordered_set<std::string> stageInitializedConstantIdentifiers;
     for (uint32_t i = 0; i < constantCount; i++) {
         DAWN_INVALID_IF(metadata.overridableConstants.count(constants[i].key) == 0,
-                        "Pipeline overridable constant \"%s\" not found in %s.",
-                        constants[i].key, module);
+                        "Pipeline overridable constant \"%s\" not found in %s.", constants[i].key,
+                        module);

         if (stageInitializedConstantIdentifiers.count(constants[i].key) == 0) {
             if (metadata.uninitializedOverridableConstants.count(constants[i].key) > 0) {
@@ -159,12 +159,10 @@ namespace dawn::native {
     }
 }

-PipelineBase::PipelineBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
-}
+PipelineBase::PipelineBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {}

 PipelineBase::PipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)
-    : ApiObjectBase(device, tag) {
-}
+    : ApiObjectBase(device, tag) {}

 PipelineBase::~PipelineBase() = default;
@@ -200,15 +198,13 @@ namespace dawn::native {
     DAWN_TRY(GetDevice()->ValidateIsAlive());
     DAWN_TRY(GetDevice()->ValidateObject(this));
     DAWN_TRY(GetDevice()->ValidateObject(mLayout.Get()));
-    DAWN_INVALID_IF(
-        groupIndex >= kMaxBindGroups,
-        "Bind group layout index (%u) exceeds the maximum number of bind groups (%u).",
-        groupIndex, kMaxBindGroups);
+    DAWN_INVALID_IF(groupIndex >= kMaxBindGroups,
+                    "Bind group layout index (%u) exceeds the maximum number of bind groups (%u).",
+                    groupIndex, kMaxBindGroups);
     return {};
 }

-ResultOrError<Ref<BindGroupLayoutBase>> PipelineBase::GetBindGroupLayout(
-    uint32_t groupIndexIn) {
+ResultOrError<Ref<BindGroupLayoutBase>> PipelineBase::GetBindGroupLayout(uint32_t groupIndexIn) {
     DAWN_TRY(ValidateGetBindGroupLayout(groupIndexIn));

     BindGroupIndex groupIndex(groupIndexIn);
@@ -29,8 +29,7 @@

 namespace dawn::native {

-MaybeError ValidatePipelineLayoutDescriptor(
-    DeviceBase* device,
+MaybeError ValidatePipelineLayoutDescriptor(DeviceBase* device,
                                             const PipelineLayoutDescriptor* descriptor,
                                             PipelineCompatibilityToken pipelineCompatibilityToken) {
     if (descriptor->nextInChain != nullptr) {
@@ -84,8 +83,7 @@ namespace dawn::native {
 }

 PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag)
-    : ApiObjectBase(device, tag) {
-}
+    : ApiObjectBase(device, tag) {}

 PipelineLayoutBase::~PipelineLayoutBase() = default;
@@ -209,8 +207,7 @@ namespace dawn::native {
                     (SampleTypeBit::Float | SampleTypeBit::UnfilterableFloat)) {
                     // Default to UnfilterableFloat. It will be promoted to Float if it
                     // is used with a sampler.
-                    entry.texture.sampleType =
-                        wgpu::TextureSampleType::UnfilterableFloat;
+                    entry.texture.sampleType = wgpu::TextureSampleType::UnfilterableFloat;
                 } else {
                     UNREACHABLE();
                 }
@@ -290,8 +287,7 @@ namespace dawn::native {
         }

         // Promote any Unfilterable textures used with a sampler to Filtering.
-        for (const EntryPointMetadata::SamplerTexturePair& pair :
-             metadata.samplerTexturePairs) {
+        for (const EntryPointMetadata::SamplerTexturePair& pair : metadata.samplerTexturePairs) {
             BindGroupLayoutEntry* entry = &entryData[pair.texture.group][pair.texture.binding];
             if (entry->texture.sampleType == wgpu::TextureSampleType::UnfilterableFloat) {
                 entry->texture.sampleType = wgpu::TextureSampleType::Float;
@@ -332,8 +328,7 @@ namespace dawn::native {
     // Check in debug that the pipeline layout is compatible with the current pipeline.
     for (const StageAndDescriptor& stage : stages) {
         const EntryPointMetadata& metadata = stage.module->GetEntryPoint(stage.entryPoint);
-        ASSERT(ValidateCompatibilityWithPipelineLayout(device, metadata, result.Get())
-                   .IsSuccess());
+        ASSERT(ValidateCompatibilityWithPipelineLayout(device, metadata, result.Get()).IsSuccess());
     }

     return std::move(result);
@@ -366,8 +361,7 @@ namespace dawn::native {
     return mMask;
 }

-BindGroupLayoutMask PipelineLayoutBase::InheritedGroupsMask(
-    const PipelineLayoutBase* other) const {
+BindGroupLayoutMask PipelineLayoutBase::InheritedGroupsMask(const PipelineLayoutBase* other) const {
     ASSERT(!IsError());
     return {(1 << static_cast<uint32_t>(GroupsInheritUpTo(other))) - 1u};
 }
@@ -38,8 +38,7 @@ namespace dawn::native {
     const PipelineLayoutDescriptor* descriptor,
     PipelineCompatibilityToken pipelineCompatibilityToken = PipelineCompatibilityToken(0));

-using BindGroupLayoutArray =
-    ityp::array<BindGroupIndex, Ref<BindGroupLayoutBase>, kMaxBindGroups>;
+using BindGroupLayoutArray = ityp::array<BindGroupIndex, Ref<BindGroupLayoutBase>, kMaxBindGroups>;
 using BindGroupLayoutMask = ityp::bitset<BindGroupIndex, kMaxBindGroups>;

 struct StageAndDescriptor {
@@ -20,10 +20,8 @@

 namespace dawn::native {

-PooledResourceMemoryAllocator::PooledResourceMemoryAllocator(
-    ResourceHeapAllocator* heapAllocator)
-    : mHeapAllocator(heapAllocator) {
-}
+PooledResourceMemoryAllocator::PooledResourceMemoryAllocator(ResourceHeapAllocator* heapAllocator)
+    : mHeapAllocator(heapAllocator) {}

 void PooledResourceMemoryAllocator::DestroyPool() {
     for (auto& resourceHeap : mPool) {
@@ -34,8 +34,7 @@ namespace dawn::native {
     explicit PooledResourceMemoryAllocator(ResourceHeapAllocator* heapAllocator);
     ~PooledResourceMemoryAllocator() override = default;

-    ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(
-        uint64_t size) override;
+    ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(uint64_t size) override;
     void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) override;

     void DestroyPool();
@@ -33,16 +33,14 @@ namespace dawn::native {
                                          EncodingContext* encodingContext)
     : ApiObjectBase(device, label),
       mEncodingContext(encodingContext),
-      mValidationEnabled(device->IsValidationEnabled()) {
-}
+      mValidationEnabled(device->IsValidationEnabled()) {}

 ProgrammableEncoder::ProgrammableEncoder(DeviceBase* device,
                                          EncodingContext* encodingContext,
                                          ErrorTag errorTag)
     : ApiObjectBase(device, errorTag),
       mEncodingContext(encodingContext),
-      mValidationEnabled(device->IsValidationEnabled()) {
-}
+      mValidationEnabled(device->IsValidationEnabled()) {}

 bool ProgrammableEncoder::IsValidationEnabled() const {
     return mValidationEnabled;
@@ -76,8 +74,7 @@ namespace dawn::native {
         this,
         [&](CommandAllocator* allocator) -> MaybeError {
             if (IsValidationEnabled()) {
-                DAWN_INVALID_IF(
-                    mDebugGroupStackSize == 0,
-                    "PopDebugGroup called when no debug groups are currently pushed.");
+                DAWN_INVALID_IF(mDebugGroupStackSize == 0,
+                                "PopDebugGroup called when no debug groups are currently pushed.");
             }
             allocator->Allocate<PopDebugGroupCmd>(Command::PopDebugGroup);
@@ -114,8 +111,7 @@ namespace dawn::native {
                                             const uint32_t* dynamicOffsetsIn) const {
     DAWN_TRY(GetDevice()->ValidateObject(group));

-    DAWN_INVALID_IF(index >= kMaxBindGroupsTyped,
-                    "Bind group index (%u) exceeds the maximum (%u).",
+    DAWN_INVALID_IF(index >= kMaxBindGroupsTyped, "Bind group index (%u) exceeds the maximum (%u).",
                     static_cast<uint32_t>(index), kMaxBindGroups);

     ityp::span<BindingIndex, const uint32_t> dynamicOffsets(dynamicOffsetsIn,
@@ -153,8 +149,8 @@ namespace dawn::native {
         }

         DAWN_INVALID_IF(!IsAligned(dynamicOffsets[i], requiredAlignment),
-                        "Dynamic Offset[%u] (%u) is not %u byte aligned.",
-                        static_cast<uint32_t>(i), dynamicOffsets[i], requiredAlignment);
+                        "Dynamic Offset[%u] (%u) is not %u byte aligned.", static_cast<uint32_t>(i),
+                        dynamicOffsets[i], requiredAlignment);

         BufferBinding bufferBinding = group->GetBindingAsBufferBinding(i);
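The dynamic-offset check re-wrapped above rejects offsets that are not multiples of the required alignment. A sketch of the alignment test (this mirrors the usual power-of-two fast path; Dawn's actual IsAligned() helper may differ in detail):

    #include <cstdint>

    // Valid only when 'alignment' is a power of two: a multiple of such an
    // alignment has all of the low bits clear.
    bool IsAligned(uint32_t value, uint32_t alignment) {
        return (value & (alignment - 1)) == 0;
    }

    int main() {
        // Dynamic offsets must be multiples of the required alignment.
        return (IsAligned(512, 256) && !IsAligned(300, 256)) ? 0 : 1;
    }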
@@ -30,9 +30,7 @@ namespace dawn::native {
 // Base class for shared functionality between programmable encoders.
 class ProgrammableEncoder : public ApiObjectBase {
   public:
-    ProgrammableEncoder(DeviceBase* device,
-                        const char* label,
-                        EncodingContext* encodingContext);
+    ProgrammableEncoder(DeviceBase* device, const char* label, EncodingContext* encodingContext);

     void APIInsertDebugMarker(const char* groupLabel);
     void APIPopDebugGroup();
@@ -55,9 +53,7 @@ namespace dawn::native {
                                             const uint32_t* dynamicOffsets) const;

     // Construct an "error" programmable pass encoder.
-    ProgrammableEncoder(DeviceBase* device,
-                        EncodingContext* encodingContext,
-                        ErrorTag errorTag);
+    ProgrammableEncoder(DeviceBase* device, EncodingContext* encodingContext, ErrorTag errorTag);

     EncodingContext* mEncodingContext = nullptr;
@@ -116,15 +116,13 @@ namespace dawn::native {
         }
     )";

-    ResultOrError<ComputePipelineBase*> GetOrCreateTimestampComputePipeline(
-        DeviceBase* device) {
+    ResultOrError<ComputePipelineBase*> GetOrCreateTimestampComputePipeline(DeviceBase* device) {
         InternalPipelineStore* store = device->GetInternalPipelineStore();

         if (store->timestampComputePipeline == nullptr) {
             // Create compute shader module if not cached before.
             if (store->timestampCS == nullptr) {
-                DAWN_TRY_ASSIGN(
-                    store->timestampCS,
-                    utils::CreateShaderModule(device, sConvertTimestampsToNanoseconds));
+                DAWN_TRY_ASSIGN(store->timestampCS,
+                                utils::CreateShaderModule(device, sConvertTimestampsToNanoseconds));
             }
@@ -135,8 +133,7 @@ namespace dawn::native {
                     device,
                     {
                         {0, wgpu::ShaderStage::Compute, kInternalStorageBufferBinding},
-                        {1, wgpu::ShaderStage::Compute,
-                         wgpu::BufferBindingType::ReadOnlyStorage},
+                        {1, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage},
                         {2, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform},
                     },
                     /* allowInternalBinding */ true));
@@ -200,9 +197,9 @@ namespace dawn::native {

     // Create bind group after all binding entries are set.
     Ref<BindGroupBase> bindGroup;
-    DAWN_TRY_ASSIGN(bindGroup,
-                    utils::MakeBindGroup(device, layout,
-                                         {{0, timestamps}, {1, availability}, {2, params}}));
+    DAWN_TRY_ASSIGN(
+        bindGroup,
+        utils::MakeBindGroup(device, layout, {{0, timestamps}, {1, availability}, {2, params}}));

     // Create compute encoder and issue dispatch.
     Ref<ComputePassEncoder> pass = encoder->BeginComputePass();
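Both GetOrCreate*Pipeline() helpers touched in this CL follow the same lazy-initialization shape: check the internal pipeline store, build the shader module and pipeline on first use, then return the cached object on every later call. A generic sketch of that shape (Pipeline and Store are stand-ins, not Dawn's types):

    #include <memory>

    struct Pipeline {};

    struct Store {
        std::unique_ptr<Pipeline> timestampPipeline;
    };

    Pipeline* GetOrCreateTimestampPipeline(Store& store) {
        if (store.timestampPipeline == nullptr) {
            // The expensive build happens exactly once; afterwards the cached
            // object is handed out.
            store.timestampPipeline = std::make_unique<Pipeline>();
        }
        return store.timestampPipeline.get();
    }

    int main() {
        Store store;
        Pipeline* first = GetOrCreateTimestampPipeline(store);
        Pipeline* second = GetOrCreateTimestampPipeline(store);
        return first == second ? 0 : 1;
    }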
@@ -27,19 +27,15 @@ namespace dawn::native {

 class ErrorQuerySet final : public QuerySetBase {
   public:
-    explicit ErrorQuerySet(DeviceBase* device) : QuerySetBase(device, ObjectBase::kError) {
-    }
+    explicit ErrorQuerySet(DeviceBase* device) : QuerySetBase(device, ObjectBase::kError) {}

   private:
-    void DestroyImpl() override {
-        UNREACHABLE();
-    }
+    void DestroyImpl() override { UNREACHABLE(); }
 };

 }  // anonymous namespace

-MaybeError ValidateQuerySetDescriptor(DeviceBase* device,
-                                      const QuerySetDescriptor* descriptor) {
+MaybeError ValidateQuerySetDescriptor(DeviceBase* device, const QuerySetDescriptor* descriptor) {
     DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");

     DAWN_TRY(ValidateQueryType(descriptor->type));
@@ -118,8 +114,7 @@ namespace dawn::native {
 }

 QuerySetBase::QuerySetBase(DeviceBase* device, ObjectBase::ErrorTag tag)
-    : ApiObjectBase(device, tag) {
-}
+    : ApiObjectBase(device, tag) {}

 QuerySetBase::~QuerySetBase() {
     // Uninitialized or already destroyed
@@ -49,8 +49,7 @@ namespace dawn::native {
                          uint32_t actualBytesPerRow,
                          uint32_t dstBytesPerRow,
                          uint32_t srcBytesPerRow) {
-    bool copyWholeLayer =
-        actualBytesPerRow == dstBytesPerRow && dstBytesPerRow == srcBytesPerRow;
+    bool copyWholeLayer = actualBytesPerRow == dstBytesPerRow && dstBytesPerRow == srcBytesPerRow;
     bool copyWholeData = copyWholeLayer && imageAdditionalStride == 0;

     if (!copyWholeLayer) {  // copy row by row
@@ -87,19 +86,16 @@ namespace dawn::native {
                                 const TexelBlockInfo& blockInfo,
                                 const Extent3D& writeSizePixel) {
     uint64_t newDataSizeBytes;
-    DAWN_TRY_ASSIGN(
-        newDataSizeBytes,
-        ComputeRequiredBytesInCopy(blockInfo, writeSizePixel, optimallyAlignedBytesPerRow,
-                                   alignedRowsPerImage));
+    DAWN_TRY_ASSIGN(newDataSizeBytes,
+                    ComputeRequiredBytesInCopy(blockInfo, writeSizePixel,
+                                               optimallyAlignedBytesPerRow, alignedRowsPerImage));

-    uint64_t optimalOffsetAlignment =
-        device->GetOptimalBufferToTextureCopyOffsetAlignment();
+    uint64_t optimalOffsetAlignment = device->GetOptimalBufferToTextureCopyOffsetAlignment();
     ASSERT(IsPowerOfTwo(optimalOffsetAlignment));
     ASSERT(IsPowerOfTwo(blockInfo.byteSize));
     // We need the offset to be aligned to both optimalOffsetAlignment and blockByteSize,
     // since both of them are powers of two, we only need to align to the max value.
-    uint64_t offsetAlignment =
-        std::max(optimalOffsetAlignment, uint64_t(blockInfo.byteSize));
+    uint64_t offsetAlignment = std::max(optimalOffsetAlignment, uint64_t(blockInfo.byteSize));

     // For depth-stencil texture, buffer offset must be a multiple of 4, which is required
     // by WebGPU and Vulkan SPEC.
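The comment in the hunk above leans on a fact worth making explicit: when both alignment requirements are powers of two, the larger is always a multiple of the smaller, so aligning to the max of the two satisfies both. A quick check:

    #include <algorithm>
    #include <cstdint>

    int main() {
        // E.g. a 512-byte optimal offset alignment and a 16-byte texel block
        // size: any multiple of 512 is also a multiple of 16.
        uint64_t optimalOffsetAlignment = 512;
        uint64_t blockByteSize = 16;
        uint64_t offsetAlignment = std::max(optimalOffsetAlignment, blockByteSize);
        // Round 1000 up to the combined alignment.
        uint64_t offset = (1000 + offsetAlignment - 1) & ~(offsetAlignment - 1);
        return (offset % optimalOffsetAlignment == 0 && offset % blockByteSize == 0) ? 0 : 1;
    }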
@@ -109,9 +105,9 @@ namespace dawn::native {
     }

     UploadHandle uploadHandle;
-    DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
-                                      newDataSizeBytes, device->GetPendingCommandSerial(),
-                                      offsetAlignment));
+    DAWN_TRY_ASSIGN(uploadHandle,
+                    device->GetDynamicUploader()->Allocate(
+                        newDataSizeBytes, device->GetPendingCommandSerial(), offsetAlignment));
     ASSERT(uploadHandle.mappedBuffer != nullptr);

     uint8_t* dstPointer = static_cast<uint8_t*>(uploadHandle.mappedBuffer);
@@ -127,17 +123,16 @@ namespace dawn::native {
     uint64_t imageAdditionalStride =
         dataLayout.bytesPerRow * (dataRowsPerImage - alignedRowsPerImage);

-    CopyTextureData(dstPointer, srcPointer, writeSizePixel.depthOrArrayLayers,
-                    alignedRowsPerImage, imageAdditionalStride, alignedBytesPerRow,
-                    optimallyAlignedBytesPerRow, dataLayout.bytesPerRow);
+    CopyTextureData(dstPointer, srcPointer, writeSizePixel.depthOrArrayLayers, alignedRowsPerImage,
+                    imageAdditionalStride, alignedBytesPerRow, optimallyAlignedBytesPerRow,
+                    dataLayout.bytesPerRow);

     return uploadHandle;
 }

 struct SubmittedWorkDone : QueueBase::TaskInFlight {
     SubmittedWorkDone(WGPUQueueWorkDoneCallback callback, void* userdata)
-        : mCallback(callback), mUserdata(userdata) {
-    }
+        : mCallback(callback), mUserdata(userdata) {}
     void Finish(dawn::platform::Platform* platform, ExecutionSerial serial) override {
         ASSERT(mCallback != nullptr);
         TRACE_EVENT1(platform, General, "Queue::SubmittedWorkDone::Finished", "serial",
@@ -159,12 +154,10 @@ namespace dawn::native {

 class ErrorQueue : public QueueBase {
   public:
-    explicit ErrorQueue(DeviceBase* device) : QueueBase(device, ObjectBase::kError) {
-    }
+    explicit ErrorQueue(DeviceBase* device) : QueueBase(device, ObjectBase::kError) {}

   private:
-    MaybeError SubmitImpl(uint32_t commandCount,
-                          CommandBufferBase* const* commands) override {
+    MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override {
         UNREACHABLE();
     }
 };
@@ -172,23 +165,18 @@ namespace dawn::native {

 // QueueBase

-QueueBase::TaskInFlight::~TaskInFlight() {
-}
+QueueBase::TaskInFlight::~TaskInFlight() {}

 QueueBase::QueueBase(DeviceBase* device, const QueueDescriptor* descriptor)
-    : ApiObjectBase(device, descriptor->label) {
-}
+    : ApiObjectBase(device, descriptor->label) {}

-QueueBase::QueueBase(DeviceBase* device, ObjectBase::ErrorTag tag)
-    : ApiObjectBase(device, tag) {
-}
+QueueBase::QueueBase(DeviceBase* device, ObjectBase::ErrorTag tag) : ApiObjectBase(device, tag) {}

 QueueBase::~QueueBase() {
     ASSERT(mTasksInFlight.Empty());
 }

-void QueueBase::DestroyImpl() {
-}
+void QueueBase::DestroyImpl() {}

 // static
 QueueBase* QueueBase::MakeError(DeviceBase* device) {
@@ -291,8 +279,8 @@ namespace dawn::native {
     DeviceBase* device = GetDevice();

     UploadHandle uploadHandle;
-    DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
-                                      size, device->GetPendingCommandSerial(),
-                                      kCopyBufferToBufferOffsetAlignment));
+    DAWN_TRY_ASSIGN(uploadHandle,
+                    device->GetDynamicUploader()->Allocate(size, device->GetPendingCommandSerial(),
+                                                           kCopyBufferToBufferOffsetAlignment));
     ASSERT(uploadHandle.mappedBuffer != nullptr);

@@ -347,15 +335,13 @@ namespace dawn::native {
     uint32_t alignedRowsPerImage = writeSizePixel.height / blockInfo.height;

     uint32_t optimalBytesPerRowAlignment = GetDevice()->GetOptimalBytesPerRowAlignment();
-    uint32_t optimallyAlignedBytesPerRow =
-        Align(alignedBytesPerRow, optimalBytesPerRowAlignment);
+    uint32_t optimallyAlignedBytesPerRow = Align(alignedBytesPerRow, optimalBytesPerRowAlignment);

     UploadHandle uploadHandle;
-    DAWN_TRY_ASSIGN(uploadHandle,
-                    UploadTextureDataAligningBytesPerRowAndOffset(
-                        GetDevice(), data, alignedBytesPerRow, optimallyAlignedBytesPerRow,
-                        alignedRowsPerImage, dataLayout, format.HasDepthOrStencil(), blockInfo,
-                        writeSizePixel));
+    DAWN_TRY_ASSIGN(uploadHandle, UploadTextureDataAligningBytesPerRowAndOffset(
+                                      GetDevice(), data, alignedBytesPerRow,
+                                      optimallyAlignedBytesPerRow, alignedRowsPerImage, dataLayout,
+                                      format.HasDepthOrStencil(), blockInfo, writeSizePixel));

     TextureDataLayout passDataLayout = dataLayout;
     passDataLayout.offset = uploadHandle.startOffset;
@@ -384,8 +370,7 @@ namespace dawn::native {
         CopyTextureForBrowserInternal(source, destination, copySize, options));
 }

-MaybeError QueueBase::CopyTextureForBrowserInternal(
-    const ImageCopyTexture* source,
+MaybeError QueueBase::CopyTextureForBrowserInternal(const ImageCopyTexture* source,
                                                     const ImageCopyTexture* destination,
                                                     const Extent3D* copySize,
                                                     const CopyTextureForBrowserOptions* options) {
@@ -481,9 +466,8 @@ namespace dawn::native {
                     "Usage (%s) of %s does not include %s.", destination->texture->GetUsage(),
                     destination->texture, wgpu::TextureUsage::CopyDst);

-    DAWN_INVALID_IF(destination->texture->GetSampleCount() > 1,
-                    "Sample count (%u) of %s is not 1", destination->texture->GetSampleCount(),
-                    destination->texture);
+    DAWN_INVALID_IF(destination->texture->GetSampleCount() > 1, "Sample count (%u) of %s is not 1",
+                    destination->texture->GetSampleCount(), destination->texture);

     DAWN_TRY(ValidateLinearToDepthStencilCopyRestrictions(*destination));
     // We validate texture copy range before validating linear texture data,
Some files were not shown because too many files have changed in this diff.