Consistent formatting for Dawn/Tint.

This CL updates the clang-format files to a single shared format for
Dawn and Tint. The major changes: indentation is 4 spaces, the column
limit is 100, and namespaces are not indented.

Bug: dawn:1339
Change-Id: I4208742c95643998d9fd14e77a9cc558071ded39
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/87603
Commit-Queue: Dan Sinclair <dsinclair@chromium.org>
Reviewed-by: Corentin Wallez <cwallez@chromium.org>
Kokoro: Kokoro <noreply+kokoro@google.com>
dan sinclair 2022-05-01 14:40:55 +00:00 committed by Dawn LUCI CQ
parent 73b1d1dafa
commit 41e4d9a34c
1827 changed files with 218382 additions and 227741 deletions
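
To make the new rules concrete, here is an illustrative before/after sketch (hypothetical code, not taken from this CL). Besides the 4-space indent, the 100-column limit, and the flat namespaces named in the commit message, the removal of AllowShortFunctionsOnASingleLine: false in the .clang-format diff below falls back to the Chromium default, which lets clang-format collapse trivial bodies onto one line; that collapse accounts for most of the churn in the files that follow.

#include <cstdint>

#if 0  // Before: NamespaceIndentation: All indented the namespace body, and
       // short function bodies were kept on multiple lines.
namespace dawn {
    class Adapter {
      public:
        uint32_t GetLimit() const {
            return mLimit;
        }

      private:
        uint32_t mLimit = 0;
    };
}  // namespace dawn
#else  // After: 4-space indent, 100 columns, no namespace indentation, and
       // trivial bodies collapsed onto one line.
namespace dawn {
class Adapter {
  public:
    uint32_t GetLimit() const { return mLimit; }

  private:
    uint32_t mLimit = 0;
};
}  // namespace dawn
#endif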


@@ -1,8 +1,5 @@
 # http://clang.llvm.org/docs/ClangFormatStyleOptions.html
 BasedOnStyle: Chromium
-Standard: Cpp11
-AllowShortFunctionsOnASingleLine: false
 ColumnLimit: 100
@@ -11,10 +8,3 @@ IndentWidth: 4
 ObjCBlockIndentWidth: 4
 AccessModifierOffset: -2
-CompactNamespaces: true
-# This should result in only one indentation level with compacted namespaces
-NamespaceIndentation: All
-# Use this option once clang-format 6 is out.
-IndentPPDirectives: AfterHash


@@ -121,7 +121,7 @@ def _NonInclusiveFileFilter(file):
         "third_party/khronos/KHR/khrplatform.h",  # Third party file
         "tools/roll-all",  # Branch name
         "tools/src/container/key.go",  # External URL
-        "tools/src/go.sum",  # External URL
+        "go.sum",  # External URL
     ]
     return file.LocalPath() not in filter_list


@@ -1 +0,0 @@
-filter=-runtime/indentation_namespace


@@ -45,9 +45,7 @@ namespace dawn {
 struct LowerBitmask<T, typename std::enable_if<IsDawnBitmask<T>::enable>::type> {
     static constexpr bool enable = true;
     using type = T;
-    constexpr static T Lower(T t) {
-        return t;
-    }
+    constexpr static T Lower(T t) { return t; }
 };

 template <typename T>
@@ -55,14 +53,9 @@ namespace dawn {
     using Integral = typename std::underlying_type<T>::type;

     // NOLINTNEXTLINE(runtime/explicit)
-    constexpr BoolConvertible(Integral value) : value(value) {
-    }
-    constexpr operator bool() const {
-        return value != 0;
-    }
-    constexpr operator T() const {
-        return static_cast<T>(value);
-    }
+    constexpr BoolConvertible(Integral value) : value(value) {}
+    constexpr operator bool() const { return value != 0; }
+    constexpr operator T() const { return static_cast<T>(value); }

     Integral value;
 };
@@ -71,15 +64,13 @@ namespace dawn {
 struct LowerBitmask<BoolConvertible<T>> {
     static constexpr bool enable = true;
     using type = T;
-    static constexpr type Lower(BoolConvertible<T> t) {
-        return t;
-    }
+    static constexpr type Lower(BoolConvertible<T> t) { return t; }
 };

-template <typename T1,
-          typename T2,
-          typename = typename std::enable_if<LowerBitmask<T1>::enable &&
-                                             LowerBitmask<T2>::enable>::type>
+template <
+    typename T1,
+    typename T2,
+    typename = typename std::enable_if<LowerBitmask<T1>::enable && LowerBitmask<T2>::enable>::type>
 constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator|(T1 left, T2 right) {
     using T = typename LowerBitmask<T1>::type;
     using Integral = typename std::underlying_type<T>::type;
@@ -87,10 +78,10 @@ namespace dawn {
            static_cast<Integral>(LowerBitmask<T2>::Lower(right));
 }

-template <typename T1,
-          typename T2,
-          typename = typename std::enable_if<LowerBitmask<T1>::enable &&
-                                             LowerBitmask<T2>::enable>::type>
+template <
+    typename T1,
+    typename T2,
+    typename = typename std::enable_if<LowerBitmask<T1>::enable && LowerBitmask<T2>::enable>::type>
 constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator&(T1 left, T2 right) {
     using T = typename LowerBitmask<T1>::type;
     using Integral = typename std::underlying_type<T>::type;
@@ -98,10 +89,10 @@ namespace dawn {
            static_cast<Integral>(LowerBitmask<T2>::Lower(right));
 }

-template <typename T1,
-          typename T2,
-          typename = typename std::enable_if<LowerBitmask<T1>::enable &&
-                                             LowerBitmask<T2>::enable>::type>
+template <
+    typename T1,
+    typename T2,
+    typename = typename std::enable_if<LowerBitmask<T1>::enable && LowerBitmask<T2>::enable>::type>
 constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator^(T1 left, T2 right) {
     using T = typename LowerBitmask<T1>::type;
     using Integral = typename std::underlying_type<T>::type;
@@ -116,30 +107,30 @@ namespace dawn {
     return ~static_cast<Integral>(LowerBitmask<T1>::Lower(t));
 }

-template <typename T,
-          typename T2,
-          typename = typename std::enable_if<IsDawnBitmask<T>::enable &&
-                                             LowerBitmask<T2>::enable>::type>
+template <
+    typename T,
+    typename T2,
+    typename = typename std::enable_if<IsDawnBitmask<T>::enable && LowerBitmask<T2>::enable>::type>
 constexpr T& operator&=(T& l, T2 right) {
     T r = LowerBitmask<T2>::Lower(right);
     l = l & r;
     return l;
 }

-template <typename T,
-          typename T2,
-          typename = typename std::enable_if<IsDawnBitmask<T>::enable &&
-                                             LowerBitmask<T2>::enable>::type>
+template <
+    typename T,
+    typename T2,
+    typename = typename std::enable_if<IsDawnBitmask<T>::enable && LowerBitmask<T2>::enable>::type>
 constexpr T& operator|=(T& l, T2 right) {
     T r = LowerBitmask<T2>::Lower(right);
     l = l | r;
     return l;
 }

-template <typename T,
-          typename T2,
-          typename = typename std::enable_if<IsDawnBitmask<T>::enable &&
-                                             LowerBitmask<T2>::enable>::type>
+template <
+    typename T,
+    typename T2,
+    typename = typename std::enable_if<IsDawnBitmask<T>::enable && LowerBitmask<T2>::enable>::type>
 constexpr T& operator^=(T& l, T2 right) {
     T r = LowerBitmask<T2>::Lower(right);
     l = l ^ r;
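
For context on the header diffed above: these operators exist so that a scoped enum can be used as a bitmask once it opts in through the IsDawnBitmask trait, with BoolConvertible bridging the result back to both bool and the enum type. A minimal usage sketch follows; it assumes the header is included as dawn/EnumClassBitmasks.h and that opting in is done by specializing IsDawnBitmask, and the Usage enum itself is hypothetical, not part of this CL.

#include <cstdint>

#include "dawn/EnumClassBitmasks.h"  // assumed include path for the header above

namespace dawn {

enum class Usage : uint32_t {
    None = 0,
    Read = 1 << 0,
    Write = 1 << 1,
};

// Hypothetical opt-in: enables the |, &, ^, ~ and compound-assignment
// operators shown in the diff above for this enum.
template <>
struct IsDawnBitmask<Usage> {
    static constexpr bool enable = true;
};

}  // namespace dawn

int main() {
    // operator| yields a BoolConvertible that implicitly converts back to Usage.
    dawn::Usage rw = dawn::Usage::Read | dawn::Usage::Write;
    // operator& yields one that converts to bool, so flag tests read naturally.
    return (rw & dawn::Usage::Read) ? 0 : 1;
}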


@@ -48,8 +48,8 @@ namespace dawn::native::metal {
     uint32_t plane;
 };

-DAWN_NATIVE_EXPORT WGPUTexture
-WrapIOSurface(WGPUDevice device, const ExternalImageDescriptorIOSurface* descriptor);
+DAWN_NATIVE_EXPORT WGPUTexture WrapIOSurface(WGPUDevice device,
+                                             const ExternalImageDescriptorIOSurface* descriptor);

 // When making Metal interop with other APIs, we need to be careful that QueueSubmit doesn't
 // mean that the operations will be visible to other APIs/Metal devices right away. macOS


@@ -35,8 +35,9 @@ namespace dawn::native::opengl {
 };

 using PresentCallback = void (*)(void*);
-DAWN_NATIVE_EXPORT DawnSwapChainImplementation
-CreateNativeSwapChainImpl(WGPUDevice device, PresentCallback present, void* presentUserdata);
+DAWN_NATIVE_EXPORT DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
+                                                                         PresentCallback present,
+                                                                         void* presentUserdata);
 DAWN_NATIVE_EXPORT WGPUTextureFormat
 GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain);


@@ -28,8 +28,8 @@ namespace dawn::native::vulkan {
 DAWN_NATIVE_EXPORT PFN_vkVoidFunction GetInstanceProcAddr(WGPUDevice device, const char* pName);

-DAWN_NATIVE_EXPORT DawnSwapChainImplementation
-CreateNativeSwapChainImpl(WGPUDevice device, ::VkSurfaceKHR surface);
+DAWN_NATIVE_EXPORT DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
+                                                                         ::VkSurfaceKHR surface);
 DAWN_NATIVE_EXPORT WGPUTextureFormat
 GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain);


@@ -101,8 +101,7 @@ namespace dawn::platform {
     // The |fingerprint| is provided by Dawn to inform the client to discard the Dawn caches
     // when the fingerprint changes. The returned CachingInterface is expected to outlive the
     // device which uses it to persistently cache objects.
-    virtual CachingInterface* GetCachingInterface(const void* fingerprint,
-                                                  size_t fingerprintSize);
+    virtual CachingInterface* GetCachingInterface(const void* fingerprint, size_t fingerprintSize);

     virtual std::unique_ptr<WorkerTaskPool> CreateWorkerTaskPool();

   private:


@@ -53,8 +53,7 @@ namespace dawn::wire {
 DAWN_WIRE_EXPORT size_t
 SerializedWGPUDevicePropertiesSize(const WGPUDeviceProperties* deviceProperties);

-DAWN_WIRE_EXPORT void SerializeWGPUDeviceProperties(
-    const WGPUDeviceProperties* deviceProperties,
-    char* serializeBuffer);
+DAWN_WIRE_EXPORT void SerializeWGPUDeviceProperties(const WGPUDeviceProperties* deviceProperties,
+                                                    char* serializeBuffer);

 DAWN_WIRE_EXPORT bool DeserializeWGPUDeviceProperties(WGPUDeviceProperties* deviceProperties,


@@ -160,9 +160,7 @@ namespace dawn::wire {
         // the subrange (offset, offset + size) of the allocation at buffer unmap
         // This subrange is always the whole mapped region for now
         // There could be nothing to be serialized (if using shared memory)
-        virtual void SerializeDataUpdate(void* serializePointer,
-                                         size_t offset,
-                                         size_t size) = 0;
+        virtual void SerializeDataUpdate(void* serializePointer, size_t offset, size_t size) = 0;

       private:
         WriteHandle(const WriteHandle&) = delete;


@@ -1,2 +0,0 @@
-# http://clang.llvm.org/docs/ClangFormatStyleOptions.html
-BasedOnStyle: Chromium


@@ -1 +0,0 @@
-filter=-runtime/indentation_namespace


@@ -62,24 +62,18 @@ class BitSetIterator final {
         uint32_t mOffset;
     };

-    Iterator begin() const {
-        return Iterator(mBits);
-    }
-    Iterator end() const {
-        return Iterator(std::bitset<N>(0));
-    }
+    Iterator begin() const { return Iterator(mBits); }
+    Iterator end() const { return Iterator(std::bitset<N>(0)); }

   private:
     const std::bitset<N> mBits;
 };

 template <size_t N, typename T>
-BitSetIterator<N, T>::BitSetIterator(const std::bitset<N>& bitset) : mBits(bitset) {
-}
+BitSetIterator<N, T>::BitSetIterator(const std::bitset<N>& bitset) : mBits(bitset) {}

 template <size_t N, typename T>
-BitSetIterator<N, T>::BitSetIterator(const BitSetIterator& other) : mBits(other.mBits) {
-}
+BitSetIterator<N, T>::BitSetIterator(const BitSetIterator& other) : mBits(other.mBits) {}

 template <size_t N, typename T>
 BitSetIterator<N, T>& BitSetIterator<N, T>::operator=(const BitSetIterator& other) {


@@ -22,12 +22,8 @@
 template <typename T>
 struct CoreFoundationRefTraits {
     static constexpr T kNullValue = nullptr;
-    static void Reference(T value) {
-        CFRetain(value);
-    }
-    static void Release(T value) {
-        CFRelease(value);
-    }
+    static void Reference(T value) { CFRetain(value); }
+    static void Release(T value) { CFRelease(value); }
 };

 template <typename T>


@@ -25,17 +25,17 @@ namespace gpu_info {
 // Referenced from the following Mesa source code:
 // https://github.com/mesa3d/mesa/blob/master/include/pci_ids/i965_pci_ids.h
 // gen9
-const std::array<uint32_t, 25> Skylake = {
-    {0x1902, 0x1906, 0x190A, 0x190B, 0x190E, 0x1912, 0x1913, 0x1915, 0x1916,
-     0x1917, 0x191A, 0x191B, 0x191D, 0x191E, 0x1921, 0x1923, 0x1926, 0x1927,
-     0x192A, 0x192B, 0x192D, 0x1932, 0x193A, 0x193B, 0x193D}};
+const std::array<uint32_t, 25> Skylake = {{0x1902, 0x1906, 0x190A, 0x190B, 0x190E, 0x1912, 0x1913,
+                                           0x1915, 0x1916, 0x1917, 0x191A, 0x191B, 0x191D, 0x191E,
+                                           0x1921, 0x1923, 0x1926, 0x1927, 0x192A, 0x192B, 0x192D,
+                                           0x1932, 0x193A, 0x193B, 0x193D}};
 // gen9p5
-const std::array<uint32_t, 20> Kabylake = {
-    {0x5916, 0x5913, 0x5906, 0x5926, 0x5921, 0x5915, 0x590E, 0x591E, 0x5912, 0x5917,
-     0x5902, 0x591B, 0x593B, 0x590B, 0x591A, 0x590A, 0x591D, 0x5908, 0x5923, 0x5927}};
+const std::array<uint32_t, 20> Kabylake = {{0x5916, 0x5913, 0x5906, 0x5926, 0x5921, 0x5915, 0x590E,
+                                            0x591E, 0x5912, 0x5917, 0x5902, 0x591B, 0x593B, 0x590B,
+                                            0x591A, 0x590A, 0x591D, 0x5908, 0x5923, 0x5927}};
-const std::array<uint32_t, 17> Coffeelake = {
-    {0x87CA, 0x3E90, 0x3E93, 0x3E99, 0x3E9C, 0x3E91, 0x3E92, 0x3E96, 0x3E98, 0x3E9A, 0x3E9B,
-     0x3E94, 0x3EA9, 0x3EA5, 0x3EA6, 0x3EA7, 0x3EA8}};
+const std::array<uint32_t, 17> Coffeelake = {{0x87CA, 0x3E90, 0x3E93, 0x3E99, 0x3E9C, 0x3E91,
+                                              0x3E92, 0x3E96, 0x3E98, 0x3E9A, 0x3E9B, 0x3E94,
+                                              0x3EA9, 0x3EA5, 0x3EA6, 0x3EA7, 0x3EA8}};
 const std::array<uint32_t, 5> Whiskylake = {{0x3EA1, 0x3EA4, 0x3EA0, 0x3EA3, 0x3EA2}};
 const std::array<uint32_t, 21> Cometlake = {
     {0x9B21, 0x9BA0, 0x9BA2, 0x9BA4, 0x9BA5, 0x9BA8, 0x9BAA, 0x9BAB, 0x9BAC, 0x9B41, 0x9BC0,


@@ -22,12 +22,8 @@
 template <typename T>
 struct IOKitRefTraits {
     static constexpr T kNullValue = IO_OBJECT_NULL;
-    static void Reference(T value) {
-        IOObjectRetain(value);
-    }
-    static void Release(T value) {
-        IOObjectRelease(value);
-    }
+    static void Reference(T value) { IOObjectRetain(value); }
+    static void Release(T value) { IOObjectRelease(value); }
 };

 template <typename T>


@@ -99,10 +99,8 @@ class LinkedList;
 template <typename T>
 class LinkNode {
   public:
-    LinkNode() : previous_(nullptr), next_(nullptr) {
-    }
-    LinkNode(LinkNode<T>* previous, LinkNode<T>* next) : previous_(previous), next_(next) {
-    }
+    LinkNode() : previous_(nullptr), next_(nullptr) {}
+    LinkNode(LinkNode<T>* previous, LinkNode<T>* next) : previous_(previous), next_(next) {}

     LinkNode(LinkNode<T>&& rhs) {
         next_ = rhs.next_;
@@ -154,22 +152,14 @@ class LinkNode {
         return true;
     }

-    LinkNode<T>* previous() const {
-        return previous_;
-    }
+    LinkNode<T>* previous() const { return previous_; }

-    LinkNode<T>* next() const {
-        return next_;
-    }
+    LinkNode<T>* next() const { return next_; }

     // Cast from the node-type to the value type.
-    const T* value() const {
-        return static_cast<const T*>(this);
-    }
+    const T* value() const { return static_cast<const T*>(this); }

-    T* value() {
-        return static_cast<T*>(this);
-    }
+    T* value() { return static_cast<T*>(this); }

   private:
     friend class LinkedList<T>;
@@ -183,8 +173,7 @@ class LinkedList {
     // The "root" node is self-referential, and forms the basis of a circular
     // list (root_.next() will point back to the start of the list,
     // and root_->previous() wraps around to the end of the list).
-    LinkedList() : root_(&root_, &root_) {
-    }
+    LinkedList() : root_(&root_, &root_) {}

     ~LinkedList() {
         // If any LinkNodes still exist in the LinkedList, there will be outstanding references to
@@ -194,9 +183,7 @@ class LinkedList {
     }

     // Appends |e| to the end of the linked list.
-    void Append(LinkNode<T>* e) {
-        e->InsertBefore(&root_);
-    }
+    void Append(LinkNode<T>* e) { e->InsertBefore(&root_); }

     // Moves all elements (in order) of the list and appends them into |l| leaving the list empty.
     void MoveInto(LinkedList<T>* l) {
@@ -212,21 +199,13 @@ class LinkedList {
         root_.previous_ = &root_;
     }

-    LinkNode<T>* head() const {
-        return root_.next();
-    }
+    LinkNode<T>* head() const { return root_.next(); }

-    LinkNode<T>* tail() const {
-        return root_.previous();
-    }
+    LinkNode<T>* tail() const { return root_.previous(); }

-    const LinkNode<T>* end() const {
-        return &root_;
-    }
+    const LinkNode<T>* end() const { return &root_; }

-    bool empty() const {
-        return head() == end();
-    }
+    bool empty() const { return head() == end(); }

   private:
     LinkNode<T> root_;
@@ -235,8 +214,7 @@ class LinkedList {
 template <typename T>
 class LinkedListIterator {
   public:
-    explicit LinkedListIterator(LinkNode<T>* node) : current_(node), next_(node->next()) {
-    }
+    explicit LinkedListIterator(LinkNode<T>* node) : current_(node), next_(node->next()) {}

     // We keep an early reference to the next node in the list so that even if the current element
     // is modified or removed from the list, we have a valid next node.
@@ -246,13 +224,9 @@ class LinkedListIterator {
         return *this;
     }

-    bool operator!=(const LinkedListIterator<T>& other) const {
-        return current_ != other.current_;
-    }
+    bool operator!=(const LinkedListIterator<T>& other) const { return current_ != other.current_; }

-    LinkNode<T>* operator*() const {
-        return current_;
-    }
+    LinkNode<T>* operator*() const { return current_; }

   private:
     LinkNode<T>* current_;


@@ -64,8 +64,7 @@ namespace dawn {
 }  // anonymous namespace

-LogMessage::LogMessage(LogSeverity severity) : mSeverity(severity) {
-}
+LogMessage::LogMessage(LogSeverity severity) : mSeverity(severity) {}

 LogMessage::~LogMessage() {
     std::string fullMessage = mStream.str();


@@ -67,12 +67,8 @@
 template <typename T>
 struct NSRefTraits {
     static constexpr T kNullValue = nullptr;
-    static void Reference(T value) {
-        [value retain];
-    }
-    static void Release(T value) {
-        [value release];
-    }
+    static void Reference(T value) { [value retain]; }
+    static void Release(T value) { [value release]; }
 };

 template <typename T>
@@ -80,13 +76,9 @@ class NSRef : public RefBase<T*, NSRefTraits<T*>> {
   public:
     using RefBase<T*, NSRefTraits<T*>>::RefBase;

-    const T* operator*() const {
-        return this->Get();
-    }
+    const T* operator*() const { return this->Get(); }

-    T* operator*() {
-        return this->Get();
-    }
+    T* operator*() { return this->Get(); }
 };

 template <typename T>
@@ -104,13 +96,9 @@ class NSPRef : public RefBase<T, NSRefTraits<T>> {
   public:
     using RefBase<T, NSRefTraits<T>>::RefBase;

-    const T operator*() const {
-        return this->Get();
-    }
+    const T operator*() const { return this->Get(); }

-    T operator*() {
-        return this->Get();
-    }
+    T operator*() { return this->Get(); }
 };

 template <typename T>


@@ -36,17 +36,13 @@ template <typename T, typename Traits>
 class RefBase {
   public:
     // Default constructor and destructor.
-    RefBase() : mValue(Traits::kNullValue) {
-    }
+    RefBase() : mValue(Traits::kNullValue) {}

-    ~RefBase() {
-        Release(mValue);
-    }
+    ~RefBase() { Release(mValue); }

     // Constructors from nullptr.
     // NOLINTNEXTLINE(runtime/explicit)
-    constexpr RefBase(std::nullptr_t) : RefBase() {
-    }
+    constexpr RefBase(std::nullptr_t) : RefBase() {}

     RefBase<T, Traits>& operator=(std::nullptr_t) {
         Set(Traits::kNullValue);
@@ -55,9 +51,7 @@ class RefBase {
     // Constructors from a value T.
     // NOLINTNEXTLINE(runtime/explicit)
-    RefBase(T value) : mValue(value) {
-        Reference(value);
-    }
+    RefBase(T value) : mValue(value) { Reference(value); }

     RefBase<T, Traits>& operator=(const T& value) {
         Set(value);
@@ -65,18 +59,14 @@ class RefBase {
     }

     // Constructors from a RefBase<T>
-    RefBase(const RefBase<T, Traits>& other) : mValue(other.mValue) {
-        Reference(other.mValue);
-    }
+    RefBase(const RefBase<T, Traits>& other) : mValue(other.mValue) { Reference(other.mValue); }

     RefBase<T, Traits>& operator=(const RefBase<T, Traits>& other) {
         Set(other.mValue);
         return *this;
     }

-    RefBase(RefBase<T, Traits>&& other) {
-        mValue = other.Detach();
-    }
+    RefBase(RefBase<T, Traits>&& other) { mValue = other.Detach(); }

     RefBase<T, Traits>& operator=(RefBase<T, Traits>&& other) {
         if (&other != this) {
@@ -113,28 +103,16 @@ class RefBase {
     }

     // Comparison operators.
-    bool operator==(const T& other) const {
-        return mValue == other;
-    }
-    bool operator!=(const T& other) const {
-        return mValue != other;
-    }
+    bool operator==(const T& other) const { return mValue == other; }
+    bool operator!=(const T& other) const { return mValue != other; }

-    const T operator->() const {
-        return mValue;
-    }
-    T operator->() {
-        return mValue;
-    }
+    const T operator->() const { return mValue; }
+    T operator->() { return mValue; }

     // Smart pointer methods.
-    const T& Get() const {
-        return mValue;
-    }
-    T& Get() {
-        return mValue;
-    }
+    const T& Get() const { return mValue; }
+    T& Get() { return mValue; }

     [[nodiscard]] T Detach() {
         T value{std::move(mValue)};

@@ -45,12 +45,8 @@ class RefCounted {
 template <typename T>
 struct RefCountedTraits {
     static constexpr T* kNullValue = nullptr;
-    static void Reference(T* value) {
-        value->Reference();
-    }
-    static void Release(T* value) {
-        value->Release();
-    }
+    static void Reference(T* value) { value->Reference(); }
+    static void Release(T* value) { value->Release(); }
 };

 template <typename T>


@@ -237,16 +237,13 @@ class [[nodiscard]] Result {
 // Implementation of Result<void, E>
 template <typename E>
-Result<void, E>::Result() {
-}
+Result<void, E>::Result() {}

 template <typename E>
-Result<void, E>::Result(std::unique_ptr<E> error) : mError(std::move(error)) {
-}
+Result<void, E>::Result(std::unique_ptr<E> error) : mError(std::move(error)) {}

 template <typename E>
-Result<void, E>::Result(Result<void, E>&& other) : mError(std::move(other.mError)) {
-}
+Result<void, E>::Result(Result<void, E>&& other) : mError(std::move(other.mError)) {}

 template <typename E>
 Result<void, E>& Result<void, E>::operator=(Result<void, E>&& other) {
@@ -271,8 +268,7 @@ bool Result<void, E>::IsSuccess() const {
 }

 template <typename E>
-void Result<void, E>::AcquireSuccess() {
-}
+void Result<void, E>::AcquireSuccess() {}

 template <typename E>
 std::unique_ptr<E> Result<void, E>::AcquireError() {
@@ -298,13 +294,11 @@ namespace detail {
 // Implementation of Result<T*, E>
 template <typename T, typename E>
-Result<T*, E>::Result(T* success) : mPayload(detail::MakePayload(success, detail::Success)) {
-}
+Result<T*, E>::Result(T* success) : mPayload(detail::MakePayload(success, detail::Success)) {}

 template <typename T, typename E>
 Result<T*, E>::Result(std::unique_ptr<E> error)
-    : mPayload(detail::MakePayload(error.release(), detail::Error)) {
-}
+    : mPayload(detail::MakePayload(error.release(), detail::Error)) {}

 template <typename T, typename E>
 template <typename TChild>
@@ -355,13 +349,11 @@ std::unique_ptr<E> Result<T*, E>::AcquireError() {
 // Implementation of Result<const T*, E*>
 template <typename T, typename E>
 Result<const T*, E>::Result(const T* success)
-    : mPayload(detail::MakePayload(success, detail::Success)) {
-}
+    : mPayload(detail::MakePayload(success, detail::Success)) {}

 template <typename T, typename E>
 Result<const T*, E>::Result(std::unique_ptr<E> error)
-    : mPayload(detail::MakePayload(error.release(), detail::Error)) {
-}
+    : mPayload(detail::MakePayload(error.release(), detail::Error)) {}

 template <typename T, typename E>
 Result<const T*, E>::Result(Result<const T*, E>&& other) : mPayload(other.mPayload) {
@@ -415,13 +407,11 @@ Result<Ref<T>, E>::Result(Ref<U>&& success)
 template <typename T, typename E>
 template <typename U>
-Result<Ref<T>, E>::Result(const Ref<U>& success) : Result(Ref<U>(success)) {
-}
+Result<Ref<T>, E>::Result(const Ref<U>& success) : Result(Ref<U>(success)) {}

 template <typename T, typename E>
 Result<Ref<T>, E>::Result(std::unique_ptr<E> error)
-    : mPayload(detail::MakePayload(error.release(), detail::Error)) {
-}
+    : mPayload(detail::MakePayload(error.release(), detail::Error)) {}

 template <typename T, typename E>
 template <typename U>
@@ -473,12 +463,10 @@ std::unique_ptr<E> Result<Ref<T>, E>::AcquireError() {
 // Implementation of Result<T, E>
 template <typename T, typename E>
-Result<T, E>::Result(T&& success) : mType(Success), mSuccess(std::move(success)) {
-}
+Result<T, E>::Result(T&& success) : mType(Success), mSuccess(std::move(success)) {}

 template <typename T, typename E>
-Result<T, E>::Result(std::unique_ptr<E> error) : mType(Error), mError(std::move(error)) {
-}
+Result<T, E>::Result(std::unique_ptr<E> error) : mType(Error), mError(std::move(error)) {}

 template <typename T, typename E>
 Result<T, E>::~Result() {


@@ -193,8 +193,7 @@ typename SerialStorage<Derived>::StorageIterator SerialStorage<Derived>::FindUpT
 template <typename Derived>
 SerialStorage<Derived>::BeginEnd::BeginEnd(typename SerialStorage<Derived>::StorageIterator start,
                                            typename SerialStorage<Derived>::StorageIterator end)
-    : mStartIt(start), mEndIt(end) {
-}
+    : mStartIt(start), mEndIt(end) {}

 template <typename Derived>
 typename SerialStorage<Derived>::Iterator SerialStorage<Derived>::BeginEnd::begin() const {
@@ -210,8 +209,7 @@ typename SerialStorage<Derived>::Iterator SerialStorage<Derived>::BeginEnd::end(
 template <typename Derived>
 SerialStorage<Derived>::Iterator::Iterator(typename SerialStorage<Derived>::StorageIterator start)
-    : mStorageIterator(start), mSerialIterator(nullptr) {
-}
+    : mStorageIterator(start), mSerialIterator(nullptr) {}

 template <typename Derived>
 typename SerialStorage<Derived>::Iterator& SerialStorage<Derived>::Iterator::operator++() {
@@ -257,8 +255,7 @@ template <typename Derived>
 SerialStorage<Derived>::ConstBeginEnd::ConstBeginEnd(
     typename SerialStorage<Derived>::ConstStorageIterator start,
     typename SerialStorage<Derived>::ConstStorageIterator end)
-    : mStartIt(start), mEndIt(end) {
-}
+    : mStartIt(start), mEndIt(end) {}

 template <typename Derived>
 typename SerialStorage<Derived>::ConstIterator SerialStorage<Derived>::ConstBeginEnd::begin()
@@ -276,8 +273,7 @@ typename SerialStorage<Derived>::ConstIterator SerialStorage<Derived>::ConstBegi
 template <typename Derived>
 SerialStorage<Derived>::ConstIterator::ConstIterator(
     typename SerialStorage<Derived>::ConstStorageIterator start)
-    : mStorageIterator(start), mSerialIterator(nullptr) {
-}
+    : mStorageIterator(start), mSerialIterator(nullptr) {}

 template <typename Derived>
 typename SerialStorage<Derived>::ConstIterator&


@@ -25,19 +25,16 @@
 // IndexLinkNode
 SlabAllocatorImpl::IndexLinkNode::IndexLinkNode(Index index, Index nextIndex)
-    : index(index), nextIndex(nextIndex) {
-}
+    : index(index), nextIndex(nextIndex) {}

 // Slab
 SlabAllocatorImpl::Slab::Slab(char allocation[], IndexLinkNode* head)
-    : allocation(allocation), freeList(head), prev(nullptr), next(nullptr), blocksInUse(0) {
-}
+    : allocation(allocation), freeList(head), prev(nullptr), next(nullptr), blocksInUse(0) {}

 SlabAllocatorImpl::Slab::Slab(Slab&& rhs) = default;

-SlabAllocatorImpl::SentinelSlab::SentinelSlab() : Slab(nullptr, nullptr) {
-}
+SlabAllocatorImpl::SentinelSlab::SentinelSlab() : Slab(nullptr, nullptr) {}

 SlabAllocatorImpl::SentinelSlab::SentinelSlab(SentinelSlab&& rhs) = default;
@@ -83,8 +80,7 @@ SlabAllocatorImpl::SlabAllocatorImpl(SlabAllocatorImpl&& rhs)
       mTotalAllocationSize(rhs.mTotalAllocationSize),
       mAvailableSlabs(std::move(rhs.mAvailableSlabs)),
       mFullSlabs(std::move(rhs.mFullSlabs)),
-      mRecycledSlabs(std::move(rhs.mRecycledSlabs)) {
-}
+      mRecycledSlabs(std::move(rhs.mRecycledSlabs)) {}

 SlabAllocatorImpl::~SlabAllocatorImpl() = default;


@@ -168,8 +168,7 @@ class SlabAllocator : public SlabAllocatorImpl {
     SlabAllocator(size_t totalObjectBytes,
                   uint32_t objectSize = u32_sizeof<T>,
                   uint32_t objectAlignment = u32_alignof<T>)
-        : SlabAllocatorImpl(totalObjectBytes / objectSize, objectSize, objectAlignment) {
-    }
+        : SlabAllocatorImpl(totalObjectBytes / objectSize, objectSize, objectAlignment) {}

     template <typename... Args>
     T* Allocate(Args&&... args) {
@@ -177,9 +176,7 @@ class SlabAllocator : public SlabAllocatorImpl {
         return new (ptr) T(std::forward<Args>(args)...);
     }

-    void Deallocate(T* object) {
-        SlabAllocatorImpl::Deallocate(object);
-    }
+    void Deallocate(T* object) { SlabAllocatorImpl::Deallocate(object); }
 };

 #endif  // SRC_DAWN_COMMON_SLABALLOCATOR_H_


@@ -41,16 +41,11 @@ class StackAllocator : public std::allocator<T> {
     // maintaining this for as long as any containers using this allocator are
    // live.
     struct Source {
-        Source() : used_stack_buffer_(false) {
-        }
+        Source() : used_stack_buffer_(false) {}

         // Casts the buffer in its right type.
-        T* stack_buffer() {
-            return reinterpret_cast<T*>(stack_buffer_);
-        }
-        const T* stack_buffer() const {
-            return reinterpret_cast<const T*>(&stack_buffer_);
-        }
+        T* stack_buffer() { return reinterpret_cast<T*>(stack_buffer_); }
+        const T* stack_buffer() const { return reinterpret_cast<const T*>(&stack_buffer_); }

         // The buffer itself. It is not of type T because we don't want the
         // constructors and destructors to be automatically called. Define a POD
@@ -73,8 +68,7 @@ class StackAllocator : public std::allocator<T> {
     // For the straight up copy c-tor, we can share storage.
     StackAllocator(const StackAllocator<T, stack_capacity>& rhs)
-        : std::allocator<T>(), source_(rhs.source_) {
-    }
+        : std::allocator<T>(), source_(rhs.source_) {}

     // ISO C++ requires the following constructor to be defined,
     // and std::vector in VC++2008SP1 Release fails with an error
@@ -84,18 +78,15 @@ class StackAllocator : public std::allocator<T> {
     // no guarantee that the Source buffer of Ts is large enough
     // for Us.
     template <typename U, size_t other_capacity>
-    StackAllocator(const StackAllocator<U, other_capacity>& other) : source_(nullptr) {
-    }
+    StackAllocator(const StackAllocator<U, other_capacity>& other) : source_(nullptr) {}

     // This constructor must exist. It creates a default allocator that doesn't
     // actually have a stack buffer. glibc's std::string() will compare the
     // current allocator against the default-constructed allocator, so this
     // should be fast.
-    StackAllocator() : source_(nullptr) {
-    }
+    StackAllocator() : source_(nullptr) {}

-    explicit StackAllocator(Source* source) : source_(source) {
-    }
+    explicit StackAllocator(Source* source) : source_(source) {}

     // Actually do the allocation. Use the stack buffer if nobody has used it yet
     // and the size requested fits. Otherwise, fall through to the standard
@@ -154,28 +145,18 @@ class StackContainer {
     // shorter lifetimes than the source. The copy will share the same allocator
     // and therefore the same stack buffer as the original. Use std::copy to
     // copy into a "real" container for longer-lived objects.
-    ContainerType& container() {
-        return container_;
-    }
-    const ContainerType& container() const {
-        return container_;
-    }
+    ContainerType& container() { return container_; }
+    const ContainerType& container() const { return container_; }

     // Support operator-> to get to the container. This allows nicer syntax like:
     //   StackContainer<...> foo;
     //   std::sort(foo->begin(), foo->end());
-    ContainerType* operator->() {
-        return &container_;
-    }
-    const ContainerType* operator->() const {
-        return &container_;
-    }
+    ContainerType* operator->() { return &container_; }
+    const ContainerType* operator->() const { return &container_; }

     // Retrieves the stack source so that that unit tests can verify that the
     // buffer is being used properly.
-    const typename Allocator::Source& stack_data() const {
-        return stack_data_;
-    }
+    const typename Allocator::Source& stack_data() const { return stack_data_; }

   protected:
     typename Allocator::Source stack_data_;
@@ -225,8 +206,7 @@ class StackVector
     : public StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity> {
   public:
     StackVector()
-        : StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity>() {
-    }
+        : StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity>() {}

     // We need to put this in STL containers sometimes, which requires a copy
     // constructor. We can't call the regular copy constructor because that will
@@ -244,12 +224,8 @@ class StackVector
     // Vectors are commonly indexed, which isn't very convenient even with
     // operator-> (using "->at()" does exception stuff we don't want).
-    T& operator[](size_t i) {
-        return this->container().operator[](i);
-    }
-    const T& operator[](size_t i) const {
-        return this->container().operator[](i);
-    }
+    T& operator[](size_t i) { return this->container().operator[](i); }
+    const T& operator[](size_t i) const { return this->container().operator[](i); }

   private:
     // StackVector(const StackVector& rhs) = delete;


@@ -208,8 +208,7 @@ std::optional<std::string> GetModuleDirectory() {
 ScopedEnvironmentVar::ScopedEnvironmentVar(const char* variableName, const char* value)
     : mName(variableName),
       mOriginalValue(GetEnvironmentVar(variableName)),
-      mIsSet(SetEnvironmentVar(variableName, value)) {
-}
+      mIsSet(SetEnvironmentVar(variableName, value)) {}

 ScopedEnvironmentVar::~ScopedEnvironmentVar() {
     if (mIsSet) {


@@ -75,25 +75,20 @@ namespace detail {
     // Construction from non-narrowing integral types.
     template <typename I,
-              typename = std::enable_if_t<
-                  std::is_integral<I>::value &&
-                  std::numeric_limits<I>::max() <= std::numeric_limits<T>::max() &&
-                  std::numeric_limits<I>::min() >= std::numeric_limits<T>::min()>>
-    explicit constexpr TypedIntegerImpl(I rhs) : mValue(static_cast<T>(rhs)) {
-    }
+              typename =
+                  std::enable_if_t<std::is_integral<I>::value &&
+                                   std::numeric_limits<I>::max() <= std::numeric_limits<T>::max() &&
+                                   std::numeric_limits<I>::min() >= std::numeric_limits<T>::min()>>
+    explicit constexpr TypedIntegerImpl(I rhs) : mValue(static_cast<T>(rhs)) {}

     // Allow explicit casts only to the underlying type. If you're casting out of an
     // TypedInteger, you should know what what you're doing, and exactly what type you
     // expect.
-    explicit constexpr operator T() const {
-        return static_cast<T>(this->mValue);
-    }
+    explicit constexpr operator T() const { return static_cast<T>(this->mValue); }

     // Same-tag TypedInteger comparison operators
 #define TYPED_COMPARISON(op) \
-    constexpr bool operator op(const TypedIntegerImpl& rhs) const { \
-        return mValue op rhs.mValue; \
-    }
+    constexpr bool operator op(const TypedIntegerImpl& rhs) const { return mValue op rhs.mValue; }
     TYPED_COMPARISON(<)
     TYPED_COMPARISON(<=)
     TYPED_COMPARISON(>)
@@ -132,8 +127,9 @@ namespace detail {
     }

     template <typename T2 = T>
-    static constexpr std::enable_if_t<std::is_unsigned<T2>::value, decltype(T(0) + T2(0))>
-    AddImpl(TypedIntegerImpl<Tag, T> lhs, TypedIntegerImpl<Tag, T2> rhs) {
+    static constexpr std::enable_if_t<std::is_unsigned<T2>::value, decltype(T(0) + T2(0))> AddImpl(
+        TypedIntegerImpl<Tag, T> lhs,
+        TypedIntegerImpl<Tag, T2> rhs) {
         static_assert(std::is_same<T, T2>::value);

         // Overflow would wrap around
@@ -142,8 +138,9 @@ namespace detail {
     }

     template <typename T2 = T>
-    static constexpr std::enable_if_t<std::is_signed<T2>::value, decltype(T(0) + T2(0))>
-    AddImpl(TypedIntegerImpl<Tag, T> lhs, TypedIntegerImpl<Tag, T2> rhs) {
+    static constexpr std::enable_if_t<std::is_signed<T2>::value, decltype(T(0) + T2(0))> AddImpl(
+        TypedIntegerImpl<Tag, T> lhs,
+        TypedIntegerImpl<Tag, T2> rhs) {
         static_assert(std::is_same<T, T2>::value);

         if (lhs.mValue > 0) {
@@ -160,8 +157,9 @@ namespace detail {
     }

     template <typename T2 = T>
-    static constexpr std::enable_if_t<std::is_unsigned<T>::value, decltype(T(0) - T2(0))>
-    SubImpl(TypedIntegerImpl<Tag, T> lhs, TypedIntegerImpl<Tag, T2> rhs) {
+    static constexpr std::enable_if_t<std::is_unsigned<T>::value, decltype(T(0) - T2(0))> SubImpl(
+        TypedIntegerImpl<Tag, T> lhs,
+        TypedIntegerImpl<Tag, T2> rhs) {
         static_assert(std::is_same<T, T2>::value);

         // Overflow would wrap around


@@ -42,8 +42,7 @@ namespace ityp {
     template <typename... Values>
     // NOLINTNEXTLINE(runtime/explicit)
-    constexpr array(Values&&... values) : Base{std::forward<Values>(values)...} {
-    }
+    constexpr array(Values&&... values) : Base{std::forward<Values>(values)...} {}

     Value& operator[](Index i) {
         I index = static_cast<I>(i);
@@ -69,25 +68,15 @@ namespace ityp {
         return Base::at(index);
     }

-    typename Base::iterator begin() noexcept {
-        return Base::begin();
-    }
+    typename Base::iterator begin() noexcept { return Base::begin(); }

-    typename Base::const_iterator begin() const noexcept {
-        return Base::begin();
-    }
+    typename Base::const_iterator begin() const noexcept { return Base::begin(); }

-    typename Base::iterator end() noexcept {
-        return Base::end();
-    }
+    typename Base::iterator end() noexcept { return Base::end(); }

-    typename Base::const_iterator end() const noexcept {
-        return Base::end();
-    }
+    typename Base::const_iterator end() const noexcept { return Base::end(); }

-    constexpr Index size() const {
-        return Index(I(Size));
-    }
+    constexpr Index size() const { return Index(I(Size)); }

     using Base::back;
     using Base::data;


@@ -30,30 +30,21 @@ namespace ityp {
     static_assert(sizeof(I) <= sizeof(size_t));

-    explicit constexpr bitset(const Base& rhs) : Base(rhs) {
-    }
+    explicit constexpr bitset(const Base& rhs) : Base(rhs) {}

   public:
     using reference = typename Base::reference;

-    constexpr bitset() noexcept : Base() {
-    }
+    constexpr bitset() noexcept : Base() {}

     // NOLINTNEXTLINE(runtime/explicit)
-    constexpr bitset(uint64_t value) noexcept : Base(value) {
-    }
+    constexpr bitset(uint64_t value) noexcept : Base(value) {}

-    constexpr bool operator[](Index i) const {
-        return Base::operator[](static_cast<I>(i));
-    }
+    constexpr bool operator[](Index i) const { return Base::operator[](static_cast<I>(i)); }

-    typename Base::reference operator[](Index i) {
-        return Base::operator[](static_cast<I>(i));
-    }
+    typename Base::reference operator[](Index i) { return Base::operator[](static_cast<I>(i)); }

-    bool test(Index i) const {
-        return Base::test(static_cast<I>(i));
-    }
+    bool test(Index i) const { return Base::test(static_cast<I>(i)); }

     using Base::all;
     using Base::any;
@@ -81,33 +72,21 @@ namespace ityp {
         return static_cast<bitset&>(Base::operator^=(static_cast<const Base&>(other)));
     }

-    bitset operator~() const noexcept {
-        return bitset(*this).flip();
-    }
+    bitset operator~() const noexcept { return bitset(*this).flip(); }

-    bitset& set() noexcept {
-        return static_cast<bitset&>(Base::set());
-    }
+    bitset& set() noexcept { return static_cast<bitset&>(Base::set()); }

     bitset& set(Index i, bool value = true) {
         return static_cast<bitset&>(Base::set(static_cast<I>(i), value));
     }

-    bitset& reset() noexcept {
-        return static_cast<bitset&>(Base::reset());
-    }
+    bitset& reset() noexcept { return static_cast<bitset&>(Base::reset()); }

-    bitset& reset(Index i) {
-        return static_cast<bitset&>(Base::reset(static_cast<I>(i)));
-    }
+    bitset& reset(Index i) { return static_cast<bitset&>(Base::reset(static_cast<I>(i))); }

-    bitset& flip() noexcept {
-        return static_cast<bitset&>(Base::flip());
-    }
+    bitset& flip() noexcept { return static_cast<bitset&>(Base::flip()); }

-    bitset& flip(Index i) {
-        return static_cast<bitset&>(Base::flip(static_cast<I>(i)));
-    }
+    bitset& flip(Index i) { return static_cast<bitset&>(Base::flip(static_cast<I>(i))); }

     using Base::to_string;
     using Base::to_ullong;


@@ -31,39 +31,25 @@ namespace ityp {
     using I = UnderlyingType<Index>;

   public:
-    constexpr span() : mData(nullptr), mSize(0) {
-    }
-    constexpr span(Value* data, Index size) : mData(data), mSize(size) {
-    }
+    constexpr span() : mData(nullptr), mSize(0) {}
+    constexpr span(Value* data, Index size) : mData(data), mSize(size) {}

     constexpr Value& operator[](Index i) const {
         ASSERT(i < mSize);
         return mData[static_cast<I>(i)];
     }

-    Value* data() noexcept {
-        return mData;
-    }
+    Value* data() noexcept { return mData; }

-    const Value* data() const noexcept {
-        return mData;
-    }
+    const Value* data() const noexcept { return mData; }

-    Value* begin() noexcept {
-        return mData;
-    }
+    Value* begin() noexcept { return mData; }

-    const Value* begin() const noexcept {
-        return mData;
-    }
+    const Value* begin() const noexcept { return mData; }

-    Value* end() noexcept {
-        return mData + static_cast<I>(mSize);
-    }
+    Value* end() noexcept { return mData + static_cast<I>(mSize); }

-    const Value* end() const noexcept {
-        return mData + static_cast<I>(mSize);
-    }
+    const Value* end() const noexcept { return mData + static_cast<I>(mSize); }

     Value& front() {
         ASSERT(mData != nullptr);
@@ -89,9 +75,7 @@ namespace ityp {
         return *(mData + static_cast<I>(mSize) - 1);
     }

-    Index size() const {
-        return mSize;
-    }
+    Index size() const { return mSize; }

   private:
     Value* mData;


@@ -32,11 +32,8 @@ namespace ityp {
     static_assert(StaticCapacity <= std::numeric_limits<I>::max());

   public:
-    stack_vec() : Base() {
-    }
-    explicit stack_vec(Index size) : Base() {
-        this->container().resize(static_cast<I>(size));
-    }
+    stack_vec() : Base() {}
+    explicit stack_vec(Index size) : Base() { this->container().resize(static_cast<I>(size)); }

     Value& operator[](Index i) {
         ASSERT(i < size());
@@ -48,57 +45,31 @@ namespace ityp {
         return Base::operator[](static_cast<I>(i));
     }

-    void resize(Index size) {
-        this->container().resize(static_cast<I>(size));
-    }
+    void resize(Index size) { this->container().resize(static_cast<I>(size)); }

-    void reserve(Index size) {
-        this->container().reserve(static_cast<I>(size));
-    }
+    void reserve(Index size) { this->container().reserve(static_cast<I>(size)); }

-    Value* data() {
-        return this->container().data();
-    }
+    Value* data() { return this->container().data(); }

-    const Value* data() const {
-        return this->container().data();
-    }
+    const Value* data() const { return this->container().data(); }

-    typename VectorBase::iterator begin() noexcept {
-        return this->container().begin();
-    }
+    typename VectorBase::iterator begin() noexcept { return this->container().begin(); }

-    typename VectorBase::const_iterator begin() const noexcept {
-        return this->container().begin();
-    }
+    typename VectorBase::const_iterator begin() const noexcept { return this->container().begin(); }

-    typename VectorBase::iterator end() noexcept {
-        return this->container().end();
-    }
+    typename VectorBase::iterator end() noexcept { return this->container().end(); }

-    typename VectorBase::const_iterator end() const noexcept {
-        return this->container().end();
-    }
+    typename VectorBase::const_iterator end() const noexcept { return this->container().end(); }

-    typename VectorBase::reference front() {
-        return this->container().front();
-    }
+    typename VectorBase::reference front() { return this->container().front(); }

-    typename VectorBase::const_reference front() const {
-        return this->container().front();
-    }
+    typename VectorBase::const_reference front() const { return this->container().front(); }

-    typename VectorBase::reference back() {
-        return this->container().back();
-    }
+    typename VectorBase::reference back() { return this->container().back(); }

-    typename VectorBase::const_reference back() const {
-        return this->container().back();
-    }
+    typename VectorBase::const_reference back() const { return this->container().back(); }

-    Index size() const {
-        return Index(static_cast<I>(this->container().size()));
-    }
+    Index size() const { return Index(static_cast<I>(this->container().size())); }
 };

 }  // namespace ityp


@@ -42,23 +42,17 @@ namespace ityp {
     using Base::size;

   public:
-    vector() : Base() {
-    }
+    vector() : Base() {}

-    explicit vector(Index size) : Base(static_cast<I>(size)) {
-    }
+    explicit vector(Index size) : Base(static_cast<I>(size)) {}

-    vector(Index size, const Value& init) : Base(static_cast<I>(size), init) {
-    }
+    vector(Index size, const Value& init) : Base(static_cast<I>(size), init) {}

-    vector(const vector& rhs) : Base(static_cast<const Base&>(rhs)) {
-    }
+    vector(const vector& rhs) : Base(static_cast<const Base&>(rhs)) {}

-    vector(vector&& rhs) : Base(static_cast<Base&&>(rhs)) {
-    }
+    vector(vector&& rhs) : Base(static_cast<Base&&>(rhs)) {}

-    vector(std::initializer_list<Value> init) : Base(init) {
-    }
+    vector(std::initializer_list<Value> init) : Base(init) {}

     vector& operator=(const vector& rhs) {
         Base::operator=(static_cast<const Base&>(rhs));
@@ -95,13 +89,9 @@ namespace ityp {
         return Index(static_cast<I>(Base::size()));
     }

-    void resize(Index size) {
-        Base::resize(static_cast<I>(size));
-    }
+    void resize(Index size) { Base::resize(static_cast<I>(size)); }

-    void reserve(Index size) {
-        Base::reserve(static_cast<I>(size));
-    }
+    void reserve(Index size) { Base::reserve(static_cast<I>(size)); }
 };

 }  // namespace ityp


@@ -85,49 +85,33 @@ namespace dawn::native::vulkan {
   public:
     // Default constructor and assigning of VK_NULL_HANDLE
     VkHandle() = default;
-    VkHandle(std::nullptr_t) {
-    }
+    VkHandle(std::nullptr_t) {}

     // Use default copy constructor/assignment
     VkHandle(const VkHandle<Tag, HandleType>& other) = default;
     VkHandle& operator=(const VkHandle<Tag, HandleType>&) = default;

     // Comparisons between handles
-    bool operator==(VkHandle<Tag, HandleType> other) const {
-        return mHandle == other.mHandle;
-    }
-    bool operator!=(VkHandle<Tag, HandleType> other) const {
-        return mHandle != other.mHandle;
-    }
+    bool operator==(VkHandle<Tag, HandleType> other) const { return mHandle == other.mHandle; }
+    bool operator!=(VkHandle<Tag, HandleType> other) const { return mHandle != other.mHandle; }

     // Comparisons between handles and VK_NULL_HANDLE
-    bool operator==(std::nullptr_t) const {
-        return mHandle == 0;
-    }
-    bool operator!=(std::nullptr_t) const {
-        return mHandle != 0;
-    }
+    bool operator==(std::nullptr_t) const { return mHandle == 0; }
+    bool operator!=(std::nullptr_t) const { return mHandle != 0; }

     // Implicit conversion to real Vulkan types.
-    operator HandleType() const {
-        return GetHandle();
-    }
+    operator HandleType() const { return GetHandle(); }

-    HandleType GetHandle() const {
-        return mHandle;
-    }
+    HandleType GetHandle() const { return mHandle; }

-    HandleType& operator*() {
-        return mHandle;
-    }
+    HandleType& operator*() { return mHandle; }

     static VkHandle<Tag, HandleType> CreateFromHandle(HandleType handle) {
         return VkHandle{handle};
     }

   private:
-    explicit VkHandle(HandleType handle) : mHandle(handle) {
-    }
+    explicit VkHandle(HandleType handle) : mHandle(handle) {}

     HandleType mHandle = 0;
 };


@@ -41,9 +41,7 @@ namespace {
         }
         return buf.data();
     }
-    bool Flush() override {
-        return true;
-    }
+    bool Flush() override { return true; }

   private:
     std::vector<char> buf;
View File
@@ -53,14 +53,14 @@ namespace dawn::native {
        std::min(mLimits.v1.maxVertexBuffers, uint32_t(kMaxVertexBuffers));
    mLimits.v1.maxInterStageShaderComponents =
        std::min(mLimits.v1.maxInterStageShaderComponents, kMaxInterStageShaderComponents);
    mLimits.v1.maxSampledTexturesPerShaderStage =
        std::min(mLimits.v1.maxSampledTexturesPerShaderStage, kMaxSampledTexturesPerShaderStage);
    mLimits.v1.maxSamplersPerShaderStage =
        std::min(mLimits.v1.maxSamplersPerShaderStage, kMaxSamplersPerShaderStage);
    mLimits.v1.maxStorageBuffersPerShaderStage =
        std::min(mLimits.v1.maxStorageBuffersPerShaderStage, kMaxStorageBuffersPerShaderStage);
    mLimits.v1.maxStorageTexturesPerShaderStage =
        std::min(mLimits.v1.maxStorageTexturesPerShaderStage, kMaxStorageTexturesPerShaderStage);
    mLimits.v1.maxUniformBuffersPerShaderStage =
        std::min(mLimits.v1.maxUniformBuffersPerShaderStage, kMaxUniformBuffersPerShaderStage);
    mLimits.v1.maxDynamicUniformBuffersPerPipelineLayout =
@@ -119,8 +119,8 @@ namespace dawn::native {
    if (result.IsError()) {
        std::unique_ptr<ErrorData> errorData = result.AcquireError();
        // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
        callback(WGPURequestDeviceStatus_Error, nullptr, errorData->GetFormattedMessage().c_str(),
                 userdata);
        return;
    }
@@ -198,13 +198,12 @@ namespace dawn::native {
    for (uint32_t i = 0; i < descriptor->requiredFeaturesCount; ++i) {
        wgpu::FeatureName f = descriptor->requiredFeatures[i];
        DAWN_TRY(ValidateFeatureName(f));
        DAWN_INVALID_IF(!mSupportedFeatures.IsEnabled(f), "Requested feature %s is not supported.",
                        f);
    }

    if (descriptor->requiredLimits != nullptr) {
        DAWN_TRY_CONTEXT(ValidateLimits(mUseTieredLimits ? ApplyLimitTiers(mLimits.v1) : mLimits.v1,
                                        descriptor->requiredLimits->limits),
                         "validating required limits");
View File
@@ -74,8 +74,7 @@ namespace dawn::native {
    FeaturesSet mSupportedFeatures;

  private:
    virtual ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(const DeviceDescriptor* descriptor) = 0;

    virtual MaybeError InitializeImpl() = 0;
View File
@@ -21,8 +21,7 @@
namespace dawn::native {

AsyncTaskManager::AsyncTaskManager(dawn::platform::WorkerTaskPool* workerTaskPool)
    : mWorkerTaskPool(workerTaskPool) {}

void AsyncTaskManager::PostTask(AsyncTask asyncTask) {
    // If these allocations become expensive, we can slab-allocate tasks.
View File
@@ -21,8 +21,7 @@
namespace dawn::native {

AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderBundleEncoderDescriptor* descriptor)
    : mSampleCount(descriptor->sampleCount) {
    ASSERT(descriptor->colorFormatsCount <= kMaxColorAttachments);
    for (ColorAttachmentIndex i(uint8_t(0));
@@ -58,10 +57,8 @@ namespace dawn::native {

AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderPassDescriptor* descriptor) {
    for (ColorAttachmentIndex i(uint8_t(0));
         i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->colorAttachmentCount)); ++i) {
        TextureViewBase* attachment = descriptor->colorAttachments[static_cast<uint8_t>(i)].view;
        if (attachment == nullptr) {
            continue;
        }
@@ -85,8 +82,7 @@ namespace dawn::native {
    ASSERT(mSampleCount > 0);
}

AttachmentStateBlueprint::AttachmentStateBlueprint(const AttachmentStateBlueprint& rhs) = default;

size_t AttachmentStateBlueprint::HashFunc::operator()(
    const AttachmentStateBlueprint* attachmentState) const {
@@ -107,8 +103,7 @@ namespace dawn::native {
    return hash;
}

bool AttachmentStateBlueprint::EqualityFunc::operator()(const AttachmentStateBlueprint* a,
                                                        const AttachmentStateBlueprint* b) const {
    // Check set attachments
    if (a->mColorAttachmentsSet != b->mColorAttachmentsSet) {
@@ -136,8 +131,7 @@ namespace dawn::native {
}

AttachmentState::AttachmentState(DeviceBase* device, const AttachmentStateBlueprint& blueprint)
    : AttachmentStateBlueprint(blueprint), ObjectBase(device) {}

AttachmentState::~AttachmentState() {
    GetDevice()->UncacheAttachmentState(this);
@@ -148,13 +142,12 @@ namespace dawn::native {
    return AttachmentStateBlueprint::HashFunc()(this);
}

ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> AttachmentState::GetColorAttachmentsMask()
    const {
    return mColorAttachmentsSet;
}

wgpu::TextureFormat AttachmentState::GetColorAttachmentFormat(ColorAttachmentIndex index) const {
    ASSERT(mColorAttachmentsSet[index]);
    return mColorFormats[index];
}
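HashFunc and EqualityFunc exist so attachment states can be deduplicated in a device-wide cache keyed by value rather than by pointer identity: two blueprints with the same attachments hash and compare equal even though they are distinct objects. A minimal sketch of that caching pattern with std::unordered_set; Blueprint is an illustrative stand-in, not the real Dawn class:

    #include <cstddef>
    #include <functional>
    #include <unordered_set>

    // Illustrative cached object: stored by pointer, deduplicated by value.
    struct Blueprint {
        int sampleCount;

        struct HashFunc {
            size_t operator()(const Blueprint* b) const { return std::hash<int>()(b->sampleCount); }
        };
        struct EqualityFunc {
            bool operator()(const Blueprint* a, const Blueprint* b) const {
                return a->sampleCount == b->sampleCount;
            }
        };
    };

    int main() {
        std::unordered_set<Blueprint*, Blueprint::HashFunc, Blueprint::EqualityFunc> cache;
        Blueprint a{4};
        Blueprint b{4};
        cache.insert(&a);
        // No-op: b hashes and compares equal to a, so the cached object is reused.
        bool inserted = cache.insert(&b).second;
        return inserted ? 1 : 0;
    }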
View File
@@ -48,8 +48,7 @@ namespace dawn::native {
        size_t operator()(const AttachmentStateBlueprint* attachmentState) const;
    };
    struct EqualityFunc {
        bool operator()(const AttachmentStateBlueprint* a, const AttachmentStateBlueprint* b) const;
    };

  protected:
View File
@@ -17,8 +17,7 @@
namespace dawn::native {

BackendConnection::BackendConnection(InstanceBase* instance, wgpu::BackendType type)
    : mInstance(instance), mType(type) {}

wgpu::BackendType BackendConnection::GetType() const {
    return mType;
View File
@@ -54,21 +54,18 @@ namespace dawn::native {
        case wgpu::BufferBindingType::Uniform:
            requiredUsage = wgpu::BufferUsage::Uniform;
            maxBindingSize = device->GetLimits().v1.maxUniformBufferBindingSize;
            requiredBindingAlignment = device->GetLimits().v1.minUniformBufferOffsetAlignment;
            break;
        case wgpu::BufferBindingType::Storage:
        case wgpu::BufferBindingType::ReadOnlyStorage:
            requiredUsage = wgpu::BufferUsage::Storage;
            maxBindingSize = device->GetLimits().v1.maxStorageBufferBindingSize;
            requiredBindingAlignment = device->GetLimits().v1.minStorageBufferOffsetAlignment;
            break;
        case kInternalStorageBufferBinding:
            requiredUsage = kInternalStorageBuffer;
            maxBindingSize = device->GetLimits().v1.maxStorageBufferBindingSize;
            requiredBindingAlignment = device->GetLimits().v1.minStorageBufferOffsetAlignment;
            break;
        case wgpu::BufferBindingType::Undefined:
            UNREACHABLE();
@@ -92,26 +89,25 @@ namespace dawn::native {
    // Note that no overflow can happen because we already checked that
    // bufferSize >= bindingSize
    DAWN_INVALID_IF(entry.offset > bufferSize - bindingSize,
                    "Binding range (offset: %u, size: %u) doesn't fit in the size (%u) of %s.",
                    entry.offset, bufferSize, bindingSize, entry.buffer);

    DAWN_INVALID_IF(!IsAligned(entry.offset, requiredBindingAlignment),
                    "Offset (%u) does not satisfy the minimum %s alignment (%u).", entry.offset,
                    bindingInfo.buffer.type, requiredBindingAlignment);

    DAWN_INVALID_IF(!(entry.buffer->GetUsage() & requiredUsage),
                    "Binding usage (%s) of %s doesn't match expected usage (%s).",
                    entry.buffer->GetUsageExternalOnly(), entry.buffer, requiredUsage);

    DAWN_INVALID_IF(bindingSize < bindingInfo.buffer.minBindingSize,
                    "Binding size (%u) is smaller than the minimum binding size (%u).", bindingSize,
                    bindingInfo.buffer.minBindingSize);

    DAWN_INVALID_IF(bindingSize > maxBindingSize,
                    "Binding size (%u) is larger than the maximum binding size (%u).", bindingSize,
                    maxBindingSize);

    return {};
}
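The overflow comment above the first check is worth spelling out: validating "offset + size <= bufferSize" directly can wrap around in unsigned arithmetic, so the code establishes size <= bufferSize first and then compares offset against bufferSize - size, which cannot underflow. A sketch of the same pattern in isolation (FitsInBuffer is an illustrative name):

    #include <cstdint>

    // Overflow-safe range check: [offset, offset + size] fits in [0, total].
    // size <= total is established first so that total - size cannot wrap.
    bool FitsInBuffer(uint64_t offset, uint64_t size, uint64_t total) {
        if (size > total) {
            return false;
        }
        return offset <= total - size;  // never overflows, unlike offset + size
    }

    int main() {
        // A naive offset + size comparison would wrap to 1 here and wrongly pass.
        return FitsInBuffer(UINT64_MAX, 2, 1024) ? 1 : 0;  // 0: correctly rejected
    }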
@@ -131,24 +127,20 @@ namespace dawn::native {
    TextureViewBase* view = entry.textureView;

    Aspect aspect = view->GetAspects();
    DAWN_INVALID_IF(!HasOneBit(aspect), "Multiple aspects (%s) selected in %s.", aspect, view);

    TextureBase* texture = view->GetTexture();
    switch (bindingInfo.bindingType) {
        case BindingInfoType::Texture: {
            SampleTypeBit supportedTypes =
                texture->GetFormat().GetAspectInfo(aspect).supportedSampleTypes;
            SampleTypeBit requiredType = SampleTypeToSampleTypeBit(bindingInfo.texture.sampleType);

            DAWN_INVALID_IF(!(texture->GetUsage() & wgpu::TextureUsage::TextureBinding),
                            "Usage (%s) of %s doesn't include TextureUsage::TextureBinding.",
                            texture->GetUsage(), texture);

            DAWN_INVALID_IF(texture->IsMultisampledTexture() != bindingInfo.texture.multisampled,
                            "Sample count (%u) of %s doesn't match expectation (multisampled: %d).",
                            texture->GetSampleCount(), texture, bindingInfo.texture.multisampled);
@@ -158,29 +150,25 @@ namespace dawn::native {
                            "types (%s).",
                            supportedTypes, texture, requiredType);

            DAWN_INVALID_IF(entry.textureView->GetDimension() != bindingInfo.texture.viewDimension,
                            "Dimension (%s) of %s doesn't match the expected dimension (%s).",
                            entry.textureView->GetDimension(), entry.textureView,
                            bindingInfo.texture.viewDimension);
            break;
        }
        case BindingInfoType::StorageTexture: {
            DAWN_INVALID_IF(!(texture->GetUsage() & wgpu::TextureUsage::StorageBinding),
                            "Usage (%s) of %s doesn't include TextureUsage::StorageBinding.",
                            texture->GetUsage(), texture);

            ASSERT(!texture->IsMultisampledTexture());

            DAWN_INVALID_IF(texture->GetFormat().format != bindingInfo.storageTexture.format,
                            "Format (%s) of %s expected to be (%s).", texture->GetFormat().format,
                            texture, bindingInfo.storageTexture.format);

            DAWN_INVALID_IF(
                entry.textureView->GetDimension() != bindingInfo.storageTexture.viewDimension,
                "Dimension (%s) of %s doesn't match the expected dimension (%s).",
                entry.textureView->GetDimension(), entry.textureView,
                bindingInfo.storageTexture.viewDimension);
@@ -214,22 +202,19 @@ namespace dawn::native {
    switch (bindingInfo.sampler.type) {
        case wgpu::SamplerBindingType::NonFiltering:
            DAWN_INVALID_IF(entry.sampler->IsFiltering(),
                            "Filtering sampler %s is incompatible with non-filtering sampler "
                            "binding.",
                            entry.sampler);
            [[fallthrough]];
        case wgpu::SamplerBindingType::Filtering:
            DAWN_INVALID_IF(entry.sampler->IsComparison(),
                            "Comparison sampler %s is incompatible with non-comparison sampler "
                            "binding.",
                            entry.sampler);
            break;
        case wgpu::SamplerBindingType::Comparison:
            DAWN_INVALID_IF(!entry.sampler->IsComparison(),
                            "Non-comparison sampler %s is incompatible with comparison sampler "
                            "binding.",
                            entry.sampler);
@@ -254,8 +239,7 @@ namespace dawn::native {
        entry.sampler != nullptr || entry.textureView != nullptr || entry.buffer != nullptr,
        "Expected only external texture to be set for binding entry.");

    DAWN_INVALID_IF(expansions.find(BindingNumber(entry.binding)) == expansions.end(),
                    "External texture binding entry %u is not present in the bind group layout.",
                    entry.binding);
@@ -269,8 +253,7 @@ namespace dawn::native {
}  // anonymous namespace

MaybeError ValidateBindGroupDescriptor(DeviceBase* device, const BindGroupDescriptor* descriptor) {
    DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");

    DAWN_TRY(device->ValidateObject(descriptor->layout));
@@ -486,8 +469,7 @@ namespace dawn::native {
}

BindGroupBase::BindGroupBase(DeviceBase* device, ObjectBase::ErrorTag tag)
    : ApiObjectBase(device, tag), mBindingData() {}

// static
BindGroupBase* BindGroupBase::MakeError(DeviceBase* device) {
@@ -533,8 +515,7 @@ namespace dawn::native {
    ASSERT(!IsError());
    ASSERT(bindingIndex < mLayout->GetBindingCount());
    ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Texture ||
           mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::StorageTexture);
    return static_cast<TextureViewBase*>(mBindingData.bindings[bindingIndex].Get());
}
View File
@@ -31,8 +31,7 @@ namespace dawn::native {

class DeviceBase;

MaybeError ValidateBindGroupDescriptor(DeviceBase* device, const BindGroupDescriptor* descriptor);

struct BufferBinding {
    BufferBase* buffer;
View File
@@ -39,8 +39,7 @@ namespace dawn::native {
    ASSERT(format != nullptr);
    DAWN_INVALID_IF(!format->supportsStorageUsage,
                    "Texture format (%s) does not support storage textures.", storageTextureFormat);

    return {};
}
@@ -111,8 +110,7 @@ namespace dawn::native {
        viewDimension = texture.viewDimension;
    }

    DAWN_INVALID_IF(texture.multisampled && viewDimension != wgpu::TextureViewDimension::e2D,
                    "View dimension (%s) for a multisampled texture binding was not %s.",
                    viewDimension, wgpu::TextureViewDimension::e2D);
}
@@ -150,16 +148,14 @@ namespace dawn::native {
                    "BindGroupLayoutEntry had more than one of buffer, sampler, texture, "
                    "storageTexture, or externalTexture set");

    DAWN_INVALID_IF(!IsSubset(entry.visibility, allowedStages),
                    "%s bindings cannot be used with a visibility of %s. Only %s are allowed.",
                    bindingType, entry.visibility, allowedStages);

    return {};
}

BindGroupLayoutEntry CreateSampledTextureBindingForExternalTexture(uint32_t binding,
                                                                   wgpu::ShaderStage visibility) {
    BindGroupLayoutEntry entry;
    entry.binding = binding;
@@ -217,13 +213,11 @@ namespace dawn::native {
            dawn_native::ExternalTextureBindingExpansion bindingExpansion;

            BindGroupLayoutEntry plane0Entry =
                CreateSampledTextureBindingForExternalTexture(entry.binding, entry.visibility);
            bindingExpansion.plane0 = BindingNumber(plane0Entry.binding);
            expandedOutput.push_back(plane0Entry);

            BindGroupLayoutEntry plane1Entry = CreateSampledTextureBindingForExternalTexture(
                nextOpenBindingNumberForNewEntry++, entry.visibility);
            bindingExpansion.plane1 = BindingNumber(plane1Entry.binding);
            expandedOutput.push_back(plane1Entry);
@@ -260,8 +254,8 @@ namespace dawn::native {
                        "Binding number (%u) exceeds the maximum binding number (%u).",
                        uint32_t(bindingNumber), uint32_t(kMaxBindingNumberTyped));
        DAWN_INVALID_IF(bindingsSet.count(bindingNumber) != 0,
                        "On entries[%u]: binding index (%u) was specified by a previous entry.", i,
                        entry.binding);

        DAWN_TRY_CONTEXT(ValidateBindGroupLayoutEntry(device, entry, allowInternalBinding),
                         "validating entries[%u]", i);
@@ -419,8 +413,7 @@ namespace dawn::native {
        return aInfo.storageTexture.access < bInfo.storageTexture.access;
    }
    if (aInfo.storageTexture.viewDimension != bInfo.storageTexture.viewDimension) {
        return aInfo.storageTexture.viewDimension < bInfo.storageTexture.viewDimension;
    }
    if (aInfo.storageTexture.format != bInfo.storageTexture.format) {
        return aInfo.storageTexture.format < bInfo.storageTexture.format;
@@ -492,8 +485,7 @@ namespace dawn::native {
}

BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag)
    : ApiObjectBase(device, tag) {}

BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device)
    : ApiObjectBase(device, kLabelNotImplemented) {
@@ -624,9 +616,8 @@ namespace dawn::native {
    // |-uint64_t[mUnverifiedBufferCount]-|
    size_t objectPointerStart = mBindingCounts.bufferCount * sizeof(BufferBindingData);
    ASSERT(IsAligned(objectPointerStart, alignof(Ref<ObjectBase>)));
    size_t bufferSizeArrayStart = Align(
        objectPointerStart + mBindingCounts.totalCount * sizeof(Ref<ObjectBase>), sizeof(uint64_t));
    ASSERT(IsAligned(bufferSizeArrayStart, alignof(uint64_t)));
    return bufferSizeArrayStart + mBindingCounts.unverifiedBufferCount * sizeof(uint64_t);
}
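The layout computed above packs several arrays back to back in one allocation, rounding each array's start up to its element alignment. A sketch of the arithmetic; this Align is the usual power-of-two rounding and stands in for Dawn's helper (an assumption, not taken from this diff):

    #include <cstddef>
    #include <cstdint>

    // Round x up to the next multiple of alignment (alignment must be a power of two).
    constexpr size_t Align(size_t x, size_t alignment) {
        return (x + alignment - 1) & ~(alignment - 1);
    }

    // Packed layout: totalCount pointer-sized entries, then a uint64_t array.
    constexpr size_t BufferSizeArrayStart(size_t objectPointerStart, size_t totalCount) {
        return Align(objectPointerStart + totalCount * sizeof(void*), sizeof(uint64_t));
    }

    static_assert(Align(13, 8) == 16, "rounds up to the next boundary");
    static_assert(Align(16, 8) == 16, "already aligned values are unchanged");

    int main() {}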
View File
@@ -41,8 +41,7 @@ namespace dawn::native {
    BindingNumber params;
};

using ExternalTextureBindingExpansionMap = std::map<BindingNumber, ExternalTextureBindingExpansion>;

MaybeError ValidateBindGroupLayoutDescriptor(DeviceBase* device,
                                             const BindGroupLayoutDescriptor* descriptor,
@@ -161,8 +160,7 @@ namespace dawn::native {
    ExternalTextureBindingExpansionMap mExternalTextureBindingExpansionMap;

    // Non-0 if this BindGroupLayout was created as part of a default PipelineLayout.
    const PipelineCompatibilityToken mPipelineCompatibilityToken = PipelineCompatibilityToken(0);

    uint32_t mUnexpandedBindingCount;
};
View File
@@ -58,9 +58,7 @@ namespace dawn::native {
        SetDynamicOffsets(mDynamicOffsets[index].data(), dynamicOffsetCount, dynamicOffsets);
    }

    void OnSetPipeline(PipelineBase* pipeline) { mPipelineLayout = pipeline->GetLayout(); }

  protected:
    // The Derived class should call this before it applies bind groups.
View File
@@ -84,12 +84,10 @@ namespace dawn::native {
        bindingCounts->perStage[stage].sampledTextureCount +=
            rhs.perStage[stage].sampledTextureCount;
        bindingCounts->perStage[stage].samplerCount += rhs.perStage[stage].samplerCount;
        bindingCounts->perStage[stage].storageBufferCount += rhs.perStage[stage].storageBufferCount;
        bindingCounts->perStage[stage].storageTextureCount +=
            rhs.perStage[stage].storageTextureCount;
        bindingCounts->perStage[stage].uniformBufferCount += rhs.perStage[stage].uniformBufferCount;
        bindingCounts->perStage[stage].externalTextureCount +=
            rhs.perStage[stage].externalTextureCount;
    }
@@ -110,8 +108,7 @@ namespace dawn::native {
    for (SingleShaderStage stage : IterateStages(kAllStages)) {
        DAWN_INVALID_IF(
            bindingCounts.perStage[stage].sampledTextureCount > kMaxSampledTexturesPerShaderStage,
            "The number of sampled textures (%u) in the %s stage exceeds the maximum "
            "per-stage limit (%u).",
            bindingCounts.perStage[stage].sampledTextureCount, stage,
@@ -119,8 +116,7 @@ namespace dawn::native {
        // The per-stage number of external textures is bound by the maximum sampled textures
        // per stage.
        DAWN_INVALID_IF(bindingCounts.perStage[stage].externalTextureCount >
                            kMaxSampledTexturesPerShaderStage / kSampledTexturesPerExternalTexture,
                        "The number of external textures (%u) in the %s stage exceeds the maximum "
                        "per-stage limit (%u).",
@@ -152,8 +148,7 @@ namespace dawn::native {
            "The combination of samplers (%u) and external textures (%u) in the %s stage "
            "exceeds the maximum per-stage limit (%u).",
            bindingCounts.perStage[stage].samplerCount,
            bindingCounts.perStage[stage].externalTextureCount, stage, kMaxSamplersPerShaderStage);

        DAWN_INVALID_IF(
            bindingCounts.perStage[stage].storageBufferCount > kMaxStorageBuffersPerShaderStage,
@@ -163,8 +158,7 @@ namespace dawn::native {
            kMaxStorageBuffersPerShaderStage);

        DAWN_INVALID_IF(
            bindingCounts.perStage[stage].storageTextureCount > kMaxStorageTexturesPerShaderStage,
            "The number of storage textures (%u) in the %s stage exceeds the maximum per-stage "
            "limit (%u).",
            bindingCounts.perStage[stage].storageTextureCount, stage,
View File
@@ -49,8 +49,7 @@ namespace dawn::native {
}

BlobCache::BlobCache(dawn::platform::CachingInterface* cachingInterface)
    : mCache(cachingInterface) {}

CachedBlob BlobCache::Load(const CacheKey& key) {
    std::lock_guard<std::mutex> lock(mMutex);
View File
@@ -83,8 +83,8 @@ namespace dawn::native {
    // Allocation offset is always local to the memory.
    const uint64_t memoryOffset = blockOffset % mMemoryBlockSize;

    return ResourceMemoryAllocation{info, memoryOffset,
                                    mTrackedSubAllocations[memoryIndex].mMemoryAllocation.get()};
}

void BuddyMemoryAllocator::Deallocate(const ResourceMemoryAllocation& allocation) {
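The modulo above works because the buddy allocator addresses one contiguous virtual range carved into equally sized heaps: dividing a global offset by the heap size picks the heap, and the remainder is the offset inside it. A small sketch of that mapping (names are illustrative, not Dawn's):

    #include <cstdint>
    #include <cstdio>

    // Map a global buddy-allocator offset to (heap index, offset within the heap).
    // Assumes every backing heap has the same fixed size.
    struct HeapLocation {
        uint64_t heapIndex;
        uint64_t offsetInHeap;
    };

    HeapLocation Locate(uint64_t blockOffset, uint64_t memoryBlockSize) {
        return {blockOffset / memoryBlockSize, blockOffset % memoryBlockSize};
    }

    int main() {
        // With 4 MiB heaps, global offset 9 MiB lands 1 MiB into the third heap.
        HeapLocation loc = Locate(9ull << 20, 4ull << 20);
        std::printf("heap %llu, offset %llu\n", (unsigned long long)loc.heapIndex,
                    (unsigned long long)loc.offsetInHeap);
    }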
View File
@@ -44,8 +44,7 @@ namespace dawn::native {
                         ResourceHeapAllocator* heapAllocator);
    ~BuddyMemoryAllocator() = default;

    ResultOrError<ResourceMemoryAllocation> Allocate(uint64_t allocationSize, uint64_t alignment);
    void Deallocate(const ResourceMemoryAllocation& allocation);

    uint64_t GetMemoryBlockSize() const;
View File
@@ -35,9 +35,7 @@ namespace dawn::native {
namespace {
struct MapRequestTask : QueueBase::TaskInFlight {
    MapRequestTask(Ref<BufferBase> buffer, MapRequestID id) : buffer(std::move(buffer)), id(id) {}
    void Finish(dawn::platform::Platform* platform, ExecutionSerial serial) override {
        TRACE_EVENT1(platform, General, "Buffer::TaskInFlight::Finished", "serial",
                     uint64_t(serial));
@@ -61,8 +59,7 @@ namespace dawn::native {
        // Check that the size can be used to allocate an mFakeMappedData. A malloc(0)
        // is invalid, and on 32bit systems we should avoid a narrowing conversion that
        // would make size = 1 << 32 + 1 allocate one byte.
        bool isValidSize = descriptor->size != 0 &&
                           descriptor->size < uint64_t(std::numeric_limits<size_t>::max());

        if (isValidSize) {
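The comment in this hunk describes a real 32-bit hazard: size_t is 32 bits wide there, so truncating a 64-bit size of 2^32 + 1 yields 1 and the allocation would silently succeed with one byte. A sketch of the guard in isolation (the function name is illustrative):

    #include <cstddef>
    #include <cstdint>
    #include <limits>

    // Reject sizes that cannot round-trip through size_t on this target.
    bool IsValidAllocationSize(uint64_t requested) {
        return requested != 0 && requested < uint64_t(std::numeric_limits<size_t>::max());
    }

    int main() {
        uint64_t huge = (uint64_t(1) << 32) + 1;  // truncates to 1 if size_t is 32 bits
        // On a 32-bit build the guard rejects 'huge' instead of allocating one byte;
        // on a 64-bit build it passes, and size_t represents it faithfully.
        return IsValidAllocationSize(huge) ? 0 : 1;
    }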
@@ -76,25 +73,17 @@ namespace dawn::native {
        }

      private:
        bool IsCPUWritableAtCreation() const override { UNREACHABLE(); }

        MaybeError MapAtCreationImpl() override { UNREACHABLE(); }

        MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override {
            UNREACHABLE();
        }

        void* GetMappedPointerImpl() override { return mFakeMappedData.get(); }

        void UnmapImpl() override { mFakeMappedData.reset(); }

        std::unique_ptr<uint8_t[]> mFakeMappedData;
    };
@@ -279,8 +268,7 @@ namespace dawn::native {
            // is initialized.
            // TODO(crbug.com/dawn/828): Suballocate and reuse memory from a larger staging
            // buffer so we don't create many small buffers.
            DAWN_TRY_ASSIGN(mStagingBuffer, GetDevice()->CreateStagingBuffer(GetAllocatedSize()));
        }
    }
@@ -357,12 +345,10 @@ namespace dawn::native {
        CallMapCallback(mLastMapID, WGPUBufferMapAsyncStatus_DeviceLost);
        return;
    }
    std::unique_ptr<MapRequestTask> request = std::make_unique<MapRequestTask>(this, mLastMapID);
    TRACE_EVENT1(GetDevice()->GetPlatform(), General, "Buffer::APIMapAsync", "serial",
                 uint64_t(GetDevice()->GetPendingCommandSerial()));
    GetDevice()->GetQueue()->TrackTask(std::move(request), GetDevice()->GetPendingCommandSerial());
}

void* BufferBase::APIGetMappedRange(size_t offset, size_t size) {
@@ -400,8 +386,8 @@ namespace dawn::native {
        return {};
    }

    DAWN_TRY(
        GetDevice()->CopyFromStagingToBuffer(mStagingBuffer.get(), 0, this, 0, GetAllocatedSize()));

    DynamicUploader* uploader = GetDevice()->GetDynamicUploader();
    uploader->ReleaseStagingBuffer(std::move(mStagingBuffer));
@@ -453,8 +439,7 @@ namespace dawn::native {
    DAWN_TRY(GetDevice()->ValidateObject(this));

    DAWN_INVALID_IF(uint64_t(offset) > mSize,
                    "Mapping offset (%u) is larger than the size (%u) of %s.", offset, mSize, this);

    DAWN_INVALID_IF(offset % 8 != 0, "Offset (%u) must be a multiple of 8.", offset);
    DAWN_INVALID_IF(size % 4 != 0, "Size (%u) must be a multiple of 4.", size);
@@ -523,8 +508,7 @@ namespace dawn::native {
            return true;
        case BufferState::Mapped:
            ASSERT(bool{mMapMode & wgpu::MapMode::Read} ^ bool{mMapMode & wgpu::MapMode::Write});
            return !writable || (mMapMode & wgpu::MapMode::Write);

        case BufferState::Unmapped:
@@ -556,8 +540,7 @@ namespace dawn::native {
}

bool BufferBase::NeedsInitialization() const {
    return !mIsDataInitialized && GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse);
}

bool BufferBase::IsDataInitialized() const {
View File
@@ -88,9 +88,7 @@ namespace dawn::native {
    void APIDestroy();

  protected:
    BufferBase(DeviceBase* device, const BufferDescriptor* descriptor, ObjectBase::ErrorTag tag);

    // Constructor used only for mocking and testing.
    BufferBase(DeviceBase* device, BufferState state);
View File
@@ -103,9 +103,7 @@ namespace dawn::native {
template <size_t N>
class CacheKeySerializer<std::bitset<N>, std::enable_if_t<(N <= 64)>> {
  public:
    static void Serialize(CacheKey* key, const std::bitset<N>& t) { key->Record(t.to_ullong()); }
};

// Specialized overload for bitsets since using the built-in to_ullong has a size limit.
@@ -196,9 +194,7 @@ namespace dawn::native {
template <typename T>
class CacheKeySerializer<T, std::enable_if_t<std::is_base_of_v<CachedObject, T>>> {
  public:
    static void Serialize(CacheKey* key, const T& t) { key->Record(t.GetCacheKey()); }
};

}  // namespace dawn::native
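The first specialization above is limited to N <= 64 because std::bitset::to_ullong only works when the whole bitset fits in one unsigned long long; larger bitsets take the chunked overload the comment mentions. A self-contained sketch of the small-N case; Key is an illustrative stand-in for CacheKey, not Dawn's type:

    #include <bitset>
    #include <cstdint>
    #include <type_traits>
    #include <vector>

    // Minimal stand-in for a cache key: just an append-only byte stream.
    struct Key {
        std::vector<uint8_t> bytes;
        void Record(uint64_t v) {
            for (int i = 0; i < 8; ++i) {
                bytes.push_back(uint8_t(v >> (8 * i)));
            }
        }
    };

    // For N <= 64 the whole bitset fits in one 64-bit word.
    template <size_t N, typename = std::enable_if_t<(N <= 64)>>
    void Serialize(Key* key, const std::bitset<N>& bits) {
        key->Record(bits.to_ullong());
    }

    int main() {
        Key key;
        std::bitset<48> bits;
        bits.set(3);
        Serialize(&key, bits);  // one Record() call; larger N would need chunking
        return key.bytes.size() == 8 ? 0 : 1;
    }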
View File
@@ -52,8 +52,7 @@ namespace dawn::native {
    return *this;
}

CommandIterator::CommandIterator(CommandAllocator allocator) : mBlocks(allocator.AcquireBlocks()) {
    Reset();
}

@@ -206,8 +205,7 @@ namespace dawn::native {

bool CommandAllocator::GetNewBlock(size_t minimumSize) {
    // Allocate blocks doubling sizes each time, to a maximum of 16k (or at least minimumSize).
    mLastAllocationSize = std::max(minimumSize, std::min(mLastAllocationSize * 2, size_t(16384)));

    uint8_t* block = static_cast<uint8_t*>(malloc(mLastAllocationSize));
    if (DAWN_UNLIKELY(block == nullptr)) {
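The growth policy in this hunk doubles the block size on each refill so a long command stream costs a logarithmic number of mallocs, while the 16k cap bounds per-block waste and minimumSize keeps oversized commands allocatable. A sketch of the size sequence it produces (the 2048-byte starting size is an assumption for illustration, not taken from this diff):

    #include <algorithm>
    #include <cstdio>

    // Doubling block growth capped at 16 KiB, never smaller than minimumSize.
    size_t NextBlockSize(size_t last, size_t minimumSize) {
        return std::max(minimumSize, std::min(last * 2, size_t(16384)));
    }

    int main() {
        size_t size = 2048;  // assumed starting size for illustration
        for (int i = 0; i < 5; ++i) {
            size = NextBlockSize(size, 1);
            std::printf("%zu\n", size);  // 4096, 8192, 16384, 16384, 16384
        }
    }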
View File
@@ -166,8 +166,8 @@ namespace dawn::native {
        static_assert(sizeof(E) == sizeof(uint32_t));
        static_assert(alignof(E) == alignof(uint32_t));
        static_assert(alignof(T) <= kMaxSupportedAlignment);
        T* result =
            reinterpret_cast<T*>(Allocate(static_cast<uint32_t>(commandId), sizeof(T), alignof(T)));
        if (!result) {
            return nullptr;
        }
@@ -242,9 +242,7 @@ namespace dawn::native {
        return AllocateInNewBlock(commandId, commandSize, commandAlignment);
    }

    uint8_t* AllocateInNewBlock(uint32_t commandId, size_t commandSize, size_t commandAlignment);

    DAWN_FORCE_INLINE uint8_t* AllocateData(size_t commandSize, size_t commandAlignment) {
        return Allocate(detail::kAdditionalData, commandSize, commandAlignment);
View File
@@ -39,8 +39,7 @@ namespace dawn::native {
}

CommandBufferBase::CommandBufferBase(DeviceBase* device, ObjectBase::ErrorTag tag)
    : ApiObjectBase(device, tag) {}

// static
CommandBufferBase* CommandBufferBase::MakeError(DeviceBase* device) {
@@ -89,16 +88,14 @@ namespace dawn::native {
    UNREACHABLE();
}

SubresourceRange GetSubresourcesAffectedByCopy(const TextureCopy& copy, const Extent3D& copySize) {
    switch (copy.texture->GetDimension()) {
        case wgpu::TextureDimension::e1D:
            ASSERT(copy.origin.z == 0 && copySize.depthOrArrayLayers == 1);
            ASSERT(copy.mipLevel == 0);
            return {copy.aspect, {0, 1}, {0, 1}};
        case wgpu::TextureDimension::e2D:
            return {copy.aspect, {copy.origin.z, copySize.depthOrArrayLayers}, {copy.mipLevel, 1}};
        case wgpu::TextureDimension::e3D:
            return {copy.aspect, {0, 1}, {copy.mipLevel, 1}};
    }
@@ -194,8 +191,7 @@ namespace dawn::native {
    }

    const TextureBase* texture = copy->source.texture.Get();
    const TexelBlockInfo& blockInfo = texture->GetFormat().GetAspectInfo(copy->source.aspect).block;
    const uint64_t widthInBlocks = copy->copySize.width / blockInfo.width;
    const uint64_t heightInBlocks = copy->copySize.height / blockInfo.height;
    const bool multiSlice = copy->copySize.depthOrArrayLayers > 1;
@@ -234,8 +230,8 @@ namespace dawn::native {
}

std::array<int32_t, 4> ConvertToSignedIntegerColor(dawn::native::Color color) {
    const std::array<int32_t, 4> outputValue = {
        static_cast<int32_t>(color.r), static_cast<int32_t>(color.g), static_cast<int32_t>(color.b),
        static_cast<int32_t>(color.a)};
    return outputValue;
}
View File
@@ -60,8 +60,7 @@ namespace dawn::native {
bool IsCompleteSubresourceCopiedTo(const TextureBase* texture,
                                   const Extent3D copySize,
                                   const uint32_t mipLevel);
SubresourceRange GetSubresourcesAffectedByCopy(const TextureCopy& copy, const Extent3D& copySize);

void LazyClearRenderPassAttachments(BeginRenderPassCmd* renderPass);
View File
@@ -82,18 +82,15 @@ namespace dawn::native {
    return ValidateOperation(kDrawIndexedAspects);
}

MaybeError CommandBufferStateTracker::ValidateBufferInRangeForVertexBuffer(uint32_t vertexCount,
                                                                           uint32_t firstVertex) {
    RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();

    const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>& vertexBufferSlotsUsedAsVertexBuffer =
        lastRenderPipeline->GetVertexBufferSlotsUsedAsVertexBuffer();

    for (auto usedSlotVertex : IterateBitSet(vertexBufferSlotsUsedAsVertexBuffer)) {
        const VertexBufferInfo& vertexBuffer = lastRenderPipeline->GetVertexBuffer(usedSlotVertex);
        uint64_t arrayStride = vertexBuffer.arrayStride;
        uint64_t bufferSize = mVertexBufferSizes[usedSlotVertex];
@@ -106,8 +103,7 @@ namespace dawn::native {
        } else {
            uint64_t strideCount = static_cast<uint64_t>(firstVertex) + vertexCount;
            if (strideCount != 0u) {
                uint64_t requiredSize = (strideCount - 1u) * arrayStride + vertexBuffer.lastStride;
                // firstVertex and vertexCount are in uint32_t,
                // arrayStride must not be larger than kMaxVertexBufferArrayStride, which is
                // currently 2048, and vertexBuffer.lastStride = max(attribute.offset +
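The comment cut off by the hunk boundary is reasoning about overflow in the requiredSize expression: strideCount fits in 33 bits and arrayStride is at most 2048 (11 bits), so the product fits comfortably in 64 bits. A worked instance of the formula, with numbers chosen purely for illustration:

    #include <cstdint>

    // Largest byte needed by vertex fetch: strides 0..strideCount-1 each advance
    // arrayStride bytes, and the final stride only needs up to its deepest attribute.
    uint64_t RequiredVertexBufferSize(uint64_t strideCount, uint64_t arrayStride,
                                      uint64_t lastStride) {
        return (strideCount - 1u) * arrayStride + lastStride;
    }

    int main() {
        // firstVertex = 2, vertexCount = 3 -> strideCount = 5; with a 16-byte stride
        // whose last attribute ends at byte 12, 4 * 16 + 12 = 76 bytes are needed.
        return RequiredVertexBufferSize(5, 16, 12) == 76 ? 0 : 1;
    }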
@@ -133,8 +129,7 @@ namespace dawn::native {
                                                                             uint32_t firstInstance) {
    RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();

    const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>& vertexBufferSlotsUsedAsInstanceBuffer =
        lastRenderPipeline->GetVertexBufferSlotsUsedAsInstanceBuffer();

    for (auto usedSlotInstance : IterateBitSet(vertexBufferSlotsUsedAsInstanceBuffer)) {
@@ -151,8 +146,7 @@ namespace dawn::native {
        } else {
            uint64_t strideCount = static_cast<uint64_t>(firstInstance) + instanceCount;
            if (strideCount != 0u) {
                uint64_t requiredSize = (strideCount - 1u) * arrayStride + vertexBuffer.lastStride;
                // firstInstance and instanceCount are in uint32_t,
                // arrayStride must not be larger than kMaxVertexBufferArrayStride, which is
                // currently 2048, and vertexBuffer.lastStride = max(attribute.offset +
@@ -310,8 +304,7 @@ namespace dawn::native {
        DAWN_INVALID_IF(
            requiredBGL->GetPipelineCompatibilityToken() == PipelineCompatibilityToken(0) &&
                currentBGL->GetPipelineCompatibilityToken() != PipelineCompatibilityToken(0),
            "%s at index %u uses a %s which was created as part of the default layout for "
            "a different pipeline than the current one (%s), and as a result is not "
            "compatible. Use an explicit bind group layout when creating bind groups and "
View File
@@ -48,15 +48,12 @@ namespace dawn::native {
           !std::isnan(attachment.clearColor.b) || !std::isnan(attachment.clearColor.a);
}

MaybeError ValidateB2BCopyAlignment(uint64_t dataSize, uint64_t srcOffset, uint64_t dstOffset) {
    // Copy size must be a multiple of 4 bytes on macOS.
    DAWN_INVALID_IF(dataSize % 4 != 0, "Copy size (%u) is not a multiple of 4.", dataSize);

    // SourceOffset and destinationOffset must be multiples of 4 bytes on macOS.
    DAWN_INVALID_IF(srcOffset % 4 != 0 || dstOffset % 4 != 0,
                    "Source offset (%u) or destination offset (%u) is not a multiple of 4 bytes.",
                    srcOffset, dstOffset);
@@ -65,8 +62,8 @@ namespace dawn::native {

MaybeError ValidateTextureSampleCountInBufferCopyCommands(const TextureBase* texture) {
    DAWN_INVALID_IF(texture->GetSampleCount() > 1,
                    "%s sample count (%u) is not 1 when copying to or from a buffer.", texture,
                    texture->GetSampleCount());

    return {};
}
@@ -87,8 +84,7 @@ namespace dawn::native {
    return {};
}

MaybeError ValidateTextureDepthStencilToBufferCopyRestrictions(const ImageCopyTexture& src) {
    Aspect aspectUsed;
    DAWN_TRY_ASSIGN(aspectUsed, SingleAspectUsedByImageCopyTexture(src));
    if (aspectUsed == Aspect::Depth) {
@@ -138,8 +134,7 @@ namespace dawn::native {
        *height = attachmentSize.height;
        DAWN_ASSERT(*width != 0 && *height != 0);
    } else {
        DAWN_INVALID_IF(*width != attachmentSize.width || *height != attachmentSize.height,
                        "Attachment %s size (width: %u, height: %u) does not match the size of the "
                        "other attachments (width: %u, height: %u).",
                        attachment, attachmentSize.width, attachmentSize.height, *width, *height);
@@ -177,8 +172,7 @@ namespace dawn::native {
        DAWN_TRY(ValidateCanUseAs(colorAttachment.resolveTarget->GetTexture(),
                                  wgpu::TextureUsage::RenderAttachment, usageValidationMode));

        DAWN_INVALID_IF(!attachment->GetTexture()->IsMultisampledTexture(),
                        "Cannot set %s as a resolve target when the color attachment %s has a sample "
                        "count of 1.",
                        resolveTarget, attachment);
@@ -198,10 +192,8 @@ namespace dawn::native {
        const Extent3D& colorTextureSize =
            attachment->GetTexture()->GetMipLevelVirtualSize(attachment->GetBaseMipLevel());
        const Extent3D& resolveTextureSize =
            resolveTarget->GetTexture()->GetMipLevelVirtualSize(resolveTarget->GetBaseMipLevel());
        DAWN_INVALID_IF(colorTextureSize.width != resolveTextureSize.width ||
                            colorTextureSize.height != resolveTextureSize.height,
                        "The Resolve target %s size (width: %u, height: %u) does not match the color "
                        "attachment %s size (width: %u, height: %u).",
@@ -222,8 +214,7 @@ namespace dawn::native {
    return {};
}

MaybeError ValidateRenderPassColorAttachment(DeviceBase* device,
                                             const RenderPassColorAttachment& colorAttachment,
                                             uint32_t* width,
                                             uint32_t* height,
@@ -234,20 +225,18 @@ namespace dawn::native {
        return {};
    }

    DAWN_TRY(device->ValidateObject(attachment));
    DAWN_TRY(ValidateCanUseAs(attachment->GetTexture(), wgpu::TextureUsage::RenderAttachment,
                              usageValidationMode));

    DAWN_INVALID_IF(
        !(attachment->GetAspects() & Aspect::Color) || !attachment->GetFormat().isRenderable,
        "The color attachment %s format (%s) is not color renderable.", attachment,
        attachment->GetFormat().format);

    DAWN_TRY(ValidateLoadOp(colorAttachment.loadOp));
    DAWN_TRY(ValidateStoreOp(colorAttachment.storeOp));
    DAWN_INVALID_IF(colorAttachment.loadOp == wgpu::LoadOp::Undefined, "loadOp must be set.");
    DAWN_INVALID_IF(colorAttachment.storeOp == wgpu::StoreOp::Undefined, "storeOp must be set.");

    // TODO(dawn:1269): Remove after the deprecation period.
    bool useClearColor = HasDeprecatedColor(colorAttachment);
@@ -285,27 +274,24 @@ namespace dawn::native {
    TextureViewBase* attachment = depthStencilAttachment->view;
    DAWN_TRY(device->ValidateObject(attachment));
    DAWN_TRY(ValidateCanUseAs(attachment->GetTexture(), wgpu::TextureUsage::RenderAttachment,
                              usageValidationMode));

    const Format& format = attachment->GetFormat();
    DAWN_INVALID_IF(!format.HasDepthOrStencil(),
                    "The depth stencil attachment %s format (%s) is not a depth stencil format.",
                    attachment, format.format);

    DAWN_INVALID_IF(!format.isRenderable,
                    "The depth stencil attachment %s format (%s) is not renderable.", attachment,
                    format.format);

    DAWN_INVALID_IF(attachment->GetAspects() != format.aspects,
                    "The depth stencil attachment %s must encompass all aspects.", attachment);

    DAWN_INVALID_IF(
        attachment->GetAspects() == (Aspect::Depth | Aspect::Stencil) &&
depthStencilAttachment->depthReadOnly != depthStencilAttachment->depthReadOnly != depthStencilAttachment->stencilReadOnly,
depthStencilAttachment->stencilReadOnly,
"depthReadOnly (%u) and stencilReadOnly (%u) must be the same when texture aspect " "depthReadOnly (%u) and stencilReadOnly (%u) must be the same when texture aspect "
"is 'all'.", "is 'all'.",
depthStencilAttachment->depthReadOnly, depthStencilAttachment->stencilReadOnly); depthStencilAttachment->depthReadOnly, depthStencilAttachment->stencilReadOnly);
@ -326,8 +312,7 @@ namespace dawn::native {
"no depth aspect or depthReadOnly (%u) is true.", "no depth aspect or depthReadOnly (%u) is true.",
depthStencilAttachment->depthLoadOp, attachment, depthStencilAttachment->depthLoadOp, attachment,
depthStencilAttachment->depthReadOnly); depthStencilAttachment->depthReadOnly);
DAWN_INVALID_IF( DAWN_INVALID_IF(depthStencilAttachment->depthStoreOp != wgpu::StoreOp::Undefined,
depthStencilAttachment->depthStoreOp != wgpu::StoreOp::Undefined,
"depthStoreOp (%s) must not be set if the attachment (%s) has no depth " "depthStoreOp (%s) must not be set if the attachment (%s) has no depth "
"aspect or depthReadOnly (%u) is true.", "aspect or depthReadOnly (%u) is true.",
depthStencilAttachment->depthStoreOp, attachment, depthStencilAttachment->depthStoreOp, attachment,
@ -372,15 +357,13 @@ namespace dawn::native {
} }
} else { } else {
DAWN_TRY(ValidateLoadOp(depthStencilAttachment->stencilLoadOp)); DAWN_TRY(ValidateLoadOp(depthStencilAttachment->stencilLoadOp));
DAWN_INVALID_IF( DAWN_INVALID_IF(depthStencilAttachment->stencilLoadOp == wgpu::LoadOp::Undefined,
depthStencilAttachment->stencilLoadOp == wgpu::LoadOp::Undefined,
"stencilLoadOp (%s) must be set if the attachment (%s) has a stencil " "stencilLoadOp (%s) must be set if the attachment (%s) has a stencil "
"aspect and stencilReadOnly (%u) is false.", "aspect and stencilReadOnly (%u) is false.",
depthStencilAttachment->stencilLoadOp, attachment, depthStencilAttachment->stencilLoadOp, attachment,
depthStencilAttachment->stencilReadOnly); depthStencilAttachment->stencilReadOnly);
DAWN_TRY(ValidateStoreOp(depthStencilAttachment->stencilStoreOp)); DAWN_TRY(ValidateStoreOp(depthStencilAttachment->stencilStoreOp));
DAWN_INVALID_IF( DAWN_INVALID_IF(depthStencilAttachment->stencilStoreOp == wgpu::StoreOp::Undefined,
depthStencilAttachment->stencilStoreOp == wgpu::StoreOp::Undefined,
"stencilStoreOp (%s) must be set if the attachment (%s) has a stencil " "stencilStoreOp (%s) must be set if the attachment (%s) has a stencil "
"aspect and stencilReadOnly (%u) is false.", "aspect and stencilReadOnly (%u) is false.",
depthStencilAttachment->stencilStoreOp, attachment, depthStencilAttachment->stencilStoreOp, attachment,
@ -389,8 +372,7 @@ namespace dawn::native {
if (!std::isnan(depthStencilAttachment->clearDepth)) { if (!std::isnan(depthStencilAttachment->clearDepth)) {
// TODO(dawn:1269): Remove this branch after the deprecation period. // TODO(dawn:1269): Remove this branch after the deprecation period.
device->EmitDeprecationWarning( device->EmitDeprecationWarning("clearDepth is deprecated, prefer depthClearValue instead.");
"clearDepth is deprecated, prefer depthClearValue instead.");
} else { } else {
DAWN_INVALID_IF(depthStencilAttachment->depthLoadOp == wgpu::LoadOp::Clear && DAWN_INVALID_IF(depthStencilAttachment->depthLoadOp == wgpu::LoadOp::Clear &&
std::isnan(depthStencilAttachment->depthClearValue), std::isnan(depthStencilAttachment->depthClearValue),
@ -436,9 +418,9 @@ namespace dawn::native {
bool isAllColorAttachmentNull = true; bool isAllColorAttachmentNull = true;
for (uint32_t i = 0; i < descriptor->colorAttachmentCount; ++i) { for (uint32_t i = 0; i < descriptor->colorAttachmentCount; ++i) {
DAWN_TRY_CONTEXT(ValidateRenderPassColorAttachment( DAWN_TRY_CONTEXT(
device, descriptor->colorAttachments[i], width, height, ValidateRenderPassColorAttachment(device, descriptor->colorAttachments[i], width,
sampleCount, usageValidationMode), height, sampleCount, usageValidationMode),
"validating colorAttachments[%u].", i); "validating colorAttachments[%u].", i);
if (descriptor->colorAttachments[i].view) { if (descriptor->colorAttachments[i].view) {
isAllColorAttachmentNull = false; isAllColorAttachmentNull = false;
@ -447,8 +429,8 @@ namespace dawn::native {
if (descriptor->depthStencilAttachment != nullptr) { if (descriptor->depthStencilAttachment != nullptr) {
DAWN_TRY_CONTEXT(ValidateRenderPassDepthStencilAttachment( DAWN_TRY_CONTEXT(ValidateRenderPassDepthStencilAttachment(
device, descriptor->depthStencilAttachment, width, height, device, descriptor->depthStencilAttachment, width, height, sampleCount,
sampleCount, usageValidationMode), usageValidationMode),
"validating depthStencilAttachment."); "validating depthStencilAttachment.");
} else { } else {
DAWN_INVALID_IF( DAWN_INVALID_IF(
@ -459,9 +441,9 @@ namespace dawn::native {
if (descriptor->occlusionQuerySet != nullptr) { if (descriptor->occlusionQuerySet != nullptr) {
DAWN_TRY(device->ValidateObject(descriptor->occlusionQuerySet)); DAWN_TRY(device->ValidateObject(descriptor->occlusionQuerySet));
DAWN_INVALID_IF( DAWN_INVALID_IF(descriptor->occlusionQuerySet->GetQueryType() != wgpu::QueryType::Occlusion,
descriptor->occlusionQuerySet->GetQueryType() != wgpu::QueryType::Occlusion, "The occlusionQuerySet %s type (%s) is not %s.",
"The occlusionQuerySet %s type (%s) is not %s.", descriptor->occlusionQuerySet, descriptor->occlusionQuerySet,
descriptor->occlusionQuerySet->GetQueryType(), wgpu::QueryType::Occlusion); descriptor->occlusionQuerySet->GetQueryType(), wgpu::QueryType::Occlusion);
} }
@ -479,10 +461,9 @@ namespace dawn::native {
DAWN_ASSERT(querySet != nullptr); DAWN_ASSERT(querySet != nullptr);
uint32_t queryIndex = descriptor->timestampWrites[i].queryIndex; uint32_t queryIndex = descriptor->timestampWrites[i].queryIndex;
DAWN_TRY_CONTEXT(ValidateTimestampQuery(device, querySet, queryIndex), DAWN_TRY_CONTEXT(ValidateTimestampQuery(device, querySet, queryIndex),
"validating querySet and queryIndex of timestampWrites[%u].", "validating querySet and queryIndex of timestampWrites[%u].", i);
i); DAWN_TRY_CONTEXT(
DAWN_TRY_CONTEXT(ValidateRenderPassTimestampLocation( ValidateRenderPassTimestampLocation(descriptor->timestampWrites[i].location),
descriptor->timestampWrites[i].location),
"validating location of timestampWrites[%u].", i); "validating location of timestampWrites[%u].", i);
auto checkIt = usedQueries.find(querySet); auto checkIt = usedQueries.find(querySet);
@ -497,8 +478,8 @@ namespace dawn::native {
} }
} }
DAWN_INVALID_IF(descriptor->colorAttachmentCount == 0 && DAWN_INVALID_IF(
descriptor->depthStencilAttachment == nullptr, descriptor->colorAttachmentCount == 0 && descriptor->depthStencilAttachment == nullptr,
"Render pass has no attachments."); "Render pass has no attachments.");
return {}; return {};
@ -515,12 +496,11 @@ namespace dawn::native {
for (uint32_t i = 0; i < descriptor->timestampWriteCount; ++i) { for (uint32_t i = 0; i < descriptor->timestampWriteCount; ++i) {
DAWN_ASSERT(descriptor->timestampWrites[i].querySet != nullptr); DAWN_ASSERT(descriptor->timestampWrites[i].querySet != nullptr);
DAWN_TRY_CONTEXT( DAWN_TRY_CONTEXT(ValidateTimestampQuery(device, descriptor->timestampWrites[i].querySet,
ValidateTimestampQuery(device, descriptor->timestampWrites[i].querySet,
descriptor->timestampWrites[i].queryIndex), descriptor->timestampWrites[i].queryIndex),
"validating querySet and queryIndex of timestampWrites[%u].", i); "validating querySet and queryIndex of timestampWrites[%u].", i);
DAWN_TRY_CONTEXT(ValidateComputePassTimestampLocation( DAWN_TRY_CONTEXT(
descriptor->timestampWrites[i].location), ValidateComputePassTimestampLocation(descriptor->timestampWrites[i].location),
"validating location of timestampWrites[%u].", i); "validating location of timestampWrites[%u].", i);
} }
} }
@ -534,8 +514,8 @@ namespace dawn::native {
const BufferBase* destination, const BufferBase* destination,
uint64_t destinationOffset) { uint64_t destinationOffset) {
DAWN_INVALID_IF(firstQuery >= querySet->GetQueryCount(), DAWN_INVALID_IF(firstQuery >= querySet->GetQueryCount(),
"First query (%u) exceeds the number of queries (%u) in %s.", "First query (%u) exceeds the number of queries (%u) in %s.", firstQuery,
firstQuery, querySet->GetQueryCount(), querySet); querySet->GetQueryCount(), querySet);
DAWN_INVALID_IF( DAWN_INVALID_IF(
queryCount > querySet->GetQueryCount() - firstQuery, queryCount > querySet->GetQueryCount() - firstQuery,
@ -544,20 +524,20 @@ namespace dawn::native {
firstQuery, queryCount, querySet->GetQueryCount(), querySet); firstQuery, queryCount, querySet->GetQueryCount(), querySet);
DAWN_INVALID_IF(destinationOffset % 256 != 0, DAWN_INVALID_IF(destinationOffset % 256 != 0,
"The destination buffer %s offset (%u) is not a multiple of 256.", "The destination buffer %s offset (%u) is not a multiple of 256.", destination,
destination, destinationOffset); destinationOffset);
uint64_t bufferSize = destination->GetSize(); uint64_t bufferSize = destination->GetSize();
// The destination buffer must have enough storage, from destination offset, to contain // The destination buffer must have enough storage, from destination offset, to contain
// the result of resolved queries // the result of resolved queries
bool fitsInBuffer = destinationOffset <= bufferSize && bool fitsInBuffer =
(static_cast<uint64_t>(queryCount) * sizeof(uint64_t) <= destinationOffset <= bufferSize &&
(bufferSize - destinationOffset)); (static_cast<uint64_t>(queryCount) * sizeof(uint64_t) <= (bufferSize - destinationOffset));
DAWN_INVALID_IF( DAWN_INVALID_IF(
!fitsInBuffer, !fitsInBuffer,
"The resolved %s data size (%u) would not fit in %s with size %u at the offset %u.", "The resolved %s data size (%u) would not fit in %s with size %u at the offset %u.",
querySet, static_cast<uint64_t>(queryCount) * sizeof(uint64_t), destination, querySet, static_cast<uint64_t>(queryCount) * sizeof(uint64_t), destination, bufferSize,
bufferSize, destinationOffset); destinationOffset);
return {}; return {};
} }
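The rewrapped fitsInBuffer expression is worth a note: destinationOffset <= bufferSize is evaluated first so that the unsigned subtraction bufferSize - destinationOffset cannot wrap around. A standalone sketch of the same check (illustrative names, not Dawn's API):

#include <cstdint>

// True when `queryCount` 8-byte results written at `offset` stay inside a
// buffer of `size` bytes. Checking `offset <= size` first keeps the
// unsigned `size - offset` from underflowing when the offset is too large.
bool FitsInBuffer(uint64_t size, uint64_t offset, uint32_t queryCount) {
    return offset <= size &&
           static_cast<uint64_t>(queryCount) * sizeof(uint64_t) <= size - offset;
}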
@@ -583,8 +563,7 @@ namespace dawn::native {
    Ref<BufferBase> availabilityBuffer;
    DAWN_TRY_ASSIGN(availabilityBuffer, device->CreateBuffer(&availabilityDesc));
-    DAWN_TRY(device->GetQueue()->WriteBuffer(availabilityBuffer.Get(), 0,
-                                             availability.data(),
+    DAWN_TRY(device->GetQueue()->WriteBuffer(availabilityBuffer.Get(), 0, availability.data(),
                                             availability.size() * sizeof(uint32_t)));
    // Timestamp params uniform buffer
@@ -597,11 +576,10 @@ namespace dawn::native {
    Ref<BufferBase> paramsBuffer;
    DAWN_TRY_ASSIGN(paramsBuffer, device->CreateBuffer(&parmsDesc));
-    DAWN_TRY(
-        device->GetQueue()->WriteBuffer(paramsBuffer.Get(), 0, &params, sizeof(params)));
+    DAWN_TRY(device->GetQueue()->WriteBuffer(paramsBuffer.Get(), 0, &params, sizeof(params)));
-    return EncodeConvertTimestampsToNanoseconds(
-        encoder, destination, availabilityBuffer.Get(), paramsBuffer.Get());
+    return EncodeConvertTimestampsToNanoseconds(encoder, destination, availabilityBuffer.Get(),
+                                                paramsBuffer.Get());
}
bool IsReadOnlyDepthStencilAttachment(
@@ -702,13 +680,11 @@ namespace dawn::native {
// Implementation of the API's command recording methods
-ComputePassEncoder* CommandEncoder::APIBeginComputePass(
-    const ComputePassDescriptor* descriptor) {
+ComputePassEncoder* CommandEncoder::APIBeginComputePass(const ComputePassDescriptor* descriptor) {
    return BeginComputePass(descriptor).Detach();
}
-Ref<ComputePassEncoder> CommandEncoder::BeginComputePass(
-    const ComputePassDescriptor* descriptor) {
+Ref<ComputePassEncoder> CommandEncoder::BeginComputePass(const ComputePassDescriptor* descriptor) {
    DeviceBase* device = GetDevice();
    std::vector<TimestampWrite> timestampWritesAtBeginning;
@@ -786,8 +762,8 @@ namespace dawn::native {
        [&](CommandAllocator* allocator) -> MaybeError {
            uint32_t sampleCount = 0;
-            DAWN_TRY(ValidateRenderPassDescriptor(device, descriptor, &width, &height,
-                                                  &sampleCount, mUsageValidationMode));
+            DAWN_TRY(ValidateRenderPassDescriptor(device, descriptor, &width, &height, &sampleCount,
+                                                  mUsageValidationMode));
            ASSERT(width > 0 && height > 0 && sampleCount > 0);
@@ -947,8 +923,7 @@ namespace dawn::native {
            DAWN_TRY_CONTEXT(ValidateCopySizeFitsInBuffer(source, sourceOffset, size),
                             "validating source %s copy size.", source);
-            DAWN_TRY_CONTEXT(
-                ValidateCopySizeFitsInBuffer(destination, destinationOffset, size),
+            DAWN_TRY_CONTEXT(ValidateCopySizeFitsInBuffer(destination, destinationOffset, size),
                             "validating destination %s copy size.", destination);
            DAWN_TRY(ValidateB2BCopyAlignment(size, sourceOffset, destinationOffset));
@@ -987,8 +962,7 @@ namespace dawn::native {
                             "validating source %s usage.", source->buffer);
            DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *destination, *copySize));
-            DAWN_TRY_CONTEXT(
-                ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst,
+            DAWN_TRY_CONTEXT(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst,
                                              mUsageValidationMode),
                             "validating destination %s usage.", destination->texture);
            DAWN_TRY(ValidateTextureSampleCountInBufferCopyCommands(destination->texture));
@@ -1031,8 +1005,8 @@ namespace dawn::native {
            return {};
        },
-        "encoding %s.CopyBufferToTexture(%s, %s, %s).", this, source->buffer,
-        destination->texture, copySize);
+        "encoding %s.CopyBufferToTexture(%s, %s, %s).", this, source->buffer, destination->texture,
+        copySize);
}
void CommandEncoder::APICopyTextureToBuffer(const ImageCopyTexture* source,
@@ -1050,8 +1024,7 @@ namespace dawn::native {
            DAWN_TRY(ValidateTextureDepthStencilToBufferCopyRestrictions(*source));
            DAWN_TRY(ValidateImageCopyBuffer(GetDevice(), *destination));
-            DAWN_TRY_CONTEXT(
-                ValidateCanUseAs(destination->buffer, wgpu::BufferUsage::CopyDst),
+            DAWN_TRY_CONTEXT(ValidateCanUseAs(destination->buffer, wgpu::BufferUsage::CopyDst),
                             "validating destination %s usage.", destination->buffer);
            // We validate texture copy range before validating linear texture data,
@@ -1090,8 +1063,8 @@ namespace dawn::native {
            return {};
        },
-        "encoding %s.CopyTextureToBuffer(%s, %s, %s).", this, source->texture,
-        destination->buffer, copySize);
+        "encoding %s.CopyTextureToBuffer(%s, %s, %s).", this, source->texture, destination->buffer,
+        copySize);
}
void CommandEncoder::APICopyTextureToTexture(const ImageCopyTexture* source,
@@ -1176,8 +1149,8 @@ namespace dawn::native {
    uint64_t bufferSize = buffer->GetSize();
    DAWN_INVALID_IF(offset > bufferSize,
-                    "Buffer offset (%u) is larger than the size (%u) of %s.",
-                    offset, bufferSize, buffer);
+                    "Buffer offset (%u) is larger than the size (%u) of %s.", offset,
+                    bufferSize, buffer);
    uint64_t remainingSize = bufferSize - offset;
    if (size == wgpu::kWholeSize) {
@@ -1245,8 +1218,7 @@ namespace dawn::native {
        this,
        [&](CommandAllocator* allocator) -> MaybeError {
            if (GetDevice()->IsValidationEnabled()) {
-                DAWN_INVALID_IF(
-                    mDebugGroupStackSize == 0,
+                DAWN_INVALID_IF(mDebugGroupStackSize == 0,
                                "PopDebugGroup called when no debug groups are currently pushed.");
            }
            allocator->Allocate<PopDebugGroupCmd>(Command::PopDebugGroup);
@@ -1315,8 +1287,8 @@ namespace dawn::native {
            return {};
        },
-        "encoding %s.ResolveQuerySet(%s, %u, %u, %s, %u).", this, querySet, firstQuery,
-        queryCount, destination, destinationOffset);
+        "encoding %s.ResolveQuerySet(%s, %u, %u, %s, %u).", this, querySet, firstQuery, queryCount,
+        destination, destinationOffset);
}
void CommandEncoder::APIWriteBuffer(BufferBase* buffer,
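Nearly every hunk in this file only re-wraps arguments to DAWN_INVALID_IF, DAWN_TRY, and DAWN_TRY_CONTEXT, the early-return validation macros used across dawn::native. A self-contained sketch of the general shape of such a macro; Dawn's real MaybeError/ErrorData plumbing is richer than this:

#include <cstdint>
#include <memory>
#include <string>

// A stripped-down MaybeError: empty on success, a message on failure.
using MaybeError = std::unique_ptr<std::string>;

// Early-return when `cond` holds, mirroring how DAWN_INVALID_IF reads.
#define INVALID_IF(cond, msg)                          \
    do {                                               \
        if (cond) {                                    \
            return std::make_unique<std::string>(msg); \
        }                                              \
    } while (0)

MaybeError ValidateSampleCountIsOne(uint32_t sampleCount) {
    INVALID_IF(sampleCount != 1, "sample count is not 1");
    return {};  // success
}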

View File

@@ -162,8 +162,8 @@ namespace dawn::native {
    uint64_t bufferSize = buffer->GetSize();
    bool fitsInBuffer = offset <= bufferSize && (size <= (bufferSize - offset));
    DAWN_INVALID_IF(!fitsInBuffer,
-                    "Copy range (offset: %u, size: %u) does not fit in %s size (%u).", offset,
-                    size, buffer.Get(), bufferSize);
+                    "Copy range (offset: %u, size: %u) does not fit in %s size (%u).", offset, size,
+                    buffer.Get(), bufferSize);
    return {};
}
@@ -201,8 +201,7 @@ namespace dawn::native {
    // TODO(dawn:563): Right now kCopyStrideUndefined will be formatted as a large value in the
    // validation message. Investigate ways to make it print as a more readable symbol.
    DAWN_INVALID_IF(
-        copyExtent.depthOrArrayLayers > 1 &&
-            (layout.bytesPerRow == wgpu::kCopyStrideUndefined ||
+        copyExtent.depthOrArrayLayers > 1 && (layout.bytesPerRow == wgpu::kCopyStrideUndefined ||
                                              layout.rowsPerImage == wgpu::kCopyStrideUndefined),
        "Copy depth (%u) is > 1, but bytesPerRow (%u) or rowsPerImage (%u) are not specified.",
        copyExtent.depthOrArrayLayers, layout.bytesPerRow, layout.rowsPerImage);
@@ -214,8 +213,7 @@ namespace dawn::native {
    // Validation for other members in layout:
    ASSERT(copyExtent.width % blockInfo.width == 0);
    uint32_t widthInBlocks = copyExtent.width / blockInfo.width;
-    ASSERT(Safe32x32(widthInBlocks, blockInfo.byteSize) <=
-           std::numeric_limits<uint32_t>::max());
+    ASSERT(Safe32x32(widthInBlocks, blockInfo.byteSize) <= std::numeric_limits<uint32_t>::max());
    uint32_t bytesInLastRow = widthInBlocks * blockInfo.byteSize;
    // These != wgpu::kCopyStrideUndefined checks are technically redundant with the > checks,
@@ -225,18 +223,18 @@ namespace dawn::native {
                    "The byte size of each row (%u) is > bytesPerRow (%u).", bytesInLastRow,
                    layout.bytesPerRow);
-    DAWN_INVALID_IF(layout.rowsPerImage != wgpu::kCopyStrideUndefined &&
-                    heightInBlocks > layout.rowsPerImage,
-                    "The height of each image in blocks (%u) is > rowsPerImage (%u).",
-                    heightInBlocks, layout.rowsPerImage);
+    DAWN_INVALID_IF(
+        layout.rowsPerImage != wgpu::kCopyStrideUndefined && heightInBlocks > layout.rowsPerImage,
+        "The height of each image in blocks (%u) is > rowsPerImage (%u).", heightInBlocks,
+        layout.rowsPerImage);
    // We compute required bytes in copy after validating texel block alignments
    // because the divisibility conditions are necessary for the algorithm to be valid,
    // also the bytesPerRow bound is necessary to avoid overflows.
    uint64_t requiredBytesInCopy;
-    DAWN_TRY_ASSIGN(requiredBytesInCopy,
-                    ComputeRequiredBytesInCopy(blockInfo, copyExtent, layout.bytesPerRow,
-                                               layout.rowsPerImage));
+    DAWN_TRY_ASSIGN(
+        requiredBytesInCopy,
+        ComputeRequiredBytesInCopy(blockInfo, copyExtent, layout.bytesPerRow, layout.rowsPerImage));
    bool fitsInData =
        layout.offset <= byteSize && (requiredBytesInCopy <= (byteSize - layout.offset));
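ComputeRequiredBytesInCopy itself sits outside this hunk. The quantity follows the usual WebGPU layout rule: every full image costs bytesPerRow * rowsPerImage bytes, and the last image costs bytesPerRow per full row plus the bytes of its final row. A sketch under those assumptions:

#include <cstdint>

// Sketch of the WebGPU-style required-size computation for a linear image
// layout. Sizes are in texel blocks; `bytesPerRow` must already be
// >= widthInBlocks * bytesPerBlock (validated above), and the real helper
// additionally guards each step against uint64 overflow.
uint64_t RequiredBytesInCopy(uint64_t bytesPerRow,
                             uint64_t rowsPerImage,
                             uint64_t widthInBlocks,
                             uint64_t heightInBlocks,
                             uint64_t depth,
                             uint64_t bytesPerBlock) {
    if (widthInBlocks == 0 || heightInBlocks == 0 || depth == 0) {
        return 0;  // empty copies need no bytes
    }
    uint64_t bytesInLastRow = widthInBlocks * bytesPerBlock;
    uint64_t bytes = bytesPerRow * rowsPerImage * (depth - 1);  // all images but the last
    bytes += bytesPerRow * (heightInBlocks - 1);                // full rows of the last image
    bytes += bytesInLastRow;                                    // its final, possibly short row
    return bytes;
}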
@@ -272,8 +270,7 @@ namespace dawn::native {
                    textureCopy.mipLevel, texture->GetNumMipLevels(), texture);
    DAWN_TRY(ValidateTextureAspect(textureCopy.aspect));
-    DAWN_INVALID_IF(
-        SelectFormatAspects(texture->GetFormat(), textureCopy.aspect) == Aspect::None,
+    DAWN_INVALID_IF(SelectFormatAspects(texture->GetFormat(), textureCopy.aspect) == Aspect::None,
                    "%s format (%s) does not have the selected aspect (%s).", texture,
                    texture->GetFormat().format, textureCopy.aspect);
@@ -287,8 +284,8 @@ namespace dawn::native {
                        "Copy origin (%s) and size (%s) does not cover the entire subresource (origin: "
                        "[x: 0, y: 0], size: %s) of %s. The entire subresource must be copied when the "
                        "format (%s) is a depth/stencil format or the sample count (%u) is > 1.",
-                        &textureCopy.origin, &copySize, &subresourceSize, texture,
-                        texture->GetFormat().format, texture->GetSampleCount());
+                        &textureCopy.origin, &copySize, &subresourceSize, texture, texture->GetFormat().format,
+                        texture->GetSampleCount());
    }
    return {};
@@ -311,8 +308,7 @@ namespace dawn::native {
    DAWN_INVALID_IF(
        static_cast<uint64_t>(textureCopy.origin.x) + static_cast<uint64_t>(copySize.width) >
            static_cast<uint64_t>(mipSize.width) ||
-            static_cast<uint64_t>(textureCopy.origin.y) +
-                static_cast<uint64_t>(copySize.height) >
+            static_cast<uint64_t>(textureCopy.origin.y) + static_cast<uint64_t>(copySize.height) >
                static_cast<uint64_t>(mipSize.height) ||
            static_cast<uint64_t>(textureCopy.origin.z) +
                static_cast<uint64_t>(copySize.depthOrArrayLayers) >
@@ -340,8 +336,7 @@ namespace dawn::native {
                    "copySize.width (%u) is not a multiple of compressed texture format block width "
                    "(%u).",
                    copySize.width, blockInfo.width);
-    DAWN_INVALID_IF(
-        copySize.height % blockInfo.height != 0,
+    DAWN_INVALID_IF(copySize.height % blockInfo.height != 0,
                    "copySize.height (%u) is not a multiple of compressed texture format block "
                    "height (%u).",
                    copySize.height, blockInfo.height);
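For block-compressed formats the copy extent must cover whole texel blocks. With the common 4x4 block size, a 64x32 copy passes both checks while a 62x32 copy fails the width check (62 % 4 != 0). The predicate in isolation:

#include <cstdint>

// A copy of w x h texels is block-aligned when both dimensions are
// multiples of the format's block size (e.g. 4x4 for BC formats).
bool IsBlockAligned(uint32_t w, uint32_t h, uint32_t blockW, uint32_t blockH) {
    return w % blockW == 0 && h % blockH == 0;
}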
@@ -388,8 +383,8 @@ namespace dawn::native {
            return {};
        default:
            DAWN_INVALID_IF(aspectUsed == Aspect::Depth,
-                            "Cannot copy into the depth aspect of %s with format %s.",
-                            dst.texture, format.format);
+                            "Cannot copy into the depth aspect of %s with format %s.", dst.texture,
+                            format.format);
            break;
    }
@@ -427,14 +422,13 @@ namespace dawn::native {
            return DAWN_FORMAT_VALIDATION_ERROR("Copy is from %s to itself.", src.texture);
        case wgpu::TextureDimension::e2D:
-            DAWN_INVALID_IF(src.mipLevel == dst.mipLevel &&
-                            IsRangeOverlapped(src.origin.z, dst.origin.z,
-                                              copySize.depthOrArrayLayers),
+            DAWN_INVALID_IF(
+                src.mipLevel == dst.mipLevel &&
+                    IsRangeOverlapped(src.origin.z, dst.origin.z, copySize.depthOrArrayLayers),
                "Copy source and destination are overlapping layer ranges "
                "([%u, %u) and [%u, %u)) of %s mip level %u",
-                src.origin.z, src.origin.z + copySize.depthOrArrayLayers,
-                dst.origin.z, dst.origin.z + copySize.depthOrArrayLayers,
-                src.texture, src.mipLevel);
+                src.origin.z, src.origin.z + copySize.depthOrArrayLayers, dst.origin.z,
+                dst.origin.z + copySize.depthOrArrayLayers, src.texture, src.mipLevel);
            break;
        case wgpu::TextureDimension::e3D:
@@ -453,8 +447,7 @@ namespace dawn::native {
                                                 const Extent3D& copySize) {
    // Metal requires texture-to-texture copies happens between texture formats that equal to
    // each other or only have diff on srgb-ness.
-    DAWN_INVALID_IF(
-        !src.texture->GetFormat().CopyCompatibleWith(dst.texture->GetFormat()),
+    DAWN_INVALID_IF(!src.texture->GetFormat().CopyCompatibleWith(dst.texture->GetFormat()),
                    "Source %s format (%s) and destination %s format (%s) are not copy compatible.",
                    src.texture, src.texture->GetFormat().format, dst.texture,
                    dst.texture->GetFormat().format);
@@ -483,9 +476,8 @@ namespace dawn::native {
MaybeError ValidateCanUseAs(const BufferBase* buffer, wgpu::BufferUsage usage) {
    ASSERT(wgpu::HasZeroOrOneBits(usage));
-    DAWN_INVALID_IF(!(buffer->GetUsageExternalOnly() & usage),
-                    "%s usage (%s) doesn't include %s.", buffer, buffer->GetUsageExternalOnly(),
-                    usage);
+    DAWN_INVALID_IF(!(buffer->GetUsageExternalOnly() & usage), "%s usage (%s) doesn't include %s.",
+                    buffer, buffer->GetUsageExternalOnly(), usage);
    return {};
}
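The ASSERT above restricts this helper to a single usage bit at a time. The standard trick for "at most one bit set" clears the lowest set bit and checks that nothing remains; a sketch (Dawn's actual HasZeroOrOneBits is templated over its enum types):

#include <cstdint>

// True when at most one bit of `bits` is set: x & (x - 1) clears the
// lowest set bit, so the result must be zero. Zero itself passes.
bool HasZeroOrOneBits(uint32_t bits) {
    return (bits & (bits - 1)) == 0;
}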

View File

@@ -62,8 +62,7 @@ namespace dawn::native {
            break;
        }
        case Command::CopyTextureToTexture: {
-            CopyTextureToTextureCmd* copy =
-                commands->NextCommand<CopyTextureToTextureCmd>();
+            CopyTextureToTextureCmd* copy = commands->NextCommand<CopyTextureToTextureCmd>();
            copy->~CopyTextureToTextureCmd();
            break;
        }

View File

@@ -52,8 +52,8 @@ namespace dawn::native {
    ASSERT(mCompilationInfo.messages == nullptr);
    mMessageStrings.push_back(message);
-    mMessages.push_back({nullptr, nullptr, static_cast<WGPUCompilationMessageType>(type),
-                         lineNum, linePos, offset, length});
+    mMessages.push_back({nullptr, nullptr, static_cast<WGPUCompilationMessageType>(type), lineNum,
+                         linePos, offset, length});
}
void OwnedCompilationMessages::AddMessage(const tint::diag::Diagnostic& diagnostic) {
@@ -114,8 +114,8 @@ namespace dawn::native {
        mMessageStrings.push_back(diagnostic.message);
    }
-    mMessages.push_back({nullptr, nullptr, tintSeverityToMessageType(diagnostic.severity),
-                         lineNum, linePos, offset, length});
+    mMessages.push_back({nullptr, nullptr, tintSeverityToMessageType(diagnostic.severity), lineNum,
+                         linePos, offset, length});
}
void OwnedCompilationMessages::AddMessages(const tint::diag::List& diagnostics) {
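Each pushed message stores both human-oriented coordinates (lineNum and linePos, 1-based) and a machine-oriented span (offset and length) into the same source. A sketch of how the two relate, assuming well-formed positions that actually lie inside the source:

#include <cstddef>
#include <string>

// Map a 1-based (lineNum, linePos) position to a 0-based byte offset,
// the redundancy kept alongside offset/length in the rows pushed above.
std::size_t OffsetOf(const std::string& source, std::size_t lineNum, std::size_t linePos) {
    std::size_t offset = 0;
    for (std::size_t line = 1; line < lineNum; ++line) {
        offset = source.find('\n', offset) + 1;  // start of the next line
    }
    return offset + (linePos - 1);
}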

View File

@@ -82,8 +82,7 @@ namespace dawn::native {
    )"));
    Ref<BindGroupLayoutBase> bindGroupLayout;
-    DAWN_TRY_ASSIGN(
-        bindGroupLayout,
+    DAWN_TRY_ASSIGN(bindGroupLayout,
                    utils::MakeBindGroupLayout(
                        device,
                        {
@@ -94,8 +93,7 @@ namespace dawn::native {
                        /* allowInternalBinding */ true));
    Ref<PipelineLayoutBase> pipelineLayout;
-    DAWN_TRY_ASSIGN(pipelineLayout,
-                    utils::MakeBasicPipelineLayout(device, bindGroupLayout));
+    DAWN_TRY_ASSIGN(pipelineLayout, utils::MakeBasicPipelineLayout(device, bindGroupLayout));
    ComputePipelineDescriptor computePipelineDescriptor = {};
    computePipelineDescriptor.layout = pipelineLayout.Get();
@@ -128,16 +126,15 @@ namespace dawn::native {
                                                  CommandEncoder* commandEncoder,
                                                  EncodingContext* encodingContext,
                                                  std::vector<TimestampWrite> timestampWritesAtEnd) {
-    return AcquireRef(new ComputePassEncoder(device, descriptor, commandEncoder,
-                                             encodingContext, std::move(timestampWritesAtEnd)));
+    return AcquireRef(new ComputePassEncoder(device, descriptor, commandEncoder, encodingContext,
+                                             std::move(timestampWritesAtEnd)));
}
ComputePassEncoder::ComputePassEncoder(DeviceBase* device,
                                       CommandEncoder* commandEncoder,
                                       EncodingContext* encodingContext,
                                       ErrorTag errorTag)
-    : ProgrammableEncoder(device, encodingContext, errorTag), mCommandEncoder(commandEncoder) {
-}
+    : ProgrammableEncoder(device, encodingContext, errorTag), mCommandEncoder(commandEncoder) {}
// static
Ref<ComputePassEncoder> ComputePassEncoder::MakeError(DeviceBase* device,
@@ -258,12 +255,10 @@ namespace dawn::native {
    Ref<BindGroupLayoutBase> layout;
    DAWN_TRY_ASSIGN(layout, validationPipeline->GetBindGroupLayout(0));
-    uint32_t storageBufferOffsetAlignment =
-        device->GetLimits().v1.minStorageBufferOffsetAlignment;
+    uint32_t storageBufferOffsetAlignment = device->GetLimits().v1.minStorageBufferOffsetAlignment;
    // Let the offset be the indirectOffset, aligned down to |storageBufferOffsetAlignment|.
-    const uint32_t clientOffsetFromAlignedBoundary =
-        indirectOffset % storageBufferOffsetAlignment;
+    const uint32_t clientOffsetFromAlignedBoundary = indirectOffset % storageBufferOffsetAlignment;
    const uint64_t clientOffsetAlignedDown = indirectOffset - clientOffsetFromAlignedBoundary;
    const uint64_t clientIndirectBindingOffset = clientOffsetAlignedDown;
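The two statements above are the align-down idiom: subtracting offset % alignment lands on the previous multiple of the alignment, and the remainder is re-applied when indexing inside the bound range. With indirectOffset = 300 and a 256-byte minStorageBufferOffsetAlignment, the binding starts at 256 with a 44-byte in-binding offset. In isolation:

#include <cstdint>

// Align `offset` down to the previous multiple of `alignment`
// (alignment must be non-zero; in WebGPU it is a power of two).
uint64_t AlignDown(uint64_t offset, uint64_t alignment) {
    return offset - offset % alignment;  // AlignDown(300, 256) == 256
}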
@@ -291,8 +286,8 @@ namespace dawn::native {
        params.enableValidation = static_cast<uint32_t>(IsValidationEnabled());
        params.duplicateNumWorkgroups = static_cast<uint32_t>(shouldDuplicateNumWorkgroups);
-        DAWN_TRY_ASSIGN(uniformBuffer, utils::CreateBufferFromData(
-                                           device, wgpu::BufferUsage::Uniform, {params}));
+        DAWN_TRY_ASSIGN(uniformBuffer,
+                        utils::CreateBufferFromData(device, wgpu::BufferUsage::Uniform, {params}));
    }
    // Reserve space in the scratch buffer to hold the validated indirect params.
@@ -325,8 +320,7 @@ namespace dawn::native {
    return std::make_pair(std::move(validatedIndirectBuffer), uint64_t(0));
}
-void ComputePassEncoder::APIDispatchIndirect(BufferBase* indirectBuffer,
-                                             uint64_t indirectOffset) {
+void ComputePassEncoder::APIDispatchIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset) {
    GetDevice()->EmitDeprecationWarning(
        "dispatchIndirect() has been deprecated. Use dispatchWorkgroupsIndirect() instead.");
    APIDispatchWorkgroupsIndirect(indirectBuffer, indirectOffset);
@@ -396,8 +390,7 @@ namespace dawn::native {
            dispatch->indirectOffset = indirectOffset;
            return {};
        },
-        "encoding %s.DispatchWorkgroupsIndirect(%s, %u).", this, indirectBuffer,
-        indirectOffset);
+        "encoding %s.DispatchWorkgroupsIndirect(%s, %u).", this, indirectBuffer, indirectOffset);
}
void ComputePassEncoder::APISetPipeline(ComputePipelineBase* pipeline) {
@@ -429,15 +422,13 @@ namespace dawn::native {
            BindGroupIndex groupIndex(groupIndexIn);
            if (IsValidationEnabled()) {
-                DAWN_TRY(ValidateSetBindGroup(groupIndex, group, dynamicOffsetCount,
-                                              dynamicOffsets));
+                DAWN_TRY(
+                    ValidateSetBindGroup(groupIndex, group, dynamicOffsetCount, dynamicOffsets));
            }
            mUsageTracker.AddResourcesReferencedByBindGroup(group);
-            RecordSetBindGroup(allocator, groupIndex, group, dynamicOffsetCount,
-                               dynamicOffsets);
-            mCommandBufferState.SetBindGroup(groupIndex, group, dynamicOffsetCount,
-                                             dynamicOffsets);
+            RecordSetBindGroup(allocator, groupIndex, group, dynamicOffsetCount, dynamicOffsets);
+            mCommandBufferState.SetBindGroup(groupIndex, group, dynamicOffsetCount, dynamicOffsets);
            return {};
        },

View File

@@ -40,12 +40,12 @@ namespace dawn::native {
ComputePipelineBase::ComputePipelineBase(DeviceBase* device,
                                         const ComputePipelineDescriptor* descriptor)
-    : PipelineBase(device,
+    : PipelineBase(
+          device,
          descriptor->layout,
          descriptor->label,
-          {{SingleShaderStage::Compute, descriptor->compute.module,
-            descriptor->compute.entryPoint, descriptor->compute.constantCount,
-            descriptor->compute.constants}}) {
+          {{SingleShaderStage::Compute, descriptor->compute.module, descriptor->compute.entryPoint,
+            descriptor->compute.constantCount, descriptor->compute.constants}}) {
    SetContentHash(ComputeContentHash());
    TrackInDevice();
@@ -58,8 +58,7 @@ namespace dawn::native {
}
ComputePipelineBase::ComputePipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)
-    : PipelineBase(device, tag) {
-}
+    : PipelineBase(device, tag) {}
ComputePipelineBase::~ComputePipelineBase() = default;
@@ -75,8 +74,7 @@ namespace dawn::native {
class ErrorComputePipeline final : public ComputePipelineBase {
  public:
    explicit ErrorComputePipeline(DeviceBase* device)
-        : ComputePipelineBase(device, ObjectBase::kError) {
-    }
+        : ComputePipelineBase(device, ObjectBase::kError) {}
    MaybeError Initialize() override {
        UNREACHABLE();
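ErrorComputePipeline is an instance of the error-object pattern: construction always succeeds so the API can still return an object for an invalid request, but the object is tagged and must never be initialized. A self-contained sketch of the shape, with stand-ins for Dawn's machinery:

#include <cassert>

// Stand-ins for Dawn's types, only to show the shape of the pattern.
struct Device {};
enum class Tag { Normal, Error };

class Pipeline {
  public:
    Pipeline(Device* device, Tag tag) : mDevice(device), mTag(tag) {}
    virtual ~Pipeline() = default;
    bool IsError() const { return mTag == Tag::Error; }
    virtual void Initialize() {}  // real pipelines do backend work here

  private:
    Device* mDevice;
    Tag mTag;
};

class ErrorPipeline final : public Pipeline {
  public:
    explicit ErrorPipeline(Device* device) : Pipeline(device, Tag::Error) {}
    void Initialize() override { assert(false && "error pipelines are never initialized"); }
};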

View File

@@ -231,8 +231,8 @@ namespace dawn::native {
        case wgpu::TextureFormat::RGBA8Unorm:
            break;
        default:
-            return DAWN_FORMAT_VALIDATION_ERROR(
-                "Source texture format (%s) is not supported.", srcFormat);
+            return DAWN_FORMAT_VALIDATION_ERROR("Source texture format (%s) is not supported.",
+                                                srcFormat);
    }
    switch (dstFormat) {
@@ -251,15 +251,14 @@ namespace dawn::native {
        case wgpu::TextureFormat::RGBA32Float:
            break;
        default:
-            return DAWN_FORMAT_VALIDATION_ERROR(
-                "Destination texture format (%s) is not supported.", dstFormat);
+            return DAWN_FORMAT_VALIDATION_ERROR("Destination texture format (%s) is not supported.",
+                                                dstFormat);
    }
    return {};
}
-RenderPipelineBase* GetCachedPipeline(InternalPipelineStore* store,
-                                      wgpu::TextureFormat dstFormat) {
+RenderPipelineBase* GetCachedPipeline(InternalPipelineStore* store, wgpu::TextureFormat dstFormat) {
    auto pipeline = store->copyTextureForBrowserPipelines.find(dstFormat);
    if (pipeline != store->copyTextureForBrowserPipelines.end()) {
        return pipeline->second.Get();
@@ -275,8 +274,7 @@ namespace dawn::native {
    if (GetCachedPipeline(store, dstFormat) == nullptr) {
        // Create vertex shader module if not cached before.
        if (store->copyTextureForBrowser == nullptr) {
-            DAWN_TRY_ASSIGN(
-                store->copyTextureForBrowser,
+            DAWN_TRY_ASSIGN(store->copyTextureForBrowser,
                            utils::CreateShaderModule(device, sCopyTextureForBrowserShader));
        }
@@ -330,8 +328,7 @@ namespace dawn::native {
    DAWN_INVALID_IF(source->texture->GetTextureState() == TextureBase::TextureState::Destroyed,
                    "Source texture %s is destroyed.", source->texture);
-    DAWN_INVALID_IF(
-        destination->texture->GetTextureState() == TextureBase::TextureState::Destroyed,
+    DAWN_INVALID_IF(destination->texture->GetTextureState() == TextureBase::TextureState::Destroyed,
                    "Destination texture %s is destroyed.", destination->texture);
    DAWN_TRY_CONTEXT(ValidateImageCopyTexture(device, *source, *copySize),
@@ -346,10 +343,9 @@ namespace dawn::native {
    DAWN_TRY(ValidateTextureToTextureCopyCommonRestrictions(*source, *destination, *copySize));
-    DAWN_INVALID_IF(source->origin.z > 0, "Source has a non-zero z origin (%u).",
-                    source->origin.z);
-    DAWN_INVALID_IF(copySize->depthOrArrayLayers > 1,
-                    "Copy is for more than one array layer (%u)", copySize->depthOrArrayLayers);
+    DAWN_INVALID_IF(source->origin.z > 0, "Source has a non-zero z origin (%u).", source->origin.z);
+    DAWN_INVALID_IF(copySize->depthOrArrayLayers > 1, "Copy is for more than one array layer (%u)",
+                    copySize->depthOrArrayLayers);
    DAWN_INVALID_IF(
        source->texture->GetSampleCount() > 1 || destination->texture->GetSampleCount() > 1,
@@ -461,8 +457,7 @@ namespace dawn::native {
    constexpr uint32_t kDecodeForSrgbDstFormat = 0x20;
    if (options->srcAlphaMode == wgpu::AlphaMode::Premultiplied) {
-        if (options->needsColorSpaceConversion ||
-            options->srcAlphaMode != options->dstAlphaMode) {
+        if (options->needsColorSpaceConversion || options->srcAlphaMode != options->dstAlphaMode) {
            stepsMask |= kUnpremultiplyStep;
        }
    }
@@ -471,9 +466,9 @@ namespace dawn::native {
        stepsMask |= kDecodeToLinearStep;
        const float* decodingParams = options->srcTransferFunctionParameters;
-        uniformData.gammaDecodingParams = {
-            decodingParams[0], decodingParams[1], decodingParams[2], decodingParams[3],
-            decodingParams[4], decodingParams[5], decodingParams[6]};
+        uniformData.gammaDecodingParams = {decodingParams[0], decodingParams[1], decodingParams[2],
+                                           decodingParams[3], decodingParams[4], decodingParams[5],
+                                           decodingParams[6]};
        stepsMask |= kConvertToDstGamutStep;
        const float* matrix = options->conversionMatrix;
@@ -495,14 +490,13 @@ namespace dawn::native {
        stepsMask |= kEncodeToGammaStep;
        const float* encodingParams = options->dstTransferFunctionParameters;
-        uniformData.gammaEncodingParams = {
-            encodingParams[0], encodingParams[1], encodingParams[2], encodingParams[3],
-            encodingParams[4], encodingParams[5], encodingParams[6]};
+        uniformData.gammaEncodingParams = {encodingParams[0], encodingParams[1], encodingParams[2],
+                                           encodingParams[3], encodingParams[4], encodingParams[5],
+                                           encodingParams[6]};
    }
    if (options->dstAlphaMode == wgpu::AlphaMode::Premultiplied) {
-        if (options->needsColorSpaceConversion ||
-            options->srcAlphaMode != options->dstAlphaMode) {
+        if (options->needsColorSpaceConversion || options->srcAlphaMode != options->dstAlphaMode) {
            stepsMask |= kPremultiplyStep;
        }
    }
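The seven floats copied into gammaDecodingParams/gammaEncodingParams above are, as far as Dawn's CopyTextureForBrowser options document, the coefficients of the skcms-style piecewise transfer function {g, a, b, c, d, e, f}. A sketch assuming that ordering:

#include <cmath>

// The 7-parameter transfer function {g, a, b, c, d, e, f}:
//   f(x) = c*x + f              for 0 <= x < d   (linear toe)
//   f(x) = pow(a*x + b, g) + e  for x >= d       (power segment)
// sRGB decoding, for instance, is approximately
//   {2.4, 1/1.055, 0.055/1.055, 1/12.92, 0.04045, 0.0, 0.0}.
float ApplyTransferFunction(const float p[7], float x) {
    float g = p[0], a = p[1], b = p[2], c = p[3], d = p[4], e = p[5], f = p[6];
    return x < d ? c * x + f : std::pow(a * x + b, g) + e;
}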
@@ -531,8 +525,8 @@ namespace dawn::native {
    Ref<BufferBase> uniformBuffer;
    DAWN_TRY_ASSIGN(
        uniformBuffer,
-        utils::CreateBufferFromData(
-            device, wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Uniform, {uniformData}));
+        utils::CreateBufferFromData(device, wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Uniform,
+                                    {uniformData}));
    // Prepare binding 1 resource: sampler
    // Use default configuration, filterMode set to Nearest for min and mag.
@@ -551,8 +545,8 @@ namespace dawn::native {
    // Create bind group after all binding entries are set.
    Ref<BindGroupBase> bindGroup;
-    DAWN_TRY_ASSIGN(bindGroup, utils::MakeBindGroup(
-                                   device, layout,
-                                   {{0, uniformBuffer}, {1, sampler}, {2, srcTextureView}}));
+    DAWN_TRY_ASSIGN(bindGroup,
+                    utils::MakeBindGroup(device, layout,
+                                         {{0, uniformBuffer}, {1, sampler}, {2, srcTextureView}}));
    // Create command encoder.
@@ -567,8 +561,7 @@ namespace dawn::native {
    dstTextureViewDesc.arrayLayerCount = 1;
    Ref<TextureViewBase> dstView;
-    DAWN_TRY_ASSIGN(dstView,
-                    device->CreateTextureView(destination->texture, &dstTextureViewDesc));
+    DAWN_TRY_ASSIGN(dstView, device->CreateTextureView(destination->texture, &dstTextureViewDesc));
    // Prepare render pass color attachment descriptor.
    RenderPassColorAttachment colorAttachmentDesc;

View File

@@ -26,11 +26,9 @@
namespace dawn::native {
-CreatePipelineAsyncCallbackTaskBase::CreatePipelineAsyncCallbackTaskBase(
-    std::string errorMessage,
+CreatePipelineAsyncCallbackTaskBase::CreatePipelineAsyncCallbackTaskBase(std::string errorMessage,
                                                                         void* userdata)
-    : mErrorMessage(errorMessage), mUserData(userdata) {
-}
+    : mErrorMessage(errorMessage), mUserData(userdata) {}
CreateComputePipelineAsyncCallbackTask::CreateComputePipelineAsyncCallbackTask(
    Ref<ComputePipelineBase> pipeline,
@@ -39,8 +37,7 @@ namespace dawn::native {
                                       void* userdata)
    : CreatePipelineAsyncCallbackTaskBase(errorMessage, userdata),
      mPipeline(std::move(pipeline)),
-      mCreateComputePipelineAsyncCallback(callback) {
-}
+      mCreateComputePipelineAsyncCallback(callback) {}
void CreateComputePipelineAsyncCallbackTask::Finish() {
    ASSERT(mCreateComputePipelineAsyncCallback != nullptr);
@@ -75,8 +72,7 @@ namespace dawn::native {
                                     void* userdata)
    : CreatePipelineAsyncCallbackTaskBase(errorMessage, userdata),
      mPipeline(std::move(pipeline)),
-      mCreateRenderPipelineAsyncCallback(callback) {
-}
+      mCreateRenderPipelineAsyncCallback(callback) {}
void CreateRenderPipelineAsyncCallbackTask::Finish() {
    ASSERT(mCreateRenderPipelineAsyncCallback != nullptr);
@@ -119,8 +115,7 @@ namespace dawn::native {
    DeviceBase* device = mComputePipeline->GetDevice();
    TRACE_EVENT_FLOW_END1(device->GetPlatform(), General,
-                          "CreateComputePipelineAsyncTask::RunAsync", this, "label",
-                          eventLabel);
+                          "CreateComputePipelineAsyncTask::RunAsync", this, "label", eventLabel);
    TRACE_EVENT1(device->GetPlatform(), General, "CreateComputePipelineAsyncTask::Run", "label",
                 eventLabel);
@@ -139,8 +134,7 @@ namespace dawn::native {
    std::unique_ptr<CreateComputePipelineAsyncTask> task) {
    DeviceBase* device = task->mComputePipeline->GetDevice();
-    const char* eventLabel =
-        utils::GetLabelForTrace(task->mComputePipeline->GetLabel().c_str());
+    const char* eventLabel = utils::GetLabelForTrace(task->mComputePipeline->GetLabel().c_str());
    // Using "taskPtr = std::move(task)" causes compilation error while it should be supported
    // since C++14:
@@ -170,8 +164,8 @@ namespace dawn::native {
    const char* eventLabel = utils::GetLabelForTrace(mRenderPipeline->GetLabel().c_str());
    DeviceBase* device = mRenderPipeline->GetDevice();
-    TRACE_EVENT_FLOW_END1(device->GetPlatform(), General,
-                          "CreateRenderPipelineAsyncTask::RunAsync", this, "label", eventLabel);
+    TRACE_EVENT_FLOW_END1(device->GetPlatform(), General, "CreateRenderPipelineAsyncTask::RunAsync",
+                          this, "label", eventLabel);
    TRACE_EVENT1(device->GetPlatform(), General, "CreateRenderPipelineAsyncTask::Run", "label",
                 eventLabel);
@@ -182,12 +176,10 @@ namespace dawn::native {
        errorMessage = maybeError.AcquireError()->GetMessage();
    }
-    device->AddRenderPipelineAsyncCallbackTask(mRenderPipeline, errorMessage, mCallback,
-                                               mUserdata);
+    device->AddRenderPipelineAsyncCallbackTask(mRenderPipeline, errorMessage, mCallback, mUserdata);
}
-void CreateRenderPipelineAsyncTask::RunAsync(
-    std::unique_ptr<CreateRenderPipelineAsyncTask> task) {
+void CreateRenderPipelineAsyncTask::RunAsync(std::unique_ptr<CreateRenderPipelineAsyncTask> task) {
    DeviceBase* device = task->mRenderPipeline->GetDevice();
    const char* eventLabel = utils::GetLabelForTrace(task->mRenderPipeline->GetLabel().c_str());
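The Run/RunAsync split above hands ownership of the task (a std::unique_ptr) to whichever thread eventually runs it. A minimal sketch of that hand-off, using the generalized lambda capture that, per the comment in one of the hunks above, tripped up one of Dawn's toolchains; Dawn routes the task through its platform worker pool rather than a raw std::thread:

#include <functional>
#include <memory>
#include <thread>
#include <utility>

struct Task {
    std::function<void()> work;
    void Run() { work(); }
};

void RunAsync(std::unique_ptr<Task> task) {
    // C++14 generalized lambda capture moves the task into the closure,
    // so exactly one owner exists at any time.
    std::thread([taskPtr = std::move(task)] { taskPtr->Run(); }).detach();
}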

View File

@@ -30,8 +30,7 @@ namespace dawn::native {
namespace {
struct ComboDeprecatedDawnDeviceDescriptor : DeviceDescriptor {
-    explicit ComboDeprecatedDawnDeviceDescriptor(
-        const DawnDeviceDescriptor* deviceDescriptor) {
+    explicit ComboDeprecatedDawnDeviceDescriptor(const DawnDeviceDescriptor* deviceDescriptor) {
        dawn::WarningLog() << "DawnDeviceDescriptor is deprecated. Please use "
                              "WGPUDeviceDescriptor instead.";
@@ -40,12 +39,9 @@ namespace dawn::native {
        if (deviceDescriptor != nullptr) {
            desc->nextInChain = &mTogglesDesc;
            mTogglesDesc.forceEnabledToggles = deviceDescriptor->forceEnabledToggles.data();
-            mTogglesDesc.forceEnabledTogglesCount =
-                deviceDescriptor->forceEnabledToggles.size();
-            mTogglesDesc.forceDisabledToggles =
-                deviceDescriptor->forceDisabledToggles.data();
-            mTogglesDesc.forceDisabledTogglesCount =
-                deviceDescriptor->forceDisabledToggles.size();
+            mTogglesDesc.forceEnabledTogglesCount = deviceDescriptor->forceEnabledToggles.size();
+            mTogglesDesc.forceDisabledToggles = deviceDescriptor->forceDisabledToggles.data();
+            mTogglesDesc.forceDisabledTogglesCount = deviceDescriptor->forceDisabledToggles.size();
            desc->requiredLimits =
                reinterpret_cast<const RequiredLimits*>(deviceDescriptor->requiredLimits);
@@ -91,8 +87,7 @@ namespace dawn::native {
    mImpl = nullptr;
}
-Adapter::Adapter(const Adapter& other) : Adapter(other.mImpl) {
-}
+Adapter::Adapter(const Adapter& other) : Adapter(other.mImpl) {}
Adapter& Adapter::operator=(const Adapter& other) {
    if (this != &other) {
@@ -185,14 +180,12 @@ namespace dawn::native {
// AdapterDiscoverOptionsBase
AdapterDiscoveryOptionsBase::AdapterDiscoveryOptionsBase(WGPUBackendType type)
-    : backendType(type) {
-}
+    : backendType(type) {}
// Instance
Instance::Instance(const WGPUInstanceDescriptor* desc)
-    : mImpl(APICreateInstance(reinterpret_cast<const InstanceDescriptor*>(desc))) {
-}
+    : mImpl(APICreateInstance(reinterpret_cast<const InstanceDescriptor*>(desc))) {}
Instance::~Instance() {
    if (mImpl != nullptr) {
@@ -283,8 +276,7 @@ namespace dawn::native {
// ExternalImageDescriptor
-ExternalImageDescriptor::ExternalImageDescriptor(ExternalImageType type) : mType(type) {
-}
+ExternalImageDescriptor::ExternalImageDescriptor(ExternalImageType type) : mType(type) {}
ExternalImageType ExternalImageDescriptor::GetType() const {
    return mType;
@@ -292,8 +284,7 @@ namespace dawn::native {
// ExternalImageExportInfo
-ExternalImageExportInfo::ExternalImageExportInfo(ExternalImageType type) : mType(type) {
-}
+ExternalImageExportInfo::ExternalImageExportInfo(ExternalImageType type) : mType(type) {}
ExternalImageType ExternalImageExportInfo::GetType() const {
    return mType;
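ExternalImageDescriptor and ExternalImageExportInfo are type-tagged bases: each backend subclass passes its ExternalImageType up, and consumers query GetType() before downcasting. A condensed sketch; the enum values here are an illustrative subset and ExternalImageDescriptorFD is hypothetical:

enum class ExternalImageType { OpaqueFD, DmaBuf, IOSurface, DXGISharedHandle };

class ExternalImageDescriptor {
  public:
    ExternalImageType GetType() const { return mType; }

  protected:
    explicit ExternalImageDescriptor(ExternalImageType type) : mType(type) {}

  private:
    ExternalImageType mType;
};

class ExternalImageDescriptorFD : public ExternalImageDescriptor {
  public:
    ExternalImageDescriptorFD() : ExternalImageDescriptor(ExternalImageType::OpaqueFD) {}
    int memoryFD = -1;  // the file descriptor to import
};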

View File

@ -104,18 +104,14 @@ namespace dawn::native {
     // may already disposed, we must keep a local copy in the CallbackTask.
     }

-    void Finish() override {
-        mCallback(mLoggingType, mMessage.c_str(), mUserdata);
-    }
+    void Finish() override { mCallback(mLoggingType, mMessage.c_str(), mUserdata); }

     void HandleShutDown() override {
         // Do the logging anyway
         mCallback(mLoggingType, mMessage.c_str(), mUserdata);
     }

-    void HandleDeviceLoss() override {
-        mCallback(mLoggingType, mMessage.c_str(), mUserdata);
-    }
+    void HandleDeviceLoss() override { mCallback(mLoggingType, mMessage.c_str(), mUserdata); }

   private:
     // As all deferred callback tasks will be triggered before modifying the registered

@ -127,8 +123,7 @@ namespace dawn::native {
     void* mUserdata;
 };

-ResultOrError<Ref<PipelineLayoutBase>>
-ValidateLayoutAndGetComputePipelineDescriptorWithDefaults(
+ResultOrError<Ref<PipelineLayoutBase>> ValidateLayoutAndGetComputePipelineDescriptorWithDefaults(
     DeviceBase* device,
     const ComputePipelineDescriptor& descriptor,
     ComputePipelineDescriptor* outDescriptor) {

@ -150,8 +145,7 @@ namespace dawn::native {
     return layoutRef;
 }

-ResultOrError<Ref<PipelineLayoutBase>>
-ValidateLayoutAndGetRenderPipelineDescriptorWithDefaults(
+ResultOrError<Ref<PipelineLayoutBase>> ValidateLayoutAndGetRenderPipelineDescriptorWithDefaults(
     DeviceBase* device,
     const RenderPipelineDescriptor& descriptor,
     RenderPipelineDescriptor* outDescriptor) {

@ -161,9 +155,9 @@ namespace dawn::native {
     if (descriptor.layout == nullptr) {
         // Ref will keep the pipeline layout alive until the end of the function where
         // the pipeline will take another reference.
-        DAWN_TRY_ASSIGN(layoutRef, PipelineLayoutBase::CreateDefault(
-                                       device, GetRenderStagesAndSetPlaceholderShader(
-                                                   device, &descriptor)));
+        DAWN_TRY_ASSIGN(layoutRef,
+                        PipelineLayoutBase::CreateDefault(
+                            device, GetRenderStagesAndSetPlaceholderShader(device, &descriptor)));
         outDescriptor->layout = layoutRef.Get();
     }
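Nearly every call site in this file flows through Dawn's DAWN_TRY / DAWN_TRY_ASSIGN macros. As a rough sketch of the assign-or-propagate pattern they implement (illustrative stand-in types only, not Dawn's actual Error.h machinery, which carries a full ErrorData with backtrace and context):

```cpp
#include <optional>
#include <string>
#include <utility>

// Minimal stand-in for illustration; Dawn's real ResultOrError is richer.
template <typename T>
struct Result {
    std::optional<T> value;
    std::string error;  // empty string means success
    bool IsError() const { return !error.empty(); }
};

// Evaluate EXPR once; on failure return the error to the caller, otherwise
// move the success value into LHS. This mirrors the shape of DAWN_TRY_ASSIGN.
#define TRY_ASSIGN(LHS, EXPR)                     \
    do {                                          \
        auto _result = (EXPR);                    \
        if (_result.IsError()) {                  \
            return {std::nullopt, _result.error}; \
        }                                         \
        LHS = std::move(*_result.value);          \
    } while (0)

Result<int> ParsePositive(int x) {
    if (x <= 0) {
        return {std::nullopt, "not positive"};
    }
    return {x, ""};
}

Result<int> Double(int x) {
    int parsed = 0;
    TRY_ASSIGN(parsed, ParsePositive(x));  // early-returns on error
    return {parsed * 2, ""};
}
```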
@ -568,8 +562,7 @@ namespace dawn::native {
         return returnValue;
     }
     ErrorScope scope = mErrorScopeStack->Pop();
-    callback(static_cast<WGPUErrorType>(scope.GetErrorType()), scope.GetErrorMessage(),
-             userdata);
+    callback(static_cast<WGPUErrorType>(scope.GetErrorType()), scope.GetErrorMessage(), userdata);
     return returnValue;
 }
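For context, the scope being popped here backs the WebGPU push/pop error-scope API. A usage sketch against the C header; the entry points and callback signature are assumed from the webgpu.h of this period, so treat the exact names as illustrative:

```cpp
#include <cstdio>
#include <webgpu/webgpu.h>

// Receives whatever error (if any) was captured while the scope was open.
static void OnErrorScopePopped(WGPUErrorType type, const char* message, void* /*userdata*/) {
    if (type != WGPUErrorType_NoError) {
        std::printf("captured error: %s\n", message);
    }
}

void CreateBufferChecked(WGPUDevice device, const WGPUBufferDescriptor* desc) {
    // Open a scope that captures only validation errors.
    wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
    WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, desc);
    // DeviceBase::APIPopErrorScope (above) pops the innermost scope and invokes
    // the callback with the captured error type and message.
    wgpuDevicePopErrorScope(device, OnErrorScopePopped, nullptr);
    (void)buffer;
}
```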
@ -731,8 +724,7 @@ namespace dawn::native {
     if (iter != mCaches->bindGroupLayouts.end()) {
         result = *iter;
     } else {
-        DAWN_TRY_ASSIGN(result,
-                        CreateBindGroupLayoutImpl(descriptor, pipelineCompatibilityToken));
+        DAWN_TRY_ASSIGN(result, CreateBindGroupLayoutImpl(descriptor, pipelineCompatibilityToken));
         result->SetIsCachedReference();
         result->SetContentHash(blueprintHash);
         mCaches->bindGroupLayouts.insert(result.Get());
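GetOrCreateBindGroupLayout is a content-addressed cache: hash the descriptor, return a cached object on a hit, and only call the backend Create*Impl on a miss. The same shape in a freestanding sketch (hypothetical Descriptor and Object types, not Dawn's):

```cpp
#include <cstddef>
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>

// Hypothetical descriptor: these two fields fully determine the object's content.
struct Descriptor {
    std::string label;
    int size = 0;
    bool operator==(const Descriptor& o) const { return label == o.label && size == o.size; }
};

struct DescriptorHash {
    size_t operator()(const Descriptor& d) const {
        return std::hash<std::string>()(d.label) ^ (std::hash<int>()(d.size) << 1);
    }
};

struct Object {
    Descriptor desc;
};

class ObjectCache {
  public:
    // Return a cached object with identical content, or create and insert one.
    std::shared_ptr<Object> GetOrCreate(const Descriptor& desc) {
        auto it = mCache.find(desc);
        if (it != mCache.end()) {
            return it->second;  // cache hit: share the existing object
        }
        auto obj = std::make_shared<Object>(Object{desc});  // expensive create on miss
        mCache.emplace(desc, obj);
        return obj;
    }

  private:
    std::unordered_map<Descriptor, std::shared_ptr<Object>, DescriptorHash> mCache;
};
```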
@ -921,8 +913,8 @@ namespace dawn::native {
         // now, so call validate. Most of |ValidateShaderModuleDescriptor| is parsing, but
         // we can consider splitting it if additional validation is added.
         ASSERT(!IsValidationEnabled());
-        DAWN_TRY(ValidateShaderModuleDescriptor(this, descriptor, parseResult,
-                                                compilationMessages));
+        DAWN_TRY(
+            ValidateShaderModuleDescriptor(this, descriptor, parseResult, compilationMessages));
     }
     DAWN_TRY_ASSIGN(result, CreateShaderModuleImpl(descriptor, parseResult));
     result->SetIsCachedReference();

@ -939,8 +931,7 @@ namespace dawn::native {
     ASSERT(removedCount == 1);
 }

-Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(
-    AttachmentStateBlueprint* blueprint) {
+Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(AttachmentStateBlueprint* blueprint) {
     auto iter = mCaches->attachmentStates.find(blueprint);
     if (iter != mCaches->attachmentStates.end()) {
         return static_cast<AttachmentState*>(*iter);

@ -981,8 +972,8 @@ namespace dawn::native {
 BindGroupBase* DeviceBase::APICreateBindGroup(const BindGroupDescriptor* descriptor) {
     Ref<BindGroupBase> result;
-    if (ConsumedError(CreateBindGroup(descriptor), &result, "calling %s.CreateBindGroup(%s).",
-                      this, descriptor)) {
+    if (ConsumedError(CreateBindGroup(descriptor), &result, "calling %s.CreateBindGroup(%s).", this,
+                      descriptor)) {
         return BindGroupBase::MakeError(this);
     }
     return result.Detach();

@ -1005,8 +996,7 @@ namespace dawn::native {
     }
     return result.Detach();
 }
-CommandEncoder* DeviceBase::APICreateCommandEncoder(
-    const CommandEncoderDescriptor* descriptor) {
+CommandEncoder* DeviceBase::APICreateCommandEncoder(const CommandEncoderDescriptor* descriptor) {
     Ref<CommandEncoder> result;
     if (ConsumedError(CreateCommandEncoder(descriptor), &result,
                       "calling %s.CreateCommandEncoder(%s).", this, descriptor)) {

@ -1055,8 +1045,8 @@ namespace dawn::native {
 }
 QuerySetBase* DeviceBase::APICreateQuerySet(const QuerySetDescriptor* descriptor) {
     Ref<QuerySetBase> result;
-    if (ConsumedError(CreateQuerySet(descriptor), &result, "calling %s.CreateQuerySet(%s).",
-                      this, descriptor)) {
+    if (ConsumedError(CreateQuerySet(descriptor), &result, "calling %s.CreateQuerySet(%s).", this,
+                      descriptor)) {
         return QuerySetBase::MakeError(this);
     }
     return result.Detach();

@ -1265,9 +1255,8 @@ namespace dawn::native {
 void DeviceBase::EmitLog(WGPULoggingType loggingType, const char* message) {
     if (mLoggingCallback != nullptr) {
         // Use the thread-safe CallbackTaskManager routine
-        std::unique_ptr<LoggingCallbackTask> callbackTask =
-            std::make_unique<LoggingCallbackTask>(mLoggingCallback, loggingType, message,
-                                                  mLoggingUserdata);
+        std::unique_ptr<LoggingCallbackTask> callbackTask = std::make_unique<LoggingCallbackTask>(
+            mLoggingCallback, loggingType, message, mLoggingUserdata);
         mCallbackTaskManager->AddCallbackTask(std::move(callbackTask));
     }
 }
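EmitLog hands the user callback to the CallbackTaskManager instead of invoking it inline, so callbacks run at a well-defined point rather than re-entering the device from arbitrary threads. A minimal sketch of such a deferred-callback queue (not Dawn's actual class):

```cpp
#include <functional>
#include <mutex>
#include <utility>
#include <vector>

// Producers enqueue from any thread; the owner flushes at a safe point
// (e.g. a device tick), where re-entrancy into the API is acceptable.
class CallbackQueue {
  public:
    void Add(std::function<void()> task) {
        std::lock_guard<std::mutex> lock(mMutex);
        mTasks.push_back(std::move(task));
    }

    void Flush() {
        std::vector<std::function<void()>> tasks;
        {
            std::lock_guard<std::mutex> lock(mMutex);
            tasks.swap(mTasks);  // run outside the lock so tasks may enqueue more
        }
        for (auto& task : tasks) {
            task();
        }
    }

  private:
    std::mutex mMutex;
    std::vector<std::function<void()>> mTasks;
};
```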
@ -1315,8 +1304,8 @@ namespace dawn::native {
     const BindGroupDescriptor* descriptor) {
     DAWN_TRY(ValidateIsAlive());
     if (IsValidationEnabled()) {
-        DAWN_TRY_CONTEXT(ValidateBindGroupDescriptor(this, descriptor),
-                         "validating %s against %s", descriptor, descriptor->layout);
+        DAWN_TRY_CONTEXT(ValidateBindGroupDescriptor(this, descriptor), "validating %s against %s",
+                         descriptor, descriptor->layout);
     }
     return CreateBindGroupImpl(descriptor);
 }

@ -1326,8 +1315,7 @@ namespace dawn::native {
     bool allowInternalBinding) {
     DAWN_TRY(ValidateIsAlive());
     if (IsValidationEnabled()) {
-        DAWN_TRY_CONTEXT(
-            ValidateBindGroupLayoutDescriptor(this, descriptor, allowInternalBinding),
+        DAWN_TRY_CONTEXT(ValidateBindGroupLayoutDescriptor(this, descriptor, allowInternalBinding),
                          "validating %s", descriptor);
     }
     return GetOrCreateBindGroupLayout(descriptor);

@ -1336,8 +1324,7 @@ namespace dawn::native {
 ResultOrError<Ref<BufferBase>> DeviceBase::CreateBuffer(const BufferDescriptor* descriptor) {
     DAWN_TRY(ValidateIsAlive());
     if (IsValidationEnabled()) {
-        DAWN_TRY_CONTEXT(ValidateBufferDescriptor(this, descriptor), "validating %s",
-                         descriptor);
+        DAWN_TRY_CONTEXT(ValidateBufferDescriptor(this, descriptor), "validating %s", descriptor);
     }

     Ref<BufferBase> buffer;

@ -1390,8 +1377,7 @@ namespace dawn::native {
     return CommandEncoder::Create(this, descriptor);
 }

-MaybeError DeviceBase::CreateComputePipelineAsync(
-    const ComputePipelineDescriptor* descriptor,
+MaybeError DeviceBase::CreateComputePipelineAsync(const ComputePipelineDescriptor* descriptor,
     WGPUCreateComputePipelineAsyncCallback callback,
     void* userdata) {
     DAWN_TRY(ValidateIsAlive());

@ -1412,8 +1398,8 @@ namespace dawn::native {
         GetCachedComputePipeline(uninitializedComputePipeline.Get());
     if (cachedComputePipeline.Get() != nullptr) {
         // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
-        callback(WGPUCreatePipelineAsyncStatus_Success, ToAPI(cachedComputePipeline.Detach()),
-                 "", userdata);
+        callback(WGPUCreatePipelineAsyncStatus_Success, ToAPI(cachedComputePipeline.Detach()), "",
+                 userdata);
     } else {
         // Otherwise we will create the pipeline object in InitializeComputePipelineAsyncImpl(),
         // where the pipeline object may be initialized asynchronously and the result will be

@ -1427,8 +1413,7 @@ namespace dawn::native {
 // This function is overwritten with the async version on the backends that supports
 // initializing compute pipelines asynchronously.
-void DeviceBase::InitializeComputePipelineAsyncImpl(
-    Ref<ComputePipelineBase> computePipeline,
+void DeviceBase::InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
     WGPUCreateComputePipelineAsyncCallback callback,
     void* userdata) {
     Ref<ComputePipelineBase> result;

@ -1443,15 +1428,14 @@ namespace dawn::native {
     }

     std::unique_ptr<CreateComputePipelineAsyncCallbackTask> callbackTask =
-        std::make_unique<CreateComputePipelineAsyncCallbackTask>(
-            std::move(result), errorMessage, callback, userdata);
+        std::make_unique<CreateComputePipelineAsyncCallbackTask>(std::move(result), errorMessage,
+                                                                 callback, userdata);
     mCallbackTaskManager->AddCallbackTask(std::move(callbackTask));
 }

 // This function is overwritten with the async version on the backends
 // that supports initializing render pipeline asynchronously
-void DeviceBase::InitializeRenderPipelineAsyncImpl(
-    Ref<RenderPipelineBase> renderPipeline,
+void DeviceBase::InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
     WGPUCreateRenderPipelineAsyncCallback callback,
     void* userdata) {
     Ref<RenderPipelineBase> result;

@ -1490,12 +1474,10 @@ namespace dawn::native {
     return ExternalTextureBase::Create(this, descriptor);
 }

-ResultOrError<Ref<QuerySetBase>> DeviceBase::CreateQuerySet(
-    const QuerySetDescriptor* descriptor) {
+ResultOrError<Ref<QuerySetBase>> DeviceBase::CreateQuerySet(const QuerySetDescriptor* descriptor) {
     DAWN_TRY(ValidateIsAlive());
     if (IsValidationEnabled()) {
-        DAWN_TRY_CONTEXT(ValidateQuerySetDescriptor(this, descriptor), "validating %s",
-                         descriptor);
+        DAWN_TRY_CONTEXT(ValidateQuerySetDescriptor(this, descriptor), "validating %s", descriptor);
     }
     return CreateQuerySetImpl(descriptor);
 }

@ -1559,8 +1541,8 @@ namespace dawn::native {
         GetCachedRenderPipeline(uninitializedRenderPipeline.Get());
     if (cachedRenderPipeline != nullptr) {
         // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
-        callback(WGPUCreatePipelineAsyncStatus_Success, ToAPI(cachedRenderPipeline.Detach()),
-                 "", userdata);
+        callback(WGPUCreatePipelineAsyncStatus_Success, ToAPI(cachedRenderPipeline.Detach()), "",
+                 userdata);
     } else {
         // Otherwise we will create the pipeline object in InitializeRenderPipelineAsyncImpl(),
         // where the pipeline object may be initialized asynchronously and the result will be

@ -1577,8 +1559,7 @@ namespace dawn::native {
     DAWN_TRY(ValidateIsAlive());
     descriptor = descriptor != nullptr ? descriptor : &defaultDescriptor;
     if (IsValidationEnabled()) {
-        DAWN_TRY_CONTEXT(ValidateSamplerDescriptor(this, descriptor), "validating %s",
-                         descriptor);
+        DAWN_TRY_CONTEXT(ValidateSamplerDescriptor(this, descriptor), "validating %s", descriptor);
     }
     return GetOrCreateSampler(descriptor);
 }

@ -1607,8 +1588,8 @@ namespace dawn::native {
     const SwapChainDescriptor* descriptor) {
     DAWN_TRY(ValidateIsAlive());
     if (IsValidationEnabled()) {
-        DAWN_TRY_CONTEXT(ValidateSwapChainDescriptor(this, surface, descriptor),
-                         "validating %s", descriptor);
+        DAWN_TRY_CONTEXT(ValidateSwapChainDescriptor(this, surface, descriptor), "validating %s",
+                         descriptor);
     }

     // TODO(dawn:269): Remove this code path once implementation-based swapchains are removed.

@ -1637,8 +1618,7 @@ namespace dawn::native {
 ResultOrError<Ref<TextureBase>> DeviceBase::CreateTexture(const TextureDescriptor* descriptor) {
     DAWN_TRY(ValidateIsAlive());
     if (IsValidationEnabled()) {
-        DAWN_TRY_CONTEXT(ValidateTextureDescriptor(this, descriptor), "validating %s.",
-                         descriptor);
+        DAWN_TRY_CONTEXT(ValidateTextureDescriptor(this, descriptor), "validating %s.", descriptor);
     }
     return CreateTextureImpl(descriptor);
 }

@ -1771,8 +1751,7 @@ namespace dawn::native {
         std::move(pipeline), errorMessage, callback, userdata));
 }

-void DeviceBase::AddRenderPipelineAsyncCallbackTask(
-    Ref<RenderPipelineBase> pipeline,
+void DeviceBase::AddRenderPipelineAsyncCallbackTask(Ref<RenderPipelineBase> pipeline,
     std::string errorMessage,
     WGPUCreateRenderPipelineAsyncCallback callback,
     void* userdata) {

@ -1816,8 +1795,7 @@ namespace dawn::native {
     SetLabelImpl();
 }

-void DeviceBase::SetLabelImpl() {
-}
+void DeviceBase::SetLabelImpl() {}

 bool DeviceBase::ShouldDuplicateNumWorkgroupsForDispatchIndirect(
     ComputePipelineBase* computePipeline) const {
@ -219,16 +219,14 @@ namespace dawn::native {
     MaybeError CreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
                                          WGPUCreateRenderPipelineAsyncCallback callback,
                                          void* userdata);
-    ResultOrError<Ref<SamplerBase>> CreateSampler(
-        const SamplerDescriptor* descriptor = nullptr);
+    ResultOrError<Ref<SamplerBase>> CreateSampler(const SamplerDescriptor* descriptor = nullptr);
     ResultOrError<Ref<ShaderModuleBase>> CreateShaderModule(
         const ShaderModuleDescriptor* descriptor,
         OwnedCompilationMessages* compilationMessages = nullptr);
     ResultOrError<Ref<SwapChainBase>> CreateSwapChain(Surface* surface,
                                                       const SwapChainDescriptor* descriptor);
     ResultOrError<Ref<TextureBase>> CreateTexture(const TextureDescriptor* descriptor);
-    ResultOrError<Ref<TextureViewBase>> CreateTextureView(
-        TextureBase* texture,
+    ResultOrError<Ref<TextureViewBase>> CreateTextureView(TextureBase* texture,
                                                           const TextureViewDescriptor* descriptor);

     // Implementation of API object creation methods. DO NOT use them in a reentrant manner.

@ -277,8 +275,7 @@ namespace dawn::native {
     BlobCache* GetBlobCache();

-    virtual ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(
-        size_t size) = 0;
+    virtual ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) = 0;
     virtual MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
                                                uint64_t sourceOffset,
                                                BufferBase* destination,

@ -380,8 +377,7 @@ namespace dawn::native {
     void APISetLabel(const char* label);
     void APIDestroy();

-    virtual void AppendDebugLayerMessages(ErrorData* error) {
-    }
+    virtual void AppendDebugLayerMessages(ErrorData* error) {}

   protected:
     // Constructor used only for mocking and testing.

@ -403,8 +399,7 @@ namespace dawn::native {
     virtual ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
         const BindGroupLayoutDescriptor* descriptor,
         PipelineCompatibilityToken pipelineCompatibilityToken) = 0;
-    virtual ResultOrError<Ref<BufferBase>> CreateBufferImpl(
-        const BufferDescriptor* descriptor) = 0;
+    virtual ResultOrError<Ref<BufferBase>> CreateBufferImpl(const BufferDescriptor* descriptor) = 0;
     virtual ResultOrError<Ref<ExternalTextureBase>> CreateExternalTextureImpl(
         const ExternalTextureDescriptor* descriptor);
     virtual ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(

@ -445,14 +440,11 @@ namespace dawn::native {
         RenderPipelineBase* uninitializedRenderPipeline);
     Ref<ComputePipelineBase> AddOrGetCachedComputePipeline(
         Ref<ComputePipelineBase> computePipeline);
-    Ref<RenderPipelineBase> AddOrGetCachedRenderPipeline(
-        Ref<RenderPipelineBase> renderPipeline);
-    virtual void InitializeComputePipelineAsyncImpl(
-        Ref<ComputePipelineBase> computePipeline,
+    Ref<RenderPipelineBase> AddOrGetCachedRenderPipeline(Ref<RenderPipelineBase> renderPipeline);
+    virtual void InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
         WGPUCreateComputePipelineAsyncCallback callback,
         void* userdata);
-    virtual void InitializeRenderPipelineAsyncImpl(
-        Ref<RenderPipelineBase> renderPipeline,
+    virtual void InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
         WGPUCreateRenderPipelineAsyncCallback callback,
         void* userdata);
@ -22,13 +22,12 @@
 namespace dawn::native {

 DynamicUploader::DynamicUploader(DeviceBase* device) : mDevice(device) {
-    mRingBuffers.emplace_back(std::unique_ptr<RingBuffer>(
-        new RingBuffer{nullptr, RingBufferAllocator(kRingBufferSize)}));
+    mRingBuffers.emplace_back(
+        std::unique_ptr<RingBuffer>(new RingBuffer{nullptr, RingBufferAllocator(kRingBufferSize)}));
 }

 void DynamicUploader::ReleaseStagingBuffer(std::unique_ptr<StagingBufferBase> stagingBuffer) {
-    mReleasedStagingBuffers.Enqueue(std::move(stagingBuffer),
-                                    mDevice->GetPendingCommandSerial());
+    mReleasedStagingBuffers.Enqueue(std::move(stagingBuffer), mDevice->GetPendingCommandSerial());
 }

 ResultOrError<UploadHandle> DynamicUploader::AllocateInternal(uint64_t allocationSize,

@ -120,12 +119,10 @@ namespace dawn::native {
     uint64_t offsetAlignment) {
     ASSERT(offsetAlignment > 0);
     UploadHandle uploadHandle;
-    DAWN_TRY_ASSIGN(uploadHandle,
-                    AllocateInternal(allocationSize + offsetAlignment - 1, serial));
+    DAWN_TRY_ASSIGN(uploadHandle, AllocateInternal(allocationSize + offsetAlignment - 1, serial));
     uint64_t additionalOffset =
         Align(uploadHandle.startOffset, offsetAlignment) - uploadHandle.startOffset;
-    uploadHandle.mappedBuffer =
-        static_cast<uint8_t*>(uploadHandle.mappedBuffer) + additionalOffset;
+    uploadHandle.mappedBuffer = static_cast<uint8_t*>(uploadHandle.mappedBuffer) + additionalOffset;
     uploadHandle.startOffset += additionalOffset;
     return uploadHandle;
 }
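Allocate over-allocates by offsetAlignment - 1 bytes and then rounds the returned offset up, so an aligned offset always fits inside the allocation. A worked sketch of the arithmetic, assuming Align rounds up to a power-of-two boundary as Dawn's helper does:

```cpp
#include <cassert>
#include <cstdint>

// Round x up to the next multiple of a power-of-two alignment.
uint64_t AlignUp(uint64_t x, uint64_t alignment) {
    assert(alignment != 0 && (alignment & (alignment - 1)) == 0);  // power of two
    return (x + alignment - 1) & ~(alignment - 1);
}

int main() {
    // Suppose the ring buffer hands back startOffset = 13 and we need 256-byte
    // alignment for a 64-byte allocation. Requesting 64 + 256 - 1 bytes means
    // the rounded-up offset 256 still lies inside the allocation.
    uint64_t startOffset = 13;
    uint64_t aligned = AlignUp(startOffset, 256);       // -> 256
    uint64_t additionalOffset = aligned - startOffset;  // -> 243, at most 255 by construction
    assert(aligned == 256 && additionalOffset == 243);
    return 0;
}
```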
@ -57,8 +57,7 @@ namespace dawn::native {
         RingBufferAllocator mAllocator;
     };

-    ResultOrError<UploadHandle> AllocateInternal(uint64_t allocationSize,
-                                                 ExecutionSerial serial);
+    ResultOrError<UploadHandle> AllocateInternal(uint64_t allocationSize, ExecutionSerial serial);

     std::vector<std::unique_ptr<RingBuffer>> mRingBuffers;
     SerialQueue<ExecutionSerial, std::unique_ptr<StagingBufferBase>> mReleasedStagingBuffers;
@ -25,8 +25,7 @@
 namespace dawn::native {

-EncodingContext::EncodingContext(DeviceBase* device, const ApiObjectBase* initialEncoder)
-    : mDevice(device), mTopLevelEncoder(initialEncoder), mCurrentEncoder(initialEncoder) {
-}
+EncodingContext::EncodingContext(DeviceBase* device, const ApiObjectBase* initialEncoder)
+    : mDevice(device), mTopLevelEncoder(initialEncoder), mCurrentEncoder(initialEncoder) {}

 EncodingContext::~EncodingContext() {
     Destroy();

@ -91,8 +90,7 @@ namespace dawn::native {
 void EncodingContext::WillBeginRenderPass() {
     ASSERT(mCurrentEncoder == mTopLevelEncoder);
-    if (mDevice->IsValidationEnabled() ||
-        mDevice->MayRequireDuplicationOfIndirectParameters()) {
+    if (mDevice->IsValidationEnabled() || mDevice->MayRequireDuplicationOfIndirectParameters()) {
         // When validation is enabled or indirect parameters require duplication, we are going
         // to want to capture all commands encoded between and including BeginRenderPassCmd and
         // EndRenderPassCmd, and defer their sequencing util after we have a chance to insert

@ -120,8 +118,7 @@ namespace dawn::native {
     mCurrentEncoder = mTopLevelEncoder;

-    if (mDevice->IsValidationEnabled() ||
-        mDevice->MayRequireDuplicationOfIndirectParameters()) {
+    if (mDevice->IsValidationEnabled() || mDevice->MayRequireDuplicationOfIndirectParameters()) {
         // With validation enabled, commands were committed just before BeginRenderPassCmd was
         // encoded by our RenderPassEncoder (see WillBeginRenderPass above). This means
         // mPendingCommands contains only the commands from BeginRenderPassCmd to
@ -59,9 +59,7 @@ namespace dawn::native {
     }

     template <typename... Args>
-    inline bool ConsumedError(MaybeError maybeError,
-                              const char* formatStr,
-                              const Args&... args) {
+    inline bool ConsumedError(MaybeError maybeError, const char* formatStr, const Args&... args) {
         if (DAWN_UNLIKELY(maybeError.IsError())) {
             std::unique_ptr<ErrorData> error = maybeError.AcquireError();
             if (error->GetType() == InternalErrorType::Validation) {

@ -70,8 +68,8 @@ namespace dawn::native {
                 if (absl::FormatUntyped(&out, format, {absl::FormatArg(args)...})) {
                     error->AppendContext(std::move(out));
                 } else {
-                    error->AppendContext(absl::StrFormat(
-                        "[Failed to format error message: \"%s\"].", formatStr));
+                    error->AppendContext(
+                        absl::StrFormat("[Failed to format error message: \"%s\"].", formatStr));
                 }
             }
             HandleError(std::move(error));

@ -83,8 +81,7 @@ namespace dawn::native {
     inline bool CheckCurrentEncoder(const ApiObjectBase* encoder) {
         if (DAWN_UNLIKELY(encoder != mCurrentEncoder)) {
             if (mDestroyed) {
-                HandleError(
-                    DAWN_FORMAT_VALIDATION_ERROR("Recording in a destroyed %s.", encoder));
+                HandleError(DAWN_FORMAT_VALIDATION_ERROR("Recording in a destroyed %s.", encoder));
             } else if (mCurrentEncoder != mTopLevelEncoder) {
                 // The top level encoder was used when a pass encoder was current.
                 HandleError(DAWN_FORMAT_VALIDATION_ERROR(
@ -36,21 +36,16 @@ namespace dawn::native {
     class Iterator final {
       public:
-        explicit Iterator(const typename BitSetIterator<N, U>::Iterator& iter) : mIter(iter) {
-        }
+        explicit Iterator(const typename BitSetIterator<N, U>::Iterator& iter) : mIter(iter) {}

         Iterator& operator++() {
             ++mIter;
             return *this;
         }

-        bool operator==(const Iterator& other) const {
-            return mIter == other.mIter;
-        }
+        bool operator==(const Iterator& other) const { return mIter == other.mIter; }

-        bool operator!=(const Iterator& other) const {
-            return mIter != other.mIter;
-        }
+        bool operator!=(const Iterator& other) const { return mIter != other.mIter; }

         T operator*() const {
             U value = *mIter;

@ -61,13 +56,9 @@ namespace dawn::native {
         typename BitSetIterator<N, U>::Iterator mIter;
     };

-    Iterator begin() const {
-        return Iterator(mBitSetIterator.begin());
-    }
+    Iterator begin() const { return Iterator(mBitSetIterator.begin()); }

-    Iterator end() const {
-        return Iterator(mBitSetIterator.end());
-    }
+    Iterator end() const { return Iterator(mBitSetIterator.end()); }

   private:
     BitSetIterator<N, U> mBitSetIterator;
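The class above is a thin adapter: traversal is forwarded to the wrapped BitSetIterator and only the dereference converts to the typed enum. The same pattern reduced to a self-contained example (hypothetical Aspect enum, sketching the idea rather than Dawn's template):

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

enum class Aspect : uint32_t { Color = 0, Depth = 1, Stencil = 2 };

// Adapter that walks a vector of raw indices but yields typed enum values.
class AspectIterator {
  public:
    explicit AspectIterator(std::vector<uint32_t>::const_iterator it) : mIt(it) {}
    AspectIterator& operator++() { ++mIt; return *this; }
    bool operator!=(const AspectIterator& other) const { return mIt != other.mIt; }
    Aspect operator*() const { return static_cast<Aspect>(*mIt); }  // the only real work

  private:
    std::vector<uint32_t>::const_iterator mIt;
};

struct AspectRange {
    const std::vector<uint32_t>& bits;
    AspectIterator begin() const { return AspectIterator(bits.begin()); }
    AspectIterator end() const { return AspectIterator(bits.end()); }
};

int main() {
    std::vector<uint32_t> raw = {0, 2};
    for (Aspect a : AspectRange{raw}) {
        std::cout << static_cast<uint32_t>(a) << "\n";  // prints 0 then 2
    }
}
```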
@ -33,8 +33,7 @@ namespace dawn::native {
 }

-ErrorData::ErrorData(InternalErrorType type, std::string message)
-    : mType(type), mMessage(std::move(message)) {
-}
+ErrorData::ErrorData(InternalErrorType type, std::string message)
+    : mType(type), mMessage(std::move(message)) {}

 void ErrorData::AppendBacktrace(const char* file, const char* function, int line) {
     BacktraceRecord record;

@ -95,8 +94,8 @@ namespace dawn::native {
     // stack trace for debugging purposes.
     if (mContexts.empty() || mType != InternalErrorType::Validation) {
         for (const auto& callsite : mBacktrace) {
-            ss << "    at " << callsite.function << " (" << callsite.file << ":"
-               << callsite.line << ")\n";
+            ss << "    at " << callsite.function << " (" << callsite.file << ":" << callsite.line
+               << ")\n";
         }
     }
@ -35,8 +35,10 @@ namespace dawn::native {
 class [[nodiscard]] ErrorData {
   public:
-    [[nodiscard]] static std::unique_ptr<ErrorData> Create(
-        InternalErrorType type, std::string message, const char* file, const char* function,
-        int line);
+    [[nodiscard]] static std::unique_ptr<ErrorData> Create(InternalErrorType type,
+                                                           std::string message,
+                                                           const char* file,
+                                                           const char* function,
+                                                           int line);
     ErrorData(InternalErrorType type, std::string message);
@ -35,8 +35,7 @@ namespace dawn::native {
 }  // namespace

-ErrorScope::ErrorScope(wgpu::ErrorFilter errorFilter)
-    : mMatchedErrorType(ErrorFilterToErrorType(errorFilter)) {
-}
+ErrorScope::ErrorScope(wgpu::ErrorFilter errorFilter)
+    : mMatchedErrorType(ErrorFilterToErrorType(errorFilter)) {}

 wgpu::ErrorType ErrorScope::GetErrorType() const {
     return mCapturedError;
@ -37,12 +37,12 @@ namespace dawn::native {
                     textureView->GetDimension());

     DAWN_INVALID_IF(textureView->GetLevelCount() > 1,
-                    "The external texture plane (%s) mip level count (%u) is not 1.",
-                    textureView, textureView->GetLevelCount());
+                    "The external texture plane (%s) mip level count (%u) is not 1.", textureView,
+                    textureView->GetLevelCount());

     DAWN_INVALID_IF(textureView->GetTexture()->GetSampleCount() != 1,
-                    "The external texture plane (%s) sample count (%u) is not one.",
-                    textureView, textureView->GetTexture()->GetSampleCount());
+                    "The external texture plane (%s) sample count (%u) is not one.", textureView,
+                    textureView->GetTexture()->GetSampleCount());

     return {};
 }

@ -118,8 +118,7 @@ namespace dawn::native {
 }

-ExternalTextureBase::ExternalTextureBase(DeviceBase* device, ObjectBase::ErrorTag tag)
-    : ApiObjectBase(device, tag) {
-}
+ExternalTextureBase::ExternalTextureBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+    : ApiObjectBase(device, tag) {}

 ExternalTextureBase::~ExternalTextureBase() = default;

@ -191,8 +190,8 @@ namespace dawn::native {
     return {};
 }

-const std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat>&
-ExternalTextureBase::GetTextureViews() const {
+const std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat>& ExternalTextureBase::GetTextureViews()
+    const {
     return mTextureViews;
 }
@ -82,8 +82,7 @@ namespace dawn::native {
       "dawn_internal_usages.md"},
      &WGPUDeviceProperties::dawnInternalUsages},
     {Feature::MultiPlanarFormats,
-     {"multiplanar-formats",
-      "Import and use multi-planar texture formats with per plane views",
+     {"multiplanar-formats", "Import and use multi-planar texture formats with per plane views",
       "https://bugs.chromium.org/p/dawn/issues/detail?id=551"},
      &WGPUDeviceProperties::multiPlanarFormats},
     {Feature::DawnNative,

@ -250,8 +249,8 @@ namespace dawn::native {
     }

     // TODO(dawn:550): Remove this fallback logic when Chromium is updated.
-    constexpr std::array<std::pair<const char*, const char*>, 6>
-        kReplacementsForDeprecatedNames = {{
+    constexpr std::array<std::pair<const char*, const char*>, 6> kReplacementsForDeprecatedNames = {
+        {
         {"texture_compression_bc", "texture-compression-bc"},
         {"depth_clamping", "depth-clamping"},
         {"pipeline_statistics_query", "pipeline-statistics-query"},
@ -142,8 +142,7 @@ namespace dawn::native {
 FormatIndex ComputeFormatIndex(wgpu::TextureFormat format) {
     // This takes advantage of overflows to make the index of TextureFormat::Undefined outside
     // of the range of the FormatTable.
-    static_assert(static_cast<uint32_t>(wgpu::TextureFormat::Undefined) - 1 >
-                  kKnownFormatCount);
+    static_assert(static_cast<uint32_t>(wgpu::TextureFormat::Undefined) - 1 > kKnownFormatCount);
     return static_cast<FormatIndex>(static_cast<uint32_t>(format) - 1);
 }
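The static_assert leans on well-defined unsigned wraparound: TextureFormat::Undefined is 0, so subtracting 1 as a uint32_t wraps to 0xFFFFFFFF, far outside the table. A tiny demonstration with a stand-in enum (not the real wgpu::TextureFormat):

```cpp
#include <cstdint>
#include <iostream>

enum class Format : uint32_t { Undefined = 0, R8 = 1, RG8 = 2 };
constexpr uint32_t kKnownFormatCount = 2;

constexpr uint32_t ComputeIndex(Format f) {
    // Undefined (0) wraps to 0xFFFFFFFF, which any bounds check rejects;
    // valid formats map densely onto 0..kKnownFormatCount-1.
    return static_cast<uint32_t>(f) - 1;
}

static_assert(ComputeIndex(Format::Undefined) > kKnownFormatCount);
static_assert(ComputeIndex(Format::R8) == 0 && ComputeIndex(Format::RG8) == 1);

int main() {
    std::cout << ComputeIndex(Format::Undefined) << "\n";  // 4294967295
}
```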
@ -284,8 +283,8 @@ namespace dawn::native {
     };

     auto AddCompressedFormat =
-        [&AddFormat](wgpu::TextureFormat format, uint32_t byteSize, uint32_t width,
-                     uint32_t height, bool isSupported, uint8_t componentCount,
+        [&AddFormat](wgpu::TextureFormat format, uint32_t byteSize, uint32_t width, uint32_t height,
+                     bool isSupported, uint8_t componentCount,
                      wgpu::TextureFormat baseFormat = wgpu::TextureFormat::Undefined) {
             Format internalFormat;
             internalFormat.format = format;

@ -315,11 +314,10 @@ namespace dawn::native {
         AddFormat(internalFormat);
     };

-    auto AddMultiAspectFormat = [&AddFormat, &table](wgpu::TextureFormat format, Aspect aspects,
-                                                     wgpu::TextureFormat firstFormat,
-                                                     wgpu::TextureFormat secondFormat,
-                                                     bool isRenderable, bool isSupported,
-                                                     bool supportsMultisample,
-                                                     uint8_t componentCount) {
+    auto AddMultiAspectFormat =
+        [&AddFormat, &table](wgpu::TextureFormat format, Aspect aspects,
+                             wgpu::TextureFormat firstFormat, wgpu::TextureFormat secondFormat,
+                             bool isRenderable, bool isSupported, bool supportsMultisample,
+                             uint8_t componentCount) {
             Format internalFormat;
             internalFormat.format = format;
@ -32,8 +32,7 @@ namespace dawn::native {
 IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::IndexedIndirectBufferValidationInfo(
     BufferBase* indirectBuffer)
-    : mIndirectBuffer(indirectBuffer) {
-}
+    : mIndirectBuffer(indirectBuffer) {}

 void IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::AddIndirectDraw(
     uint32_t maxDrawCallsPerIndirectValidationBatch,

@ -99,8 +98,8 @@ namespace dawn::native {
         IndirectValidationBatch& batch = *it;
         uint64_t min = std::min(newBatch.minOffset, batch.minOffset);
         uint64_t max = std::max(newBatch.maxOffset, batch.maxOffset);
-        if (max - min <= maxBatchOffsetRange && batch.draws.size() + newBatch.draws.size() <=
-                                                    maxDrawCallsPerIndirectValidationBatch) {
+        if (max - min <= maxBatchOffsetRange &&
+            batch.draws.size() + newBatch.draws.size() <= maxDrawCallsPerIndirectValidationBatch) {
             // This batch fits within the limits of an existing batch. Merge it.
             batch.minOffset = min;
             batch.maxOffset = max;
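The reflowed condition is an interval merge: fold the new batch into an existing one only if the union of their offset ranges stays within the allowed span and the combined draw count stays under the per-batch cap. The same rule in isolation, with made-up numbers:

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

struct Batch {
    uint64_t minOffset;
    uint64_t maxOffset;
    std::vector<int> draws;  // stand-in for validation draw records
};

// Merge rule mirroring the code above: the merged range and draw count must
// both stay within the configured limits.
bool TryMerge(Batch& existing, const Batch& incoming, uint64_t maxRange, size_t maxDraws) {
    uint64_t min = std::min(existing.minOffset, incoming.minOffset);
    uint64_t max = std::max(existing.maxOffset, incoming.maxOffset);
    if (max - min > maxRange || existing.draws.size() + incoming.draws.size() > maxDraws) {
        return false;  // keep the batches separate
    }
    existing.minOffset = min;
    existing.maxOffset = max;
    existing.draws.insert(existing.draws.end(), incoming.draws.begin(), incoming.draws.end());
    return true;
}

int main() {
    Batch a{0, 100, {1, 2}};
    Batch b{80, 150, {3}};
    bool merged = TryMerge(a, b, /*maxRange=*/256, /*maxDraws=*/4);  // ranges overlap, fits
    assert(merged && a.minOffset == 0 && a.maxOffset == 150 && a.draws.size() == 3);
    (void)merged;
}
```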
@ -124,8 +123,7 @@ namespace dawn::native {
 IndirectDrawMetadata::IndirectDrawMetadata(const CombinedLimits& limits)
     : mMaxBatchOffsetRange(ComputeMaxIndirectValidationBatchOffsetRange(limits)),
-      mMaxDrawCallsPerBatch(ComputeMaxDrawCallsPerIndirectValidationBatch(limits)) {
-}
+      mMaxDrawCallsPerBatch(ComputeMaxDrawCallsPerIndirectValidationBatch(limits)) {}

 IndirectDrawMetadata::~IndirectDrawMetadata() = default;

@ -213,16 +211,14 @@ namespace dawn::native {
 bool IndirectDrawMetadata::IndexedIndirectConfig::operator<(
     const IndexedIndirectConfig& other) const {
     return std::tie(inputIndirectBuffer, numIndexBufferElements, duplicateBaseVertexInstance,
-                    drawType) < std::tie(other.inputIndirectBuffer,
-                                         other.numIndexBufferElements,
-                                         other.duplicateBaseVertexInstance, other.drawType);
+                    drawType) < std::tie(other.inputIndirectBuffer, other.numIndexBufferElements,
+                                         other.duplicateBaseVertexInstance, other.drawType);
 }

 bool IndirectDrawMetadata::IndexedIndirectConfig::operator==(
     const IndexedIndirectConfig& other) const {
     return std::tie(inputIndirectBuffer, numIndexBufferElements, duplicateBaseVertexInstance,
-                    drawType) == std::tie(other.inputIndirectBuffer,
-                                          other.numIndexBufferElements,
-                                          other.duplicateBaseVertexInstance, other.drawType);
+                    drawType) == std::tie(other.inputIndirectBuffer, other.numIndexBufferElements,
+                                          other.duplicateBaseVertexInstance, other.drawType);
 }
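Both operators compare one tuple of members via std::tie, which gives lexicographic ordering and keeps < and == trivially consistent with each other. The idiom in miniature:

```cpp
#include <cassert>
#include <tuple>

struct Config {
    int buffer;
    int elements;
    bool duplicate;

    bool operator<(const Config& o) const {
        // Lexicographic: compare buffer first, then elements, then duplicate.
        return std::tie(buffer, elements, duplicate) < std::tie(o.buffer, o.elements, o.duplicate);
    }
    bool operator==(const Config& o) const {
        return std::tie(buffer, elements, duplicate) == std::tie(o.buffer, o.elements, o.duplicate);
    }
};

int main() {
    assert(Config{1, 5, false} < Config{1, 6, false});    // decided by the second field
    assert(Config{1, 5, false} == Config{1, 5, false});
    assert(!(Config{2, 0, false} < Config{1, 9, true}));  // first field dominates
}
```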
@ -178,15 +178,13 @@ namespace dawn::native {
     }
 )";

-ResultOrError<ComputePipelineBase*> GetOrCreateRenderValidationPipeline(
-    DeviceBase* device) {
+ResultOrError<ComputePipelineBase*> GetOrCreateRenderValidationPipeline(DeviceBase* device) {
     InternalPipelineStore* store = device->GetInternalPipelineStore();

     if (store->renderValidationPipeline == nullptr) {
         // Create compute shader module if not cached before.
         if (store->renderValidationShader == nullptr) {
-            DAWN_TRY_ASSIGN(
-                store->renderValidationShader,
-                utils::CreateShaderModule(device, sRenderValidationShaderSource));
+            DAWN_TRY_ASSIGN(store->renderValidationShader,
+                            utils::CreateShaderModule(device, sRenderValidationShaderSource));
         }

@ -196,16 +194,14 @@ namespace dawn::native {
             utils::MakeBindGroupLayout(
                 device,
                 {
-                    {0, wgpu::ShaderStage::Compute,
-                     wgpu::BufferBindingType::ReadOnlyStorage},
+                    {0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage},
                     {1, wgpu::ShaderStage::Compute, kInternalStorageBufferBinding},
                     {2, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage},
                 },
                 /* allowInternalBinding */ true));

         Ref<PipelineLayoutBase> pipelineLayout;
-        DAWN_TRY_ASSIGN(pipelineLayout,
-                        utils::MakeBasicPipelineLayout(device, bindGroupLayout));
+        DAWN_TRY_ASSIGN(pipelineLayout, utils::MakeBasicPipelineLayout(device, bindGroupLayout));

         ComputePipelineDescriptor computePipelineDescriptor = {};
         computePipelineDescriptor.layout = pipelineLayout.Get();

@ -272,15 +268,13 @@ namespace dawn::native {
         return {};
     }

-    const uint64_t maxStorageBufferBindingSize =
-        device->GetLimits().v1.maxStorageBufferBindingSize;
+    const uint64_t maxStorageBufferBindingSize = device->GetLimits().v1.maxStorageBufferBindingSize;
     const uint32_t minStorageBufferOffsetAlignment =
         device->GetLimits().v1.minStorageBufferOffsetAlignment;

     for (auto& [config, validationInfo] : bufferInfoMap) {
         const uint64_t indirectDrawCommandSize =
-            config.drawType == IndirectDrawMetadata::DrawType::Indexed
-                ? kDrawIndexedIndirectSize
-                : kDrawIndirectSize;
+            config.drawType == IndirectDrawMetadata::DrawType::Indexed ? kDrawIndexedIndirectSize
+                                                                       : kDrawIndirectSize;

         uint64_t outputIndirectSize = indirectDrawCommandSize;

@ -292,8 +286,7 @@ namespace dawn::native {
              validationInfo.GetBatches()) {
             const uint64_t minOffsetFromAlignedBoundary =
                 batch.minOffset % minStorageBufferOffsetAlignment;
-            const uint64_t minOffsetAlignedDown =
-                batch.minOffset - minOffsetFromAlignedBoundary;
+            const uint64_t minOffsetAlignedDown = batch.minOffset - minOffsetFromAlignedBoundary;

             Batch newBatch;
             newBatch.metadata = &batch;

@ -304,8 +297,7 @@ namespace dawn::native {
                 batch.maxOffset + indirectDrawCommandSize - minOffsetAlignedDown;
             newBatch.outputParamsSize = batch.draws.size() * outputIndirectSize;

-            newBatch.outputParamsOffset =
-                Align(outputParamsSize, minStorageBufferOffsetAlignment);
+            newBatch.outputParamsOffset = Align(outputParamsSize, minStorageBufferOffsetAlignment);
             outputParamsSize = newBatch.outputParamsOffset + newBatch.outputParamsSize;
             if (outputParamsSize > maxStorageBufferBindingSize) {
                 return DAWN_INTERNAL_ERROR("Too many drawIndexedIndirect calls to validate");

@ -376,8 +368,8 @@ namespace dawn::native {
         uint64_t outputParamsOffset = batch.outputParamsOffset;
         for (auto& draw : batch.metadata->draws) {
             // The shader uses this to index an array of u32, hence the division by 4 bytes.
-            *indirectOffsets++ = static_cast<uint32_t>(
-                (draw.inputBufferOffset - batch.inputIndirectOffset) / 4);
+            *indirectOffsets++ =
+                static_cast<uint32_t>((draw.inputBufferOffset - batch.inputIndirectOffset) / 4);

             draw.cmd->indirectBuffer = outputParamsBuffer.GetBuffer();
             draw.cmd->indirectOffset = outputParamsOffset;
@ -332,8 +332,7 @@ namespace dawn::native {
 #if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
         case wgpu::BackendType::OpenGL:
-            Register(opengl::Connect(this, wgpu::BackendType::OpenGL),
-                     wgpu::BackendType::OpenGL);
+            Register(opengl::Connect(this, wgpu::BackendType::OpenGL), wgpu::BackendType::OpenGL);
             break;
 #endif  // defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
@ -106,8 +106,7 @@ namespace dawn::native {
     MaybeError DiscoverAdaptersInternal(const AdapterDiscoveryOptionsBase* options);

-    ResultOrError<Ref<AdapterBase>> RequestAdapterInternal(
-        const RequestAdapterOptions* options);
+    ResultOrError<Ref<AdapterBase>> RequestAdapterInternal(const RequestAdapterOptions* options);

     std::vector<std::string> mRuntimeSearchPaths;
@ -28,10 +28,9 @@ namespace dawn::native {
 InternalPipelineStore::InternalPipelineStore(DeviceBase* device)
     : scratchStorage(device, wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Storage),
-      scratchIndirectStorage(device,
-                             wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Indirect |
-                                 wgpu::BufferUsage::Storage) {
-}
+      scratchIndirectStorage(
+          device,
+          wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Indirect | wgpu::BufferUsage::Storage) {}

 InternalPipelineStore::~InternalPipelineStore() = default;
@ -33,8 +33,7 @@ namespace dawn::native {
     explicit InternalPipelineStore(DeviceBase* device);
     ~InternalPipelineStore();

-    std::unordered_map<wgpu::TextureFormat, Ref<RenderPipelineBase>>
-        copyTextureForBrowserPipelines;
+    std::unordered_map<wgpu::TextureFormat, Ref<RenderPipelineBase>> copyTextureForBrowserPipelines;
     Ref<ShaderModuleBase> copyTextureForBrowser;
@ -100,10 +100,10 @@ namespace dawn::native {
     template <typename T>
     static MaybeError Validate(T supported, T required) {
         DAWN_INVALID_IF(IsBetter(required, supported),
-                        "Required limit (%u) is lower than the supported limit (%u).",
-                        required, supported);
-        DAWN_INVALID_IF(!IsPowerOfTwo(required),
-                        "Required limit (%u) is not a power of two.", required);
+                        "Required limit (%u) is lower than the supported limit (%u).", required,
+                        supported);
+        DAWN_INVALID_IF(!IsPowerOfTwo(required), "Required limit (%u) is not a power of two.",
+                        required);
         return {};
     }
 };

@ -118,8 +118,8 @@ namespace dawn::native {
     template <typename T>
     static MaybeError Validate(T supported, T required) {
         DAWN_INVALID_IF(IsBetter(required, supported),
-                        "Required limit (%u) is greater than the supported limit (%u).",
-                        required, supported);
+                        "Required limit (%u) is greater than the supported limit (%u).", required,
+                        supported);
         return {};
     }
 };
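The limit class validated first requires power-of-two values, presumably via the usual bit trick. A sketch of such a predicate and the alignment-limit rule the error messages above imply (a required alignment may not be stricter, i.e. lower, than the supported one); this is an illustration, not Dawn's exact helper:

```cpp
#include <cstdint>

// A power of two has exactly one bit set, so x & (x - 1) clears it to zero.
constexpr bool IsPowerOfTwo(uint64_t x) {
    return x != 0 && (x & (x - 1)) == 0;
}

// For alignment-style limits, "better" means smaller; requiring something
// better (lower) than what the implementation supports must be rejected.
constexpr bool ValidateAlignmentLimit(uint64_t supported, uint64_t required) {
    return IsPowerOfTwo(required) && required >= supported;
}

static_assert(IsPowerOfTwo(256) && !IsPowerOfTwo(768));
static_assert(ValidateAlignmentLimit(/*supported=*/256, /*required=*/256));
static_assert(!ValidateAlignmentLimit(/*supported=*/256, /*required=*/128));  // too strict

int main() { return 0; }
```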
@ -14,20 +14,17 @@
 #include <mutex>

-#include "dawn/native/ObjectBase.h"
 #include "dawn/native/Device.h"
+#include "dawn/native/ObjectBase.h"

 namespace dawn::native {

 static constexpr uint64_t kErrorPayload = 0;
 static constexpr uint64_t kNotErrorPayload = 1;

-ObjectBase::ObjectBase(DeviceBase* device) : RefCounted(kNotErrorPayload), mDevice(device) {
-}
+ObjectBase::ObjectBase(DeviceBase* device) : RefCounted(kNotErrorPayload), mDevice(device) {}

-ObjectBase::ObjectBase(DeviceBase* device, ErrorTag)
-    : RefCounted(kErrorPayload), mDevice(device) {
-}
+ObjectBase::ObjectBase(DeviceBase* device, ErrorTag) : RefCounted(kErrorPayload), mDevice(device) {}

 DeviceBase* ObjectBase::GetDevice() const {
     return mDevice;

@ -43,12 +40,9 @@ namespace dawn::native {
     }
 }

-ApiObjectBase::ApiObjectBase(DeviceBase* device, ErrorTag tag) : ObjectBase(device, tag) {
-}
+ApiObjectBase::ApiObjectBase(DeviceBase* device, ErrorTag tag) : ObjectBase(device, tag) {}

-ApiObjectBase::ApiObjectBase(DeviceBase* device, LabelNotImplementedTag tag)
-    : ObjectBase(device) {
-}
+ApiObjectBase::ApiObjectBase(DeviceBase* device, LabelNotImplementedTag tag) : ObjectBase(device) {}

 ApiObjectBase::~ApiObjectBase() {
     ASSERT(!IsAlive());

@ -63,8 +57,7 @@ namespace dawn::native {
     return mLabel;
 }

-void ApiObjectBase::SetLabelImpl() {
-}
+void ApiObjectBase::SetLabelImpl() {}

 bool ApiObjectBase::IsAlive() const {
     return IsInList();
@ -44,8 +44,7 @@ namespace dawn::native {
                                    texture->GetNumMipLevels(), wgpu::TextureUsage::None));
     TextureSubresourceUsage& textureUsage = it.first->second;

-    textureUsage.Update(range,
-                        [usage](const SubresourceRange&, wgpu::TextureUsage* storedUsage) {
+    textureUsage.Update(range, [usage](const SubresourceRange&, wgpu::TextureUsage* storedUsage) {
         // TODO(crbug.com/dawn/1001): Consider optimizing to have fewer
         // branches.
         if ((*storedUsage & wgpu::TextureUsage::RenderAttachment) != 0 &&

@ -71,8 +70,8 @@ namespace dawn::native {
                                    texture->GetNumMipLevels(), wgpu::TextureUsage::None));
         TextureSubresourceUsage* passTextureUsage = &it.first->second;

-        passTextureUsage->Merge(
-            textureUsage, [](const SubresourceRange&, wgpu::TextureUsage* storedUsage,
+        passTextureUsage->Merge(textureUsage,
+                                [](const SubresourceRange&, wgpu::TextureUsage* storedUsage,
                                    const wgpu::TextureUsage& addedUsage) {
             ASSERT((addedUsage & wgpu::TextureUsage::RenderAttachment) == 0);
             *storedUsage |= addedUsage;
@ -49,9 +49,7 @@ namespace dawn::native {
 class PerStage {
   public:
     PerStage() = default;
-    explicit PerStage(const T& initialValue) {
-        mData.fill(initialValue);
-    }
+    explicit PerStage(const T& initialValue) { mData.fill(initialValue); }

     T& operator[](SingleShaderStage stage) {
         DAWN_ASSERT(static_cast<uint32_t>(stage) < kNumStages);
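PerStage wraps a std::array so it can only be indexed by the shader-stage enum, with the bounds assert shown above. A self-contained usage sketch with stand-ins for SingleShaderStage and kNumStages:

```cpp
#include <array>
#include <cassert>
#include <cstdint>

enum class SingleShaderStage : uint32_t { Vertex = 0, Fragment = 1, Compute = 2 };
constexpr uint32_t kNumStages = 3;

template <typename T>
class PerStage {
  public:
    PerStage() = default;
    explicit PerStage(const T& initialValue) { mData.fill(initialValue); }

    T& operator[](SingleShaderStage stage) {
        assert(static_cast<uint32_t>(stage) < kNumStages);  // typed, bounds-checked access
        return mData[static_cast<uint32_t>(stage)];
    }

  private:
    std::array<T, kNumStages> mData{};
};

int main() {
    PerStage<int> bindingCounts(0);  // every stage starts at zero
    bindingCounts[SingleShaderStage::Fragment] = 4;
    assert(bindingCounts[SingleShaderStage::Fragment] == 4);
}
```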
@ -71,8 +71,8 @@ namespace dawn::native {
     std::unordered_set<std::string> stageInitializedConstantIdentifiers;
     for (uint32_t i = 0; i < constantCount; i++) {
         DAWN_INVALID_IF(metadata.overridableConstants.count(constants[i].key) == 0,
-                        "Pipeline overridable constant \"%s\" not found in %s.",
-                        constants[i].key, module);
+                        "Pipeline overridable constant \"%s\" not found in %s.", constants[i].key,
+                        module);

         if (stageInitializedConstantIdentifiers.count(constants[i].key) == 0) {
             if (metadata.uninitializedOverridableConstants.count(constants[i].key) > 0) {

@ -159,12 +159,10 @@ namespace dawn::native {
     }
 }

-PipelineBase::PipelineBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
-}
+PipelineBase::PipelineBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {}

-PipelineBase::PipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)
-    : ApiObjectBase(device, tag) {
-}
+PipelineBase::PipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+    : ApiObjectBase(device, tag) {}

 PipelineBase::~PipelineBase() = default;

@ -200,15 +198,13 @@ namespace dawn::native {
     DAWN_TRY(GetDevice()->ValidateIsAlive());
     DAWN_TRY(GetDevice()->ValidateObject(this));
     DAWN_TRY(GetDevice()->ValidateObject(mLayout.Get()));
-    DAWN_INVALID_IF(
-        groupIndex >= kMaxBindGroups,
-        "Bind group layout index (%u) exceeds the maximum number of bind groups (%u).",
-        groupIndex, kMaxBindGroups);
+    DAWN_INVALID_IF(groupIndex >= kMaxBindGroups,
+                    "Bind group layout index (%u) exceeds the maximum number of bind groups (%u).",
+                    groupIndex, kMaxBindGroups);
     return {};
 }

-ResultOrError<Ref<BindGroupLayoutBase>> PipelineBase::GetBindGroupLayout(
-    uint32_t groupIndexIn) {
+ResultOrError<Ref<BindGroupLayoutBase>> PipelineBase::GetBindGroupLayout(uint32_t groupIndexIn) {
     DAWN_TRY(ValidateGetBindGroupLayout(groupIndexIn));

     BindGroupIndex groupIndex(groupIndexIn);
@ -29,8 +29,7 @@
 namespace dawn::native {

-MaybeError ValidatePipelineLayoutDescriptor(
-    DeviceBase* device,
+MaybeError ValidatePipelineLayoutDescriptor(DeviceBase* device,
     const PipelineLayoutDescriptor* descriptor,
     PipelineCompatibilityToken pipelineCompatibilityToken) {
     if (descriptor->nextInChain != nullptr) {

@ -84,8 +83,7 @@ namespace dawn::native {
 }

 PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag)
-    : ApiObjectBase(device, tag) {
-}
+    : ApiObjectBase(device, tag) {}

 PipelineLayoutBase::~PipelineLayoutBase() = default;

@ -209,8 +207,7 @@ namespace dawn::native {
                     (SampleTypeBit::Float | SampleTypeBit::UnfilterableFloat)) {
                     // Default to UnfilterableFloat. It will be promoted to Float if it
                     // is used with a sampler.
-                    entry.texture.sampleType =
-                        wgpu::TextureSampleType::UnfilterableFloat;
+                    entry.texture.sampleType = wgpu::TextureSampleType::UnfilterableFloat;
                 } else {
                     UNREACHABLE();
                 }

@ -290,8 +287,7 @@ namespace dawn::native {
     }

     // Promote any Unfilterable textures used with a sampler to Filtering.
-    for (const EntryPointMetadata::SamplerTexturePair& pair :
-         metadata.samplerTexturePairs) {
+    for (const EntryPointMetadata::SamplerTexturePair& pair : metadata.samplerTexturePairs) {
         BindGroupLayoutEntry* entry = &entryData[pair.texture.group][pair.texture.binding];
         if (entry->texture.sampleType == wgpu::TextureSampleType::UnfilterableFloat) {
             entry->texture.sampleType = wgpu::TextureSampleType::Float;

@ -332,8 +328,7 @@ namespace dawn::native {
     // Check in debug that the pipeline layout is compatible with the current pipeline.
     for (const StageAndDescriptor& stage : stages) {
         const EntryPointMetadata& metadata = stage.module->GetEntryPoint(stage.entryPoint);
-        ASSERT(ValidateCompatibilityWithPipelineLayout(device, metadata, result.Get())
-                   .IsSuccess());
+        ASSERT(ValidateCompatibilityWithPipelineLayout(device, metadata, result.Get()).IsSuccess());
     }

     return std::move(result);

@ -366,8 +361,7 @@ namespace dawn::native {
     return mMask;
 }

-BindGroupLayoutMask PipelineLayoutBase::InheritedGroupsMask(
-    const PipelineLayoutBase* other) const {
+BindGroupLayoutMask PipelineLayoutBase::InheritedGroupsMask(const PipelineLayoutBase* other) const {
     ASSERT(!IsError());
     return {(1 << static_cast<uint32_t>(GroupsInheritUpTo(other))) - 1u};
 }


@@ -38,8 +38,7 @@ namespace dawn::native {
     const PipelineLayoutDescriptor* descriptor,
     PipelineCompatibilityToken pipelineCompatibilityToken = PipelineCompatibilityToken(0));
 
-using BindGroupLayoutArray =
-    ityp::array<BindGroupIndex, Ref<BindGroupLayoutBase>, kMaxBindGroups>;
+using BindGroupLayoutArray = ityp::array<BindGroupIndex, Ref<BindGroupLayoutBase>, kMaxBindGroups>;
 using BindGroupLayoutMask = ityp::bitset<BindGroupIndex, kMaxBindGroups>;
 
 struct StageAndDescriptor {


@@ -20,10 +20,8 @@
 namespace dawn::native {
 
-PooledResourceMemoryAllocator::PooledResourceMemoryAllocator(
-    ResourceHeapAllocator* heapAllocator)
-    : mHeapAllocator(heapAllocator) {
-}
+PooledResourceMemoryAllocator::PooledResourceMemoryAllocator(ResourceHeapAllocator* heapAllocator)
+    : mHeapAllocator(heapAllocator) {}
 
 void PooledResourceMemoryAllocator::DestroyPool() {
     for (auto& resourceHeap : mPool) {


@@ -34,8 +34,7 @@ namespace dawn::native {
     explicit PooledResourceMemoryAllocator(ResourceHeapAllocator* heapAllocator);
     ~PooledResourceMemoryAllocator() override = default;
 
-    ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(
-        uint64_t size) override;
+    ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(uint64_t size) override;
     void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) override;
 
     void DestroyPool();


@@ -33,16 +33,14 @@ namespace dawn::native {
                                          EncodingContext* encodingContext)
     : ApiObjectBase(device, label),
       mEncodingContext(encodingContext),
-      mValidationEnabled(device->IsValidationEnabled()) {
-}
+      mValidationEnabled(device->IsValidationEnabled()) {}
 
 ProgrammableEncoder::ProgrammableEncoder(DeviceBase* device,
                                          EncodingContext* encodingContext,
                                          ErrorTag errorTag)
     : ApiObjectBase(device, errorTag),
       mEncodingContext(encodingContext),
-      mValidationEnabled(device->IsValidationEnabled()) {
-}
+      mValidationEnabled(device->IsValidationEnabled()) {}
 
 bool ProgrammableEncoder::IsValidationEnabled() const {
     return mValidationEnabled;
@@ -76,8 +74,7 @@ namespace dawn::native {
         this,
         [&](CommandAllocator* allocator) -> MaybeError {
             if (IsValidationEnabled()) {
-                DAWN_INVALID_IF(
-                    mDebugGroupStackSize == 0,
+                DAWN_INVALID_IF(mDebugGroupStackSize == 0,
                     "PopDebugGroup called when no debug groups are currently pushed.");
             }
             allocator->Allocate<PopDebugGroupCmd>(Command::PopDebugGroup);
@@ -114,8 +111,7 @@ namespace dawn::native {
                                                  const uint32_t* dynamicOffsetsIn) const {
     DAWN_TRY(GetDevice()->ValidateObject(group));
 
-    DAWN_INVALID_IF(index >= kMaxBindGroupsTyped,
-                    "Bind group index (%u) exceeds the maximum (%u).",
+    DAWN_INVALID_IF(index >= kMaxBindGroupsTyped, "Bind group index (%u) exceeds the maximum (%u).",
                     static_cast<uint32_t>(index), kMaxBindGroups);
 
     ityp::span<BindingIndex, const uint32_t> dynamicOffsets(dynamicOffsetsIn,
@@ -153,8 +149,8 @@ namespace dawn::native {
         }
 
         DAWN_INVALID_IF(!IsAligned(dynamicOffsets[i], requiredAlignment),
-                        "Dynamic Offset[%u] (%u) is not %u byte aligned.",
-                        static_cast<uint32_t>(i), dynamicOffsets[i], requiredAlignment);
+                        "Dynamic Offset[%u] (%u) is not %u byte aligned.", static_cast<uint32_t>(i),
+                        dynamicOffsets[i], requiredAlignment);
 
         BufferBinding bufferBinding = group->GetBindingAsBufferBinding(i);


@@ -30,9 +30,7 @@ namespace dawn::native {
 // Base class for shared functionality between programmable encoders.
 class ProgrammableEncoder : public ApiObjectBase {
   public:
-    ProgrammableEncoder(DeviceBase* device,
-                        const char* label,
-                        EncodingContext* encodingContext);
+    ProgrammableEncoder(DeviceBase* device, const char* label, EncodingContext* encodingContext);
 
     void APIInsertDebugMarker(const char* groupLabel);
     void APIPopDebugGroup();
@@ -55,9 +53,7 @@ namespace dawn::native {
                                  const uint32_t* dynamicOffsets) const;
 
     // Construct an "error" programmable pass encoder.
-    ProgrammableEncoder(DeviceBase* device,
-                        EncodingContext* encodingContext,
-                        ErrorTag errorTag);
+    ProgrammableEncoder(DeviceBase* device, EncodingContext* encodingContext, ErrorTag errorTag);
 
     EncodingContext* mEncodingContext = nullptr;


@@ -116,15 +116,13 @@ namespace dawn::native {
         }
     }
 )";
 
-ResultOrError<ComputePipelineBase*> GetOrCreateTimestampComputePipeline(
-    DeviceBase* device) {
+ResultOrError<ComputePipelineBase*> GetOrCreateTimestampComputePipeline(DeviceBase* device) {
     InternalPipelineStore* store = device->GetInternalPipelineStore();
 
     if (store->timestampComputePipeline == nullptr) {
         // Create compute shader module if not cached before.
         if (store->timestampCS == nullptr) {
-            DAWN_TRY_ASSIGN(
-                store->timestampCS,
+            DAWN_TRY_ASSIGN(store->timestampCS,
                 utils::CreateShaderModule(device, sConvertTimestampsToNanoseconds));
         }
@@ -135,8 +133,7 @@ namespace dawn::native {
                 device,
                 {
                     {0, wgpu::ShaderStage::Compute, kInternalStorageBufferBinding},
-                    {1, wgpu::ShaderStage::Compute,
-                     wgpu::BufferBindingType::ReadOnlyStorage},
+                    {1, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage},
                     {2, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform},
                 },
                 /* allowInternalBinding */ true));
@@ -200,9 +197,9 @@ namespace dawn::native {
     // Create bind group after all binding entries are set.
     Ref<BindGroupBase> bindGroup;
-    DAWN_TRY_ASSIGN(bindGroup,
-                    utils::MakeBindGroup(device, layout,
-                                         {{0, timestamps}, {1, availability}, {2, params}}));
+    DAWN_TRY_ASSIGN(
+        bindGroup,
+        utils::MakeBindGroup(device, layout, {{0, timestamps}, {1, availability}, {2, params}}));
 
     // Create compute encoder and issue dispatch.
     Ref<ComputePassEncoder> pass = encoder->BeginComputePass();


@@ -27,19 +27,15 @@ namespace dawn::native {
 class ErrorQuerySet final : public QuerySetBase {
   public:
-    explicit ErrorQuerySet(DeviceBase* device) : QuerySetBase(device, ObjectBase::kError) {
-    }
+    explicit ErrorQuerySet(DeviceBase* device) : QuerySetBase(device, ObjectBase::kError) {}
 
   private:
-    void DestroyImpl() override {
-        UNREACHABLE();
-    }
+    void DestroyImpl() override { UNREACHABLE(); }
 };
 
 }  // anonymous namespace
 
-MaybeError ValidateQuerySetDescriptor(DeviceBase* device,
-                                      const QuerySetDescriptor* descriptor) {
+MaybeError ValidateQuerySetDescriptor(DeviceBase* device, const QuerySetDescriptor* descriptor) {
     DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
 
     DAWN_TRY(ValidateQueryType(descriptor->type));
@@ -118,8 +114,7 @@ namespace dawn::native {
 }
 
 QuerySetBase::QuerySetBase(DeviceBase* device, ObjectBase::ErrorTag tag)
-    : ApiObjectBase(device, tag) {
-}
+    : ApiObjectBase(device, tag) {}
 
 QuerySetBase::~QuerySetBase() {
     // Uninitialized or already destroyed


@@ -49,8 +49,7 @@ namespace dawn::native {
                      uint32_t actualBytesPerRow,
                      uint32_t dstBytesPerRow,
                      uint32_t srcBytesPerRow) {
-    bool copyWholeLayer =
-        actualBytesPerRow == dstBytesPerRow && dstBytesPerRow == srcBytesPerRow;
+    bool copyWholeLayer = actualBytesPerRow == dstBytesPerRow && dstBytesPerRow == srcBytesPerRow;
     bool copyWholeData = copyWholeLayer && imageAdditionalStride == 0;
 
     if (!copyWholeLayer) {  // copy row by row
@@ -87,19 +86,16 @@ namespace dawn::native {
                                                          const TexelBlockInfo& blockInfo,
                                                          const Extent3D& writeSizePixel) {
     uint64_t newDataSizeBytes;
-    DAWN_TRY_ASSIGN(
-        newDataSizeBytes,
-        ComputeRequiredBytesInCopy(blockInfo, writeSizePixel, optimallyAlignedBytesPerRow,
-                                   alignedRowsPerImage));
+    DAWN_TRY_ASSIGN(newDataSizeBytes,
+                    ComputeRequiredBytesInCopy(blockInfo, writeSizePixel,
+                                               optimallyAlignedBytesPerRow, alignedRowsPerImage));
 
-    uint64_t optimalOffsetAlignment =
-        device->GetOptimalBufferToTextureCopyOffsetAlignment();
+    uint64_t optimalOffsetAlignment = device->GetOptimalBufferToTextureCopyOffsetAlignment();
     ASSERT(IsPowerOfTwo(optimalOffsetAlignment));
     ASSERT(IsPowerOfTwo(blockInfo.byteSize));
     // We need the offset to be aligned to both optimalOffsetAlignment and blockByteSize,
     // since both of them are powers of two, we only need to align to the max value.
-    uint64_t offsetAlignment =
-        std::max(optimalOffsetAlignment, uint64_t(blockInfo.byteSize));
+    uint64_t offsetAlignment = std::max(optimalOffsetAlignment, uint64_t(blockInfo.byteSize));
 
     // For depth-stencil texture, buffer offset must be a multiple of 4, which is required
     // by WebGPU and Vulkan SPEC.
@@ -109,9 +105,9 @@ namespace dawn::native {
     }
 
     UploadHandle uploadHandle;
-    DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
-                                      newDataSizeBytes, device->GetPendingCommandSerial(),
-                                      offsetAlignment));
+    DAWN_TRY_ASSIGN(uploadHandle,
+                    device->GetDynamicUploader()->Allocate(
+                        newDataSizeBytes, device->GetPendingCommandSerial(), offsetAlignment));
     ASSERT(uploadHandle.mappedBuffer != nullptr);
 
     uint8_t* dstPointer = static_cast<uint8_t*>(uploadHandle.mappedBuffer);
@@ -127,17 +123,16 @@ namespace dawn::native {
     uint64_t imageAdditionalStride =
         dataLayout.bytesPerRow * (dataRowsPerImage - alignedRowsPerImage);
-    CopyTextureData(dstPointer, srcPointer, writeSizePixel.depthOrArrayLayers,
-                    alignedRowsPerImage, imageAdditionalStride, alignedBytesPerRow,
-                    optimallyAlignedBytesPerRow, dataLayout.bytesPerRow);
+    CopyTextureData(dstPointer, srcPointer, writeSizePixel.depthOrArrayLayers, alignedRowsPerImage,
+                    imageAdditionalStride, alignedBytesPerRow, optimallyAlignedBytesPerRow,
+                    dataLayout.bytesPerRow);
 
     return uploadHandle;
 }
 
 struct SubmittedWorkDone : QueueBase::TaskInFlight {
     SubmittedWorkDone(WGPUQueueWorkDoneCallback callback, void* userdata)
-        : mCallback(callback), mUserdata(userdata) {
-    }
+        : mCallback(callback), mUserdata(userdata) {}
     void Finish(dawn::platform::Platform* platform, ExecutionSerial serial) override {
         ASSERT(mCallback != nullptr);
         TRACE_EVENT1(platform, General, "Queue::SubmittedWorkDone::Finished", "serial",
@@ -159,12 +154,10 @@ namespace dawn::native {
 class ErrorQueue : public QueueBase {
   public:
-    explicit ErrorQueue(DeviceBase* device) : QueueBase(device, ObjectBase::kError) {
-    }
+    explicit ErrorQueue(DeviceBase* device) : QueueBase(device, ObjectBase::kError) {}
 
   private:
-    MaybeError SubmitImpl(uint32_t commandCount,
-                          CommandBufferBase* const* commands) override {
+    MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override {
         UNREACHABLE();
     }
 };
@@ -172,23 +165,18 @@ namespace dawn::native {
 // QueueBase
 
-QueueBase::TaskInFlight::~TaskInFlight() {
-}
+QueueBase::TaskInFlight::~TaskInFlight() {}
 
 QueueBase::QueueBase(DeviceBase* device, const QueueDescriptor* descriptor)
-    : ApiObjectBase(device, descriptor->label) {
-}
+    : ApiObjectBase(device, descriptor->label) {}
 
-QueueBase::QueueBase(DeviceBase* device, ObjectBase::ErrorTag tag)
-    : ApiObjectBase(device, tag) {
-}
+QueueBase::QueueBase(DeviceBase* device, ObjectBase::ErrorTag tag) : ApiObjectBase(device, tag) {}
 
 QueueBase::~QueueBase() {
     ASSERT(mTasksInFlight.Empty());
 }
 
-void QueueBase::DestroyImpl() {
-}
+void QueueBase::DestroyImpl() {}
 
 // static
 QueueBase* QueueBase::MakeError(DeviceBase* device) {
@@ -291,8 +279,8 @@ namespace dawn::native {
     DeviceBase* device = GetDevice();
 
     UploadHandle uploadHandle;
-    DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
-                                      size, device->GetPendingCommandSerial(),
-                                      kCopyBufferToBufferOffsetAlignment));
+    DAWN_TRY_ASSIGN(uploadHandle,
+                    device->GetDynamicUploader()->Allocate(size, device->GetPendingCommandSerial(),
                                                            kCopyBufferToBufferOffsetAlignment));
 
     ASSERT(uploadHandle.mappedBuffer != nullptr);
@@ -347,15 +335,13 @@ namespace dawn::native {
     uint32_t alignedRowsPerImage = writeSizePixel.height / blockInfo.height;
 
     uint32_t optimalBytesPerRowAlignment = GetDevice()->GetOptimalBytesPerRowAlignment();
-    uint32_t optimallyAlignedBytesPerRow =
-        Align(alignedBytesPerRow, optimalBytesPerRowAlignment);
+    uint32_t optimallyAlignedBytesPerRow = Align(alignedBytesPerRow, optimalBytesPerRowAlignment);
 
     UploadHandle uploadHandle;
-    DAWN_TRY_ASSIGN(uploadHandle,
-                    UploadTextureDataAligningBytesPerRowAndOffset(
-                        GetDevice(), data, alignedBytesPerRow, optimallyAlignedBytesPerRow,
-                        alignedRowsPerImage, dataLayout, format.HasDepthOrStencil(), blockInfo,
-                        writeSizePixel));
+    DAWN_TRY_ASSIGN(uploadHandle, UploadTextureDataAligningBytesPerRowAndOffset(
                                       GetDevice(), data, alignedBytesPerRow,
                                       optimallyAlignedBytesPerRow, alignedRowsPerImage, dataLayout,
                                       format.HasDepthOrStencil(), blockInfo, writeSizePixel));
 
     TextureDataLayout passDataLayout = dataLayout;
     passDataLayout.offset = uploadHandle.startOffset;
@@ -384,8 +370,7 @@ namespace dawn::native {
                                CopyTextureForBrowserInternal(source, destination, copySize, options));
 }
 
-MaybeError QueueBase::CopyTextureForBrowserInternal(
-    const ImageCopyTexture* source,
+MaybeError QueueBase::CopyTextureForBrowserInternal(const ImageCopyTexture* source,
                                                     const ImageCopyTexture* destination,
                                                     const Extent3D* copySize,
                                                     const CopyTextureForBrowserOptions* options) {
@@ -481,9 +466,8 @@ namespace dawn::native {
                     "Usage (%s) of %s does not include %s.", destination->texture->GetUsage(),
                     destination->texture, wgpu::TextureUsage::CopyDst);
 
-    DAWN_INVALID_IF(destination->texture->GetSampleCount() > 1,
-                    "Sample count (%u) of %s is not 1",
-                    destination->texture->GetSampleCount(), destination->texture);
+    DAWN_INVALID_IF(destination->texture->GetSampleCount() > 1, "Sample count (%u) of %s is not 1",
                     destination->texture->GetSampleCount(), destination->texture);
 
     DAWN_TRY(ValidateLinearToDepthStencilCopyRestrictions(*destination));
 
     // We validate texture copy range before validating linear texture data,

Some files were not shown because too many files have changed in this diff.