Consistent formatting for Dawn/Tint.

This CL updates the clang-format files so that Dawn and Tint share a single
format. The major changes are: indentation is 4 spaces, the column limit is
100, and namespace contents are not indented.

Bug: dawn:1339
Change-Id: I4208742c95643998d9fd14e77a9cc558071ded39
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/87603
Commit-Queue: Dan Sinclair <dsinclair@chromium.org>
Reviewed-by: Corentin Wallez <cwallez@chromium.org>
Kokoro: Kokoro <noreply+kokoro@google.com>
Author: dan sinclair
Date: 2022-05-01 14:40:55 +0000
Committed-by: Dawn LUCI CQ
Commit: 41e4d9a34c (parent: 73b1d1dafa)
1827 changed files with 218382 additions and 227741 deletions
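As a rough illustration (not part of the CL), hypothetical code formatted under the new shared style, with 4-space indentation, a 100-column limit, and namespace contents not indented, looks like this:

namespace dawn::native {

// Namespace contents start at column 0; class members are indented by 4 spaces,
// and access specifiers are offset by -2 (hypothetical names for illustration only).
class ExampleObject {
  public:
    // Long parameter lists wrap to stay within the 100-column limit.
    void DoSomething(uint32_t firstParameter,
                     uint32_t secondParameter,
                     uint32_t thirdParameter);
};

}  // namespace dawn::native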

View File

@@ -1,8 +1,5 @@
 # http://clang.llvm.org/docs/ClangFormatStyleOptions.html
 BasedOnStyle: Chromium
-Standard: Cpp11
-AllowShortFunctionsOnASingleLine: false
 ColumnLimit: 100
@@ -11,10 +8,3 @@ IndentWidth: 4
 ObjCBlockIndentWidth: 4
 AccessModifierOffset: -2
-CompactNamespaces: true
-# This should result in only one indentation level with compacted namespaces
-NamespaceIndentation: All
-# Use this option once clang-format 6 is out.
-IndentPPDirectives: AfterHash

View File

@@ -121,7 +121,7 @@ def _NonInclusiveFileFilter(file):
         "third_party/khronos/KHR/khrplatform.h",  # Third party file
         "tools/roll-all",  # Branch name
         "tools/src/container/key.go",  # External URL
-        "tools/src/go.sum",  # External URL
+        "go.sum",  # External URL
     ]
     return file.LocalPath() not in filter_list

View File

@@ -1 +0,0 @@
-filter=-runtime/indentation_namespace

View File

@@ -31,126 +31,117 @@
namespace dawn {

template <typename T>
struct IsDawnBitmask {
    static constexpr bool enable = false;
};

template <typename T, typename Enable = void>
struct LowerBitmask {
    static constexpr bool enable = false;
};

template <typename T>
struct LowerBitmask<T, typename std::enable_if<IsDawnBitmask<T>::enable>::type> {
    static constexpr bool enable = true;
    using type = T;
    constexpr static T Lower(T t) { return t; }
};

template <typename T>
struct BoolConvertible {
    using Integral = typename std::underlying_type<T>::type;

    // NOLINTNEXTLINE(runtime/explicit)
    constexpr BoolConvertible(Integral value) : value(value) {}
    constexpr operator bool() const { return value != 0; }
    constexpr operator T() const { return static_cast<T>(value); }

    Integral value;
};

template <typename T>
struct LowerBitmask<BoolConvertible<T>> {
    static constexpr bool enable = true;
    using type = T;
    static constexpr type Lower(BoolConvertible<T> t) { return t; }
};

template <
    typename T1,
    typename T2,
    typename = typename std::enable_if<LowerBitmask<T1>::enable && LowerBitmask<T2>::enable>::type>
constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator|(T1 left, T2 right) {
    using T = typename LowerBitmask<T1>::type;
    using Integral = typename std::underlying_type<T>::type;
    return static_cast<Integral>(LowerBitmask<T1>::Lower(left)) |
           static_cast<Integral>(LowerBitmask<T2>::Lower(right));
}

template <
    typename T1,
    typename T2,
    typename = typename std::enable_if<LowerBitmask<T1>::enable && LowerBitmask<T2>::enable>::type>
constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator&(T1 left, T2 right) {
    using T = typename LowerBitmask<T1>::type;
    using Integral = typename std::underlying_type<T>::type;
    return static_cast<Integral>(LowerBitmask<T1>::Lower(left)) &
           static_cast<Integral>(LowerBitmask<T2>::Lower(right));
}

template <
    typename T1,
    typename T2,
    typename = typename std::enable_if<LowerBitmask<T1>::enable && LowerBitmask<T2>::enable>::type>
constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator^(T1 left, T2 right) {
    using T = typename LowerBitmask<T1>::type;
    using Integral = typename std::underlying_type<T>::type;
    return static_cast<Integral>(LowerBitmask<T1>::Lower(left)) ^
           static_cast<Integral>(LowerBitmask<T2>::Lower(right));
}

template <typename T1>
constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator~(T1 t) {
    using T = typename LowerBitmask<T1>::type;
    using Integral = typename std::underlying_type<T>::type;
    return ~static_cast<Integral>(LowerBitmask<T1>::Lower(t));
}

template <
    typename T,
    typename T2,
    typename = typename std::enable_if<IsDawnBitmask<T>::enable && LowerBitmask<T2>::enable>::type>
constexpr T& operator&=(T& l, T2 right) {
    T r = LowerBitmask<T2>::Lower(right);
    l = l & r;
    return l;
}

template <
    typename T,
    typename T2,
    typename = typename std::enable_if<IsDawnBitmask<T>::enable && LowerBitmask<T2>::enable>::type>
constexpr T& operator|=(T& l, T2 right) {
    T r = LowerBitmask<T2>::Lower(right);
    l = l | r;
    return l;
}

template <
    typename T,
    typename T2,
    typename = typename std::enable_if<IsDawnBitmask<T>::enable && LowerBitmask<T2>::enable>::type>
constexpr T& operator^=(T& l, T2 right) {
    T r = LowerBitmask<T2>::Lower(right);
    l = l ^ r;
    return l;
}

template <typename T>
constexpr bool HasZeroOrOneBits(T value) {
    using Integral = typename std::underlying_type<T>::type;
    return (static_cast<Integral>(value) & (static_cast<Integral>(value) - 1)) == 0;
}

}  // namespace dawn
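As a usage sketch (hypothetical, not part of this CL): an enum class opts into the operators above by specializing IsDawnBitmask, after which the bitwise operators and HasZeroOrOneBits apply to it. Assumes this header and <cstdint> are included.

// Hypothetical example enum; everything lives in namespace dawn so the operators are found.
namespace dawn {

enum class ExampleUsage : uint32_t {
    None = 0x0,
    Read = 0x1,
    Write = 0x2,
};

template <>
struct IsDawnBitmask<ExampleUsage> {
    static constexpr bool enable = true;
};

inline bool ExampleUsageSketch() {
    ExampleUsage usage = ExampleUsage::Read | ExampleUsage::Write;  // operator| above
    usage &= ~ExampleUsage::Write;                                  // operator~ and operator&= above
    return HasZeroOrOneBits(usage);                                 // true: only Read remains set
}

}  // namespace dawn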

View File

@@ -65,7 +65,7 @@ struct DawnWSIContextD3D12 {
#endif

#if defined(DAWN_ENABLE_BACKEND_METAL) && defined(__OBJC__)
#import <Metal/Metal.h>

struct DawnWSIContextMetal {
    id<MTLDevice> device = nil;

View File

@@ -30,81 +30,81 @@ struct ID3D12Resource;
namespace dawn::native::d3d12 {

class D3D11on12ResourceCache;

DAWN_NATIVE_EXPORT Microsoft::WRL::ComPtr<ID3D12Device> GetD3D12Device(WGPUDevice device);
DAWN_NATIVE_EXPORT DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
                                                                         HWND window);
DAWN_NATIVE_EXPORT WGPUTextureFormat
GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain);

enum MemorySegment {
    Local,
    NonLocal,
};

DAWN_NATIVE_EXPORT uint64_t SetExternalMemoryReservation(WGPUDevice device,
                                                         uint64_t requestedReservationSize,
                                                         MemorySegment memorySegment);

struct DAWN_NATIVE_EXPORT ExternalImageDescriptorDXGISharedHandle : ExternalImageDescriptor {
  public:
    ExternalImageDescriptorDXGISharedHandle();

    // Note: SharedHandle must be a handle to a texture object.
    HANDLE sharedHandle;
};

// Keyed mutex acquire/release uses a fixed key of 0 to match Chromium behavior.
constexpr UINT64 kDXGIKeyedMutexAcquireReleaseKey = 0;

struct DAWN_NATIVE_EXPORT ExternalImageAccessDescriptorDXGIKeyedMutex
    : ExternalImageAccessDescriptor {
  public:
    // TODO(chromium:1241533): Remove deprecated keyed mutex params after removing associated
    // code from Chromium - we use a fixed key of 0 for acquire and release everywhere now.
    uint64_t acquireMutexKey;
    uint64_t releaseMutexKey;
    bool isSwapChainTexture = false;
};

class DAWN_NATIVE_EXPORT ExternalImageDXGI {
  public:
    ~ExternalImageDXGI();

    // Note: SharedHandle must be a handle to a texture object.
    static std::unique_ptr<ExternalImageDXGI> Create(
        WGPUDevice device,
        const ExternalImageDescriptorDXGISharedHandle* descriptor);

    WGPUTexture ProduceTexture(WGPUDevice device,
                               const ExternalImageAccessDescriptorDXGIKeyedMutex* descriptor);

  private:
    ExternalImageDXGI(Microsoft::WRL::ComPtr<ID3D12Resource> d3d12Resource,
                      const WGPUTextureDescriptor* descriptor);

    Microsoft::WRL::ComPtr<ID3D12Resource> mD3D12Resource;

    // Contents of WGPUTextureDescriptor are stored individually since the descriptor
    // could outlive this image.
    WGPUTextureUsageFlags mUsage;
    WGPUTextureUsageFlags mUsageInternal = WGPUTextureUsage_None;
    WGPUTextureDimension mDimension;
    WGPUExtent3D mSize;
    WGPUTextureFormat mFormat;
    uint32_t mMipLevelCount;
    uint32_t mSampleCount;

    std::unique_ptr<D3D11on12ResourceCache> mD3D11on12ResourceCache;
};

struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
    AdapterDiscoveryOptions();
    explicit AdapterDiscoveryOptions(Microsoft::WRL::ComPtr<IDXGIAdapter> adapter);

    Microsoft::WRL::ComPtr<IDXGIAdapter> dxgiAdapter;
};

}  // namespace dawn::native::d3d12
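A hedged usage sketch (not from this CL) of the ExternalImageDXGI path declared above; the device, shared handle, and texture descriptor are assumed to be supplied by the embedder:

// Hypothetical sketch: importing a DXGI shared-handle texture into Dawn.
void ImportSharedHandleSketch(WGPUDevice device,
                              HANDLE sharedHandle,
                              const WGPUTextureDescriptor* textureDesc) {
    dawn::native::d3d12::ExternalImageDescriptorDXGISharedHandle desc;
    desc.cTextureDescriptor = textureDesc;  // Must match the texture creation parameters.
    desc.isInitialized = true;
    desc.sharedHandle = sharedHandle;

    std::unique_ptr<dawn::native::d3d12::ExternalImageDXGI> image =
        dawn::native::d3d12::ExternalImageDXGI::Create(device, &desc);
    if (image == nullptr) {
        return;  // Treat a null result as a failed import in this sketch.
    }

    dawn::native::d3d12::ExternalImageAccessDescriptorDXGIKeyedMutex accessDesc;
    accessDesc.isInitialized = true;
    accessDesc.usage = textureDesc->usage;
    // Deprecated keyed mutex params; the fixed key declared above is used everywhere now.
    accessDesc.acquireMutexKey = dawn::native::d3d12::kDXGIKeyedMutexAcquireReleaseKey;
    accessDesc.releaseMutexKey = dawn::native::d3d12::kDXGIKeyedMutexAcquireReleaseKey;

    WGPUTexture texture = image->ProduceTexture(device, &accessDesc);
    (void)texture;
}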

View File

@@ -23,237 +23,237 @@
#include "dawn/webgpu.h"

namespace dawn::platform {
class Platform;
}  // namespace dawn::platform

namespace wgpu {
struct AdapterProperties;
struct DeviceDescriptor;
}  // namespace wgpu

namespace dawn::native {

class InstanceBase;
class AdapterBase;

// An optional parameter of Adapter::CreateDevice() to send additional information when creating
// a Device. For example, we can use it to enable a workaround, optimization or feature.
struct DAWN_NATIVE_EXPORT DawnDeviceDescriptor {
    std::vector<const char*> requiredFeatures;
    std::vector<const char*> forceEnabledToggles;
    std::vector<const char*> forceDisabledToggles;
    const WGPURequiredLimits* requiredLimits = nullptr;
};

// A struct to record the information of a toggle. A toggle is a code path in Dawn device that
// can be manually configured to run or not outside Dawn, including workarounds, special
// features and optimizations.
struct ToggleInfo {
    const char* name;
    const char* description;
    const char* url;
};

// A struct to record the information of a feature. A feature is a GPU feature that is not
// required to be supported by all Dawn backends and can only be used when it is enabled on the
// creation of device.
using FeatureInfo = ToggleInfo;

// An adapter is an object that represent on possibility of creating devices in the system.
// Most of the time it will represent a combination of a physical GPU and an API. Not that the
// same GPU can be represented by multiple adapters but on different APIs.
//
// The underlying Dawn adapter is owned by the Dawn instance so this class is not RAII but just
// a reference to an underlying adapter.
class DAWN_NATIVE_EXPORT Adapter {
  public:
    Adapter();
    // NOLINTNEXTLINE(runtime/explicit)
    Adapter(AdapterBase* impl);
    ~Adapter();

    Adapter(const Adapter& other);
    Adapter& operator=(const Adapter& other);

    // Essentially webgpu.h's wgpuAdapterGetProperties while we don't have WGPUAdapter in
    // dawn.json
    void GetProperties(wgpu::AdapterProperties* properties) const;
    void GetProperties(WGPUAdapterProperties* properties) const;

    std::vector<const char*> GetSupportedExtensions() const;
    std::vector<const char*> GetSupportedFeatures() const;
    WGPUDeviceProperties GetAdapterProperties() const;
    bool GetLimits(WGPUSupportedLimits* limits) const;

    void SetUseTieredLimits(bool useTieredLimits);

    // Check that the Adapter is able to support importing external images. This is necessary
    // to implement the swapchain and interop APIs in Chromium.
    bool SupportsExternalImages() const;

    explicit operator bool() const;

    // Create a device on this adapter. On an error, nullptr is returned.
    WGPUDevice CreateDevice(const DawnDeviceDescriptor* deviceDescriptor);
    WGPUDevice CreateDevice(const wgpu::DeviceDescriptor* deviceDescriptor);
    WGPUDevice CreateDevice(const WGPUDeviceDescriptor* deviceDescriptor = nullptr);

    void RequestDevice(const DawnDeviceDescriptor* descriptor,
                       WGPURequestDeviceCallback callback,
                       void* userdata);
    void RequestDevice(const wgpu::DeviceDescriptor* descriptor,
                       WGPURequestDeviceCallback callback,
                       void* userdata);
    void RequestDevice(const WGPUDeviceDescriptor* descriptor,
                       WGPURequestDeviceCallback callback,
                       void* userdata);

    // Returns the underlying WGPUAdapter object.
    WGPUAdapter Get() const;

    // Reset the backend device object for testing purposes.
    void ResetInternalDeviceForTesting();

  private:
    AdapterBase* mImpl = nullptr;
};

// Base class for options passed to Instance::DiscoverAdapters.
struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptionsBase {
  public:
    const WGPUBackendType backendType;

  protected:
    explicit AdapterDiscoveryOptionsBase(WGPUBackendType type);
};

enum BackendValidationLevel { Full, Partial, Disabled };

// Represents a connection to dawn_native and is used for dependency injection, discovering
// system adapters and injecting custom adapters (like a Swiftshader Vulkan adapter).
//
// This is an RAII class for Dawn instances and also controls the lifetime of all adapters
// for this instance.
class DAWN_NATIVE_EXPORT Instance {
  public:
    explicit Instance(const WGPUInstanceDescriptor* desc = nullptr);
    ~Instance();

    Instance(const Instance& other) = delete;
    Instance& operator=(const Instance& other) = delete;

    // Gather all adapters in the system that can be accessed with no special options. These
    // adapters will later be returned by GetAdapters.
    void DiscoverDefaultAdapters();

    // Adds adapters that can be discovered with the options provided (like a getProcAddress).
    // The backend is chosen based on the type of the options used. Returns true on success.
    bool DiscoverAdapters(const AdapterDiscoveryOptionsBase* options);

    // Returns all the adapters that the instance knows about.
    std::vector<Adapter> GetAdapters() const;

    const ToggleInfo* GetToggleInfo(const char* toggleName);
    const FeatureInfo* GetFeatureInfo(WGPUFeatureName feature);

    // Enables backend validation layers
    void EnableBackendValidation(bool enableBackendValidation);
    void SetBackendValidationLevel(BackendValidationLevel validationLevel);

    // Enable debug capture on Dawn startup
    void EnableBeginCaptureOnStartup(bool beginCaptureOnStartup);

    // TODO(dawn:1374) Deprecate this once it is passed via the descriptor.
    void SetPlatform(dawn::platform::Platform* platform);

    // Returns the underlying WGPUInstance object.
    WGPUInstance Get() const;

  private:
    InstanceBase* mImpl = nullptr;
};

// Backend-agnostic API for dawn_native
DAWN_NATIVE_EXPORT const DawnProcTable& GetProcs();

// Query the names of all the toggles that are enabled in device
DAWN_NATIVE_EXPORT std::vector<const char*> GetTogglesUsed(WGPUDevice device);

// Backdoor to get the number of lazy clears for testing
DAWN_NATIVE_EXPORT size_t GetLazyClearCountForTesting(WGPUDevice device);

// Backdoor to get the number of deprecation warnings for testing
DAWN_NATIVE_EXPORT size_t GetDeprecationWarningCountForTesting(WGPUDevice device);

// Query if texture has been initialized
DAWN_NATIVE_EXPORT bool IsTextureSubresourceInitialized(
    WGPUTexture texture,
    uint32_t baseMipLevel,
    uint32_t levelCount,
    uint32_t baseArrayLayer,
    uint32_t layerCount,
    WGPUTextureAspect aspect = WGPUTextureAspect_All);

// Backdoor to get the order of the ProcMap for testing
DAWN_NATIVE_EXPORT std::vector<const char*> GetProcMapNamesForTesting();

DAWN_NATIVE_EXPORT bool DeviceTick(WGPUDevice device);

// ErrorInjector functions used for testing only. Defined in dawn_native/ErrorInjector.cpp
DAWN_NATIVE_EXPORT void EnableErrorInjector();
DAWN_NATIVE_EXPORT void DisableErrorInjector();
DAWN_NATIVE_EXPORT void ClearErrorInjector();
DAWN_NATIVE_EXPORT uint64_t AcquireErrorInjectorCallCount();
DAWN_NATIVE_EXPORT void InjectErrorAt(uint64_t index);

// The different types of external images
enum ExternalImageType {
    OpaqueFD,
    DmaBuf,
    IOSurface,
    DXGISharedHandle,
    EGLImage,
};

// Common properties of external images
struct DAWN_NATIVE_EXPORT ExternalImageDescriptor {
  public:
    const WGPUTextureDescriptor* cTextureDescriptor;  // Must match image creation params
    bool isInitialized;  // Whether the texture is initialized on import

    ExternalImageType GetType() const;

  protected:
    explicit ExternalImageDescriptor(ExternalImageType type);

  private:
    ExternalImageType mType;
};

struct DAWN_NATIVE_EXPORT ExternalImageAccessDescriptor {
  public:
    bool isInitialized;  // Whether the texture is initialized on import
    WGPUTextureUsageFlags usage;
};

struct DAWN_NATIVE_EXPORT ExternalImageExportInfo {
  public:
    bool isInitialized;  // Whether the texture is initialized after export

    ExternalImageType GetType() const;

  protected:
    explicit ExternalImageExportInfo(ExternalImageType type);

  private:
    ExternalImageType mType;
};

DAWN_NATIVE_EXPORT const char* GetObjectLabelForTesting(void* objectHandle);

DAWN_NATIVE_EXPORT uint64_t GetAllocatedSizeForTesting(WGPUBuffer buffer);

DAWN_NATIVE_EXPORT bool BindGroupLayoutBindingsEqualForTesting(WGPUBindGroupLayout a,
                                                               WGPUBindGroupLayout b);

}  // namespace dawn::native
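A minimal usage sketch (not from this CL) of the Instance/Adapter API declared above:

// Hypothetical helper: discover the default adapters and create a device on the first one.
WGPUDevice CreateDeviceSketch() {
    dawn::native::Instance instance;
    instance.DiscoverDefaultAdapters();

    std::vector<dawn::native::Adapter> adapters = instance.GetAdapters();
    if (adapters.empty()) {
        return nullptr;
    }

    // Passing no descriptor selects the defaults; per the comment above, nullptr is
    // returned on error.
    return adapters[0].CreateDevice();
}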

View File

@@ -29,41 +29,41 @@ struct __IOSurface;
typedef __IOSurface* IOSurfaceRef;

#ifdef __OBJC__
#import <Metal/Metal.h>
#endif  // __OBJC__

namespace dawn::native::metal {

struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
    AdapterDiscoveryOptions();
};

struct DAWN_NATIVE_EXPORT ExternalImageDescriptorIOSurface : ExternalImageDescriptor {
  public:
    ExternalImageDescriptorIOSurface();

    IOSurfaceRef ioSurface;

    // This has been deprecated.
    uint32_t plane;
};

DAWN_NATIVE_EXPORT WGPUTexture WrapIOSurface(WGPUDevice device,
                                             const ExternalImageDescriptorIOSurface* descriptor);

// When making Metal interop with other APIs, we need to be careful that QueueSubmit doesn't
// mean that the operations will be visible to other APIs/Metal devices right away. macOS
// does have a global queue of graphics operations, but the command buffers are inserted there
// when they are "scheduled". Submitting other operations before the command buffer is
// scheduled could lead to races in who gets scheduled first and incorrect rendering.
DAWN_NATIVE_EXPORT void WaitForCommandsToBeScheduled(WGPUDevice device);

}  // namespace dawn::native::metal

#ifdef __OBJC__
namespace dawn::native::metal {
DAWN_NATIVE_EXPORT id<MTLDevice> GetMetalDevice(WGPUDevice device);
}  // namespace dawn::native::metal
#endif  // __OBJC__
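A hedged sketch (not from this CL) of wrapping an IOSurface with the declarations above; the IOSurface and texture descriptor are assumed to come from the embedder:

// Hypothetical sketch: import an IOSurface-backed texture into Dawn.
WGPUTexture WrapIOSurfaceSketch(WGPUDevice device,
                                const WGPUTextureDescriptor* textureDesc,
                                IOSurfaceRef ioSurface) {
    dawn::native::metal::ExternalImageDescriptorIOSurface desc;
    desc.cTextureDescriptor = textureDesc;  // Must match the IOSurface's properties.
    desc.isInitialized = true;
    desc.ioSurface = ioSurface;
    desc.plane = 0;  // Deprecated field; left at 0 in this sketch.
    return dawn::native::metal::WrapIOSurface(device, &desc);
}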

View File

@@ -19,7 +19,7 @@
#include "dawn/native/DawnNative.h"

namespace dawn::native::null {
DAWN_NATIVE_EXPORT DawnSwapChainImplementation CreateNativeSwapChainImpl();
}  // namespace dawn::native::null

#endif  // INCLUDE_DAWN_NATIVE_NULLBACKEND_H_

View File

@@ -22,33 +22,34 @@ typedef void* EGLImage;
namespace dawn::native::opengl {

struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
    AdapterDiscoveryOptions();

    void* (*getProc)(const char*);
};

struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptionsES : public AdapterDiscoveryOptionsBase {
    AdapterDiscoveryOptionsES();

    void* (*getProc)(const char*);
};

using PresentCallback = void (*)(void*);

DAWN_NATIVE_EXPORT DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
                                                                         PresentCallback present,
                                                                         void* presentUserdata);
DAWN_NATIVE_EXPORT WGPUTextureFormat
GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain);

struct DAWN_NATIVE_EXPORT ExternalImageDescriptorEGLImage : ExternalImageDescriptor {
  public:
    ExternalImageDescriptorEGLImage();

    ::EGLImage image;
};

DAWN_NATIVE_EXPORT WGPUTexture
WrapExternalEGLImage(WGPUDevice device, const ExternalImageDescriptorEGLImage* descriptor);

}  // namespace dawn::native::opengl
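A hedged sketch (not from this CL) of GLES adapter discovery with the options above; eglGetProcAddress (from <EGL/egl.h>) is an assumed loader supplied by the embedder:

// Hypothetical sketch: hand Dawn a getProc loader and discover GLES adapters.
bool DiscoverGLESAdapterSketch(dawn::native::Instance* instance) {
    dawn::native::opengl::AdapterDiscoveryOptionsES options;
    options.getProc = [](const char* name) -> void* {
        return reinterpret_cast<void*>(eglGetProcAddress(name));
    };
    // Returns true on success, per the DiscoverAdapters comment in DawnNative.h.
    return instance->DiscoverAdapters(&options);
}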

View File

@@ -24,116 +24,116 @@
namespace dawn::native::vulkan {

DAWN_NATIVE_EXPORT VkInstance GetInstance(WGPUDevice device);

DAWN_NATIVE_EXPORT PFN_vkVoidFunction GetInstanceProcAddr(WGPUDevice device, const char* pName);

DAWN_NATIVE_EXPORT DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
                                                                         ::VkSurfaceKHR surface);
DAWN_NATIVE_EXPORT WGPUTextureFormat
GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain);

struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
    AdapterDiscoveryOptions();

    bool forceSwiftShader = false;
};

struct DAWN_NATIVE_EXPORT ExternalImageDescriptorVk : ExternalImageDescriptor {
  public:
    // The following members may be ignored if |ExternalImageDescriptor::isInitialized| is false
    // since the import does not need to preserve texture contents.

    // See https://www.khronos.org/registry/vulkan/specs/1.1/html/chap7.html. The acquire
    // operation old/new layouts must match exactly the layouts in the release operation. So
    // we may need to issue two barriers releasedOldLayout -> releasedNewLayout ->
    // cTextureDescriptor.usage if the new layout is not compatible with the desired usage.
    // The first barrier is the queue transfer, the second is the layout transition to our
    // desired usage.
    VkImageLayout releasedOldLayout = VK_IMAGE_LAYOUT_GENERAL;
    VkImageLayout releasedNewLayout = VK_IMAGE_LAYOUT_GENERAL;

  protected:
    using ExternalImageDescriptor::ExternalImageDescriptor;
};

struct ExternalImageExportInfoVk : ExternalImageExportInfo {
  public:
    // See comments in |ExternalImageDescriptorVk|
    // Contains the old/new layouts used in the queue release operation.
    VkImageLayout releasedOldLayout;
    VkImageLayout releasedNewLayout;

  protected:
    using ExternalImageExportInfo::ExternalImageExportInfo;
};

// Can't use DAWN_PLATFORM_LINUX since header included in both Dawn and Chrome
#ifdef __linux__

// Common properties of external images represented by FDs. On successful import the file
// descriptor's ownership is transferred to the Dawn implementation and they shouldn't be
// used outside of Dawn again. TODO(enga): Also transfer ownership in the error case so the
// caller can assume the FD is always consumed.
struct DAWN_NATIVE_EXPORT ExternalImageDescriptorFD : ExternalImageDescriptorVk {
  public:
    int memoryFD;              // A file descriptor from an export of the memory of the image
    std::vector<int> waitFDs;  // File descriptors of semaphores which will be waited on

  protected:
    using ExternalImageDescriptorVk::ExternalImageDescriptorVk;
};

// Descriptor for opaque file descriptor image import
struct DAWN_NATIVE_EXPORT ExternalImageDescriptorOpaqueFD : ExternalImageDescriptorFD {
    ExternalImageDescriptorOpaqueFD();

    VkDeviceSize allocationSize;  // Must match VkMemoryAllocateInfo from image creation
    uint32_t memoryTypeIndex;     // Must match VkMemoryAllocateInfo from image creation
};

// Descriptor for dma-buf file descriptor image import
struct DAWN_NATIVE_EXPORT ExternalImageDescriptorDmaBuf : ExternalImageDescriptorFD {
    ExternalImageDescriptorDmaBuf();

    uint32_t stride;       // Stride of the buffer in bytes
    uint64_t drmModifier;  // DRM modifier of the buffer
};

// Info struct that is written to in |ExportVulkanImage|.
struct DAWN_NATIVE_EXPORT ExternalImageExportInfoFD : ExternalImageExportInfoVk {
  public:
    // Contains the exported semaphore handles.
    std::vector<int> semaphoreHandles;

  protected:
    using ExternalImageExportInfoVk::ExternalImageExportInfoVk;
};

struct DAWN_NATIVE_EXPORT ExternalImageExportInfoOpaqueFD : ExternalImageExportInfoFD {
    ExternalImageExportInfoOpaqueFD();
};

struct DAWN_NATIVE_EXPORT ExternalImageExportInfoDmaBuf : ExternalImageExportInfoFD {
    ExternalImageExportInfoDmaBuf();
};

#endif  // __linux__

// Imports external memory into a Vulkan image. Internally, this uses external memory /
// semaphore extensions to import the image and wait on the provided synchronizaton
// primitives before the texture can be used.
// On failure, returns a nullptr.
DAWN_NATIVE_EXPORT WGPUTexture WrapVulkanImage(WGPUDevice device,
                                               const ExternalImageDescriptorVk* descriptor);

// Exports external memory from a Vulkan image. This must be called on wrapped textures
// before they are destroyed. It writes the semaphore to wait on and the old/new image
// layouts to |info|. Pass VK_IMAGE_LAYOUT_UNDEFINED as |desiredLayout| if you don't want to
// perform a layout transition.
DAWN_NATIVE_EXPORT bool ExportVulkanImage(WGPUTexture texture,
                                          VkImageLayout desiredLayout,
                                          ExternalImageExportInfoVk* info);

}  // namespace dawn::native::vulkan
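A hedged sketch (not from this CL) of the opaque-FD import/export path declared above; the FD, allocation size, and memory type index are assumed to come from the producing Vulkan context (Linux only):

#ifdef __linux__
// Hypothetical sketch: wrap an externally allocated Vulkan image, then export it again
// before the wrapped texture is destroyed.
WGPUTexture WrapAndExportSketch(WGPUDevice device,
                                const WGPUTextureDescriptor* textureDesc,
                                int memoryFD,
                                VkDeviceSize allocationSize,
                                uint32_t memoryTypeIndex) {
    dawn::native::vulkan::ExternalImageDescriptorOpaqueFD desc;
    desc.cTextureDescriptor = textureDesc;
    desc.isInitialized = false;  // Contents do not need to be preserved on import.
    desc.memoryFD = memoryFD;    // Ownership of the FD moves to Dawn on successful import.
    desc.allocationSize = allocationSize;
    desc.memoryTypeIndex = memoryTypeIndex;

    WGPUTexture texture = dawn::native::vulkan::WrapVulkanImage(device, &desc);
    if (texture == nullptr) {
        return nullptr;  // Import failed.
    }

    // Before destroying the wrapped texture, collect the release layouts and semaphores.
    dawn::native::vulkan::ExternalImageExportInfoOpaqueFD exportInfo;
    dawn::native::vulkan::ExportVulkanImage(texture, VK_IMAGE_LAYOUT_UNDEFINED, &exportInfo);
    return texture;
}
#endif  // __linux__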

View File

@@ -16,21 +16,21 @@
#define INCLUDE_DAWN_NATIVE_DAWN_NATIVE_EXPORT_H_

#if defined(DAWN_NATIVE_SHARED_LIBRARY)
#if defined(_WIN32)
#if defined(DAWN_NATIVE_IMPLEMENTATION)
#define DAWN_NATIVE_EXPORT __declspec(dllexport)
#else
#define DAWN_NATIVE_EXPORT __declspec(dllimport)
#endif
#else  // defined(_WIN32)
#if defined(DAWN_NATIVE_IMPLEMENTATION)
#define DAWN_NATIVE_EXPORT __attribute__((visibility("default")))
#else
#define DAWN_NATIVE_EXPORT
#endif
#endif  // defined(_WIN32)
#else   // defined(DAWN_NATIVE_SHARED_LIBRARY)
#define DAWN_NATIVE_EXPORT
#endif  // defined(DAWN_NATIVE_SHARED_LIBRARY)

#endif  // INCLUDE_DAWN_NATIVE_DAWN_NATIVE_EXPORT_H_

View File

@@ -24,91 +24,90 @@
namespace dawn::platform {

enum class TraceCategory {
    General,     // General trace events
    Validation,  // Dawn validation
    Recording,   // Native command recording
    GPUWork,     // Actual GPU work
};

class DAWN_PLATFORM_EXPORT CachingInterface {
  public:
    CachingInterface();
    virtual ~CachingInterface();

    // LoadData has two modes. The first mode is used to get a value which
    // corresponds to the |key|. The |valueOut| is a caller provided buffer
    // allocated to the size |valueSize| which is loaded with data of the
    // size returned. The second mode is used to query for the existence of
    // the |key| where |valueOut| is nullptr and |valueSize| must be 0.
    // The return size is non-zero if the |key| exists.
    virtual size_t LoadData(const WGPUDevice device,
                            const void* key,
                            size_t keySize,
                            void* valueOut,
                            size_t valueSize) = 0;

    // StoreData puts a |value| in the cache which corresponds to the |key|.
    virtual void StoreData(const WGPUDevice device,
                           const void* key,
                           size_t keySize,
                           const void* value,
                           size_t valueSize) = 0;

  private:
    CachingInterface(const CachingInterface&) = delete;
    CachingInterface& operator=(const CachingInterface&) = delete;
};

class DAWN_PLATFORM_EXPORT WaitableEvent {
  public:
    WaitableEvent() = default;
    virtual ~WaitableEvent() = default;
    virtual void Wait() = 0;        // Wait for completion
    virtual bool IsComplete() = 0;  // Non-blocking check if the event is complete
};

using PostWorkerTaskCallback = void (*)(void* userdata);

class DAWN_PLATFORM_EXPORT WorkerTaskPool {
  public:
    WorkerTaskPool() = default;
    virtual ~WorkerTaskPool() = default;
    virtual std::unique_ptr<WaitableEvent> PostWorkerTask(PostWorkerTaskCallback,
                                                          void* userdata) = 0;
};

class DAWN_PLATFORM_EXPORT Platform {
  public:
    Platform();
    virtual ~Platform();

    virtual const unsigned char* GetTraceCategoryEnabledFlag(TraceCategory category);

    virtual double MonotonicallyIncreasingTime();

    virtual uint64_t AddTraceEvent(char phase,
                                   const unsigned char* categoryGroupEnabled,
                                   const char* name,
                                   uint64_t id,
                                   double timestamp,
                                   int numArgs,
                                   const char** argNames,
                                   const unsigned char* argTypes,
                                   const uint64_t* argValues,
                                   unsigned char flags);

    // The |fingerprint| is provided by Dawn to inform the client to discard the Dawn caches
    // when the fingerprint changes. The returned CachingInterface is expected to outlive the
    // device which uses it to persistently cache objects.
    virtual CachingInterface* GetCachingInterface(const void* fingerprint, size_t fingerprintSize);
    virtual std::unique_ptr<WorkerTaskPool> CreateWorkerTaskPool();

  private:
    Platform(const Platform&) = delete;
    Platform& operator=(const Platform&) = delete;
};

}  // namespace dawn::platform
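A hedged sketch (not from this CL) of a minimal embedder Platform built on the declarations above:

// Hypothetical Platform subclass that only overrides the caching hook and keeps the
// default tracing, timing, and worker-pool behavior.
class NullCachePlatform : public dawn::platform::Platform {
  public:
    dawn::platform::CachingInterface* GetCachingInterface(const void* fingerprint,
                                                          size_t fingerprintSize) override {
        // Returning nullptr stands in for "no persistent cache" in this sketch; a real
        // embedder would key a cache on |fingerprint| and keep it alive past the device.
        return nullptr;
    }
};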

View File

@@ -16,21 +16,21 @@
#define INCLUDE_DAWN_PLATFORM_DAWN_PLATFORM_EXPORT_H_

#if defined(DAWN_PLATFORM_SHARED_LIBRARY)
#if defined(_WIN32)
#if defined(DAWN_PLATFORM_IMPLEMENTATION)
#define DAWN_PLATFORM_EXPORT __declspec(dllexport)
#else
#define DAWN_PLATFORM_EXPORT __declspec(dllimport)
#endif
#else  // defined(_WIN32)
#if defined(DAWN_PLATFORM_IMPLEMENTATION)
#define DAWN_PLATFORM_EXPORT __attribute__((visibility("default")))
#else
#define DAWN_PLATFORM_EXPORT
#endif
#endif  // defined(_WIN32)
#else   // defined(DAWN_PLATFORM_SHARED_LIBRARY)
#define DAWN_PLATFORM_EXPORT
#endif  // defined(DAWN_PLATFORM_SHARED_LIBRARY)

#endif  // INCLUDE_DAWN_PLATFORM_DAWN_PLATFORM_EXPORT_H_

View File

@@ -23,53 +23,52 @@
namespace dawn::wire {

class DAWN_WIRE_EXPORT CommandSerializer {
  public:
    CommandSerializer();
    virtual ~CommandSerializer();
    CommandSerializer(const CommandSerializer& rhs) = delete;
    CommandSerializer& operator=(const CommandSerializer& rhs) = delete;

    // Get space for serializing commands.
    // GetCmdSpace will never be called with a value larger than
    // what GetMaximumAllocationSize returns. Return nullptr to indicate
    // a fatal error.
    virtual void* GetCmdSpace(size_t size) = 0;
    virtual bool Flush() = 0;
    virtual size_t GetMaximumAllocationSize() const = 0;
    virtual void OnSerializeError();
};

class DAWN_WIRE_EXPORT CommandHandler {
  public:
    CommandHandler();
    virtual ~CommandHandler();
    CommandHandler(const CommandHandler& rhs) = delete;
    CommandHandler& operator=(const CommandHandler& rhs) = delete;

    virtual const volatile char* HandleCommands(const volatile char* commands, size_t size) = 0;
};

DAWN_WIRE_EXPORT size_t
SerializedWGPUDevicePropertiesSize(const WGPUDeviceProperties* deviceProperties);

DAWN_WIRE_EXPORT void SerializeWGPUDeviceProperties(const WGPUDeviceProperties* deviceProperties,
                                                    char* serializeBuffer);

DAWN_WIRE_EXPORT bool DeserializeWGPUDeviceProperties(WGPUDeviceProperties* deviceProperties,
                                                      const volatile char* deserializeBuffer,
                                                      size_t deserializeBufferSize);

DAWN_WIRE_EXPORT size_t
SerializedWGPUSupportedLimitsSize(const WGPUSupportedLimits* supportedLimits);

DAWN_WIRE_EXPORT void SerializeWGPUSupportedLimits(const WGPUSupportedLimits* supportedLimits,
                                                   char* serializeBuffer);

DAWN_WIRE_EXPORT bool DeserializeWGPUSupportedLimits(WGPUSupportedLimits* supportedLimits,
                                                     const volatile char* deserializeBuffer,
                                                     size_t deserializeBufferSize);

}  // namespace dawn::wire
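A hedged sketch (not from this CL) of a CommandSerializer implementation against the interface above; the buffer limit and in-process hand-off to a CommandHandler are arbitrary choices for illustration. Assumes <vector> and the Dawn wire headers are included.

// Hypothetical serializer that buffers commands in memory and hands them to a
// CommandHandler (for example, the server side of the wire) on Flush().
class BufferingSerializer : public dawn::wire::CommandSerializer {
  public:
    explicit BufferingSerializer(dawn::wire::CommandHandler* handler) : mHandler(handler) {}

    void* GetCmdSpace(size_t size) override {
        // Grow the buffer and return a pointer to the newly reserved region.
        size_t offset = mBuffer.size();
        mBuffer.resize(offset + size);
        return mBuffer.data() + offset;
    }

    bool Flush() override {
        const volatile char* result = mHandler->HandleCommands(mBuffer.data(), mBuffer.size());
        mBuffer.clear();
        return result != nullptr;  // nullptr from HandleCommands signals a fatal error.
    }

    size_t GetMaximumAllocationSize() const override { return 1024 * 1024; }

  private:
    dawn::wire::CommandHandler* mHandler;
    std::vector<char> mBuffer;
};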

View File

@ -23,160 +23,158 @@
namespace dawn::wire { namespace dawn::wire {
namespace client { namespace client {
class Client; class Client;
class MemoryTransferService; class MemoryTransferService;
DAWN_WIRE_EXPORT const DawnProcTable& GetProcs(); DAWN_WIRE_EXPORT const DawnProcTable& GetProcs();
} // namespace client } // namespace client
struct ReservedTexture { struct ReservedTexture {
WGPUTexture texture; WGPUTexture texture;
uint32_t id; uint32_t id;
uint32_t generation; uint32_t generation;
uint32_t deviceId; uint32_t deviceId;
uint32_t deviceGeneration; uint32_t deviceGeneration;
}; };
struct ReservedSwapChain { struct ReservedSwapChain {
WGPUSwapChain swapchain; WGPUSwapChain swapchain;
uint32_t id; uint32_t id;
uint32_t generation; uint32_t generation;
uint32_t deviceId; uint32_t deviceId;
uint32_t deviceGeneration; uint32_t deviceGeneration;
}; };
struct ReservedDevice { struct ReservedDevice {
WGPUDevice device; WGPUDevice device;
uint32_t id; uint32_t id;
uint32_t generation; uint32_t generation;
}; };
struct ReservedInstance { struct ReservedInstance {
WGPUInstance instance; WGPUInstance instance;
uint32_t id; uint32_t id;
uint32_t generation; uint32_t generation;
}; };
struct DAWN_WIRE_EXPORT WireClientDescriptor { struct DAWN_WIRE_EXPORT WireClientDescriptor {
CommandSerializer* serializer; CommandSerializer* serializer;
client::MemoryTransferService* memoryTransferService = nullptr; client::MemoryTransferService* memoryTransferService = nullptr;
}; };
class DAWN_WIRE_EXPORT WireClient : public CommandHandler { class DAWN_WIRE_EXPORT WireClient : public CommandHandler {
public:
explicit WireClient(const WireClientDescriptor& descriptor);
~WireClient() override;
const volatile char* HandleCommands(const volatile char* commands, size_t size) final;
ReservedTexture ReserveTexture(WGPUDevice device);
ReservedSwapChain ReserveSwapChain(WGPUDevice device);
ReservedDevice ReserveDevice();
ReservedInstance ReserveInstance();
void ReclaimTextureReservation(const ReservedTexture& reservation);
void ReclaimSwapChainReservation(const ReservedSwapChain& reservation);
void ReclaimDeviceReservation(const ReservedDevice& reservation);
void ReclaimInstanceReservation(const ReservedInstance& reservation);
// Disconnects the client.
// Commands allocated after this point will not be sent.
void Disconnect();
private:
std::unique_ptr<client::Client> mImpl;
};
namespace client {
class DAWN_WIRE_EXPORT MemoryTransferService {
public:
MemoryTransferService();
virtual ~MemoryTransferService();
class ReadHandle;
class WriteHandle;
// Create a handle for reading server data.
// This may fail and return nullptr.
virtual ReadHandle* CreateReadHandle(size_t) = 0;
// Create a handle for writing server data.
// This may fail and return nullptr.
virtual WriteHandle* CreateWriteHandle(size_t) = 0;
class DAWN_WIRE_EXPORT ReadHandle {
public: public:
explicit WireClient(const WireClientDescriptor& descriptor); ReadHandle();
~WireClient() override; virtual ~ReadHandle();
const volatile char* HandleCommands(const volatile char* commands, size_t size) final; // Get the required serialization size for SerializeCreate
virtual size_t SerializeCreateSize() = 0;
ReservedTexture ReserveTexture(WGPUDevice device); // Serialize the handle into |serializePointer| so it can be received by the server.
ReservedSwapChain ReserveSwapChain(WGPUDevice device); virtual void SerializeCreate(void* serializePointer) = 0;
ReservedDevice ReserveDevice();
ReservedInstance ReserveInstance();
void ReclaimTextureReservation(const ReservedTexture& reservation); // Simply return the base address of the allocation (without applying any offset)
void ReclaimSwapChainReservation(const ReservedSwapChain& reservation); // Returns nullptr if the allocation failed.
void ReclaimDeviceReservation(const ReservedDevice& reservation); // The data must live at least until the ReadHandle is destructed
void ReclaimInstanceReservation(const ReservedInstance& reservation); virtual const void* GetData() = 0;
// Disconnects the client. // Gets called when a MapReadCallback resolves.
// Commands allocated after this point will not be sent. // deserialize the data update and apply
void Disconnect(); // it to the range (offset, offset + size) of allocation
// There could be nothing to be deserialized (if using shared memory)
// Needs to check potential offset/size OOB and overflow
virtual bool DeserializeDataUpdate(const void* deserializePointer,
size_t deserializeSize,
size_t offset,
size_t size) = 0;
private: private:
std::unique_ptr<client::Client> mImpl; ReadHandle(const ReadHandle&) = delete;
ReadHandle& operator=(const ReadHandle&) = delete;
}; };
namespace client { class DAWN_WIRE_EXPORT WriteHandle {
class DAWN_WIRE_EXPORT MemoryTransferService { public:
public: WriteHandle();
MemoryTransferService(); virtual ~WriteHandle();
virtual ~MemoryTransferService();
class ReadHandle; // Get the required serialization size for SerializeCreate
class WriteHandle; virtual size_t SerializeCreateSize() = 0;
// Create a handle for reading server data. // Serialize the handle into |serializePointer| so it can be received by the server.
// This may fail and return nullptr. virtual void SerializeCreate(void* serializePointer) = 0;
virtual ReadHandle* CreateReadHandle(size_t) = 0;
// Create a handle for writing server data. // Simply return the base address of the allocation (without applying any offset)
// This may fail and return nullptr. // The data returned should be zero-initialized.
virtual WriteHandle* CreateWriteHandle(size_t) = 0; // The data returned must live at least until the WriteHandle is destructed.
// On failure, the pointer returned should be null.
virtual void* GetData() = 0;
class DAWN_WIRE_EXPORT ReadHandle { // Get the required serialization size for SerializeDataUpdate
public: virtual size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) = 0;
ReadHandle();
virtual ~ReadHandle();
// Get the required serialization size for SerializeCreate // Serialize a command to send the modified contents of
virtual size_t SerializeCreateSize() = 0; // the subrange (offset, offset + size) of the allocation at buffer unmap
// This subrange is always the whole mapped region for now
// There could be nothing to be serialized (if using shared memory)
virtual void SerializeDataUpdate(void* serializePointer, size_t offset, size_t size) = 0;
// Serialize the handle into |serializePointer| so it can be received by the server. private:
virtual void SerializeCreate(void* serializePointer) = 0; WriteHandle(const WriteHandle&) = delete;
WriteHandle& operator=(const WriteHandle&) = delete;
};
// Simply return the base address of the allocation (without applying any offset) private:
// Returns nullptr if the allocation failed. MemoryTransferService(const MemoryTransferService&) = delete;
// The data must live at least until the ReadHandle is destructed MemoryTransferService& operator=(const MemoryTransferService&) = delete;
virtual const void* GetData() = 0; };
// Gets called when a MapReadCallback resolves. // Backdoor to get the order of the ProcMap for testing
// deserialize the data update and apply DAWN_WIRE_EXPORT std::vector<const char*> GetProcMapNamesForTesting();
// it to the range (offset, offset + size) of allocation } // namespace client
// There could be nothing to be deserialized (if using shared memory)
// Needs to check potential offset/size OOB and overflow
virtual bool DeserializeDataUpdate(const void* deserializePointer,
size_t deserializeSize,
size_t offset,
size_t size) = 0;
private:
ReadHandle(const ReadHandle&) = delete;
ReadHandle& operator=(const ReadHandle&) = delete;
};
class DAWN_WIRE_EXPORT WriteHandle {
public:
WriteHandle();
virtual ~WriteHandle();
// Get the required serialization size for SerializeCreate
virtual size_t SerializeCreateSize() = 0;
// Serialize the handle into |serializePointer| so it can be received by the server.
virtual void SerializeCreate(void* serializePointer) = 0;
// Simply return the base address of the allocation (without applying any offset)
// The data returned should be zero-initialized.
// The data returned must live at least until the WriteHandle is destructed.
// On failure, the pointer returned should be null.
virtual void* GetData() = 0;
// Get the required serialization size for SerializeDataUpdate
virtual size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) = 0;
// Serialize a command to send the modified contents of
// the subrange (offset, offset + size) of the allocation at buffer unmap
// This subrange is always the whole mapped region for now
// There could be nothing to be serialized (if using shared memory)
virtual void SerializeDataUpdate(void* serializePointer,
size_t offset,
size_t size) = 0;
private:
WriteHandle(const WriteHandle&) = delete;
WriteHandle& operator=(const WriteHandle&) = delete;
};
private:
MemoryTransferService(const MemoryTransferService&) = delete;
MemoryTransferService& operator=(const MemoryTransferService&) = delete;
};
// Backdoor to get the order of the ProcMap for testing
DAWN_WIRE_EXPORT std::vector<const char*> GetProcMapNamesForTesting();
} // namespace client
} // namespace dawn::wire } // namespace dawn::wire
#endif // INCLUDE_DAWN_WIRE_WIRECLIENT_H_ #endif // INCLUDE_DAWN_WIRE_WIRECLIENT_H_
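
A minimal, hedged sketch of how an embedder might drive the client side declared above. The CommandSerializer instance is assumed to come from the embedder's transport and the header path is assumed; everything else uses only the types shown in this file.

#include <memory>

#include "dawn/wire/WireClient.h"  // assumed header path

// |serializer| is the embedder's transport; its concrete type is not shown here.
std::unique_ptr<dawn::wire::WireClient> CreateWireClient(dawn::wire::CommandSerializer* serializer) {
    dawn::wire::WireClientDescriptor desc = {};
    desc.serializer = serializer;
    desc.memoryTransferService = nullptr;  // left null to use whatever default the wire provides
    return std::make_unique<dawn::wire::WireClient>(desc);
}

// Reserve a client-side texture; the returned ids are what the embedder forwards to
// WireServer::InjectTexture on the other end of the wire.
dawn::wire::ReservedTexture ReserveTextureForInjection(dawn::wire::WireClient* client,
                                                       WGPUDevice device) {
    return client->ReserveTexture(device);
}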

View File

@ -23,126 +23,126 @@ struct DawnProcTable;
namespace dawn::wire { namespace dawn::wire {
namespace server { namespace server {
class Server; class Server;
class MemoryTransferService; class MemoryTransferService;
} // namespace server } // namespace server
struct DAWN_WIRE_EXPORT WireServerDescriptor { struct DAWN_WIRE_EXPORT WireServerDescriptor {
const DawnProcTable* procs; const DawnProcTable* procs;
CommandSerializer* serializer; CommandSerializer* serializer;
server::MemoryTransferService* memoryTransferService = nullptr; server::MemoryTransferService* memoryTransferService = nullptr;
}; };
class DAWN_WIRE_EXPORT WireServer : public CommandHandler { class DAWN_WIRE_EXPORT WireServer : public CommandHandler {
public:
explicit WireServer(const WireServerDescriptor& descriptor);
~WireServer() override;
const volatile char* HandleCommands(const volatile char* commands, size_t size) final;
bool InjectTexture(WGPUTexture texture,
uint32_t id,
uint32_t generation,
uint32_t deviceId,
uint32_t deviceGeneration);
bool InjectSwapChain(WGPUSwapChain swapchain,
uint32_t id,
uint32_t generation,
uint32_t deviceId,
uint32_t deviceGeneration);
bool InjectDevice(WGPUDevice device, uint32_t id, uint32_t generation);
bool InjectInstance(WGPUInstance instance, uint32_t id, uint32_t generation);
// Look up a device by (id, generation) pair. Returns nullptr if the generation
// has expired or the id is not found.
// The Wire does not have destroy hooks to allow an embedder to observe when an object
// has been destroyed, but in Chrome, we need to know the list of live devices so we
// can call device.Tick() on all of them periodically to ensure progress on asynchronous
// work is made. Getting this list can be done by tracking the (id, generation) of
// previously injected devices, and observing if GetDevice(id, generation) returns non-null.
WGPUDevice GetDevice(uint32_t id, uint32_t generation);
private:
std::unique_ptr<server::Server> mImpl;
};
namespace server {
class DAWN_WIRE_EXPORT MemoryTransferService {
public:
MemoryTransferService();
virtual ~MemoryTransferService();
class ReadHandle;
class WriteHandle;
// Deserialize data to create Read/Write handles. These handles are for the client
// to Read/Write data.
virtual bool DeserializeReadHandle(const void* deserializePointer,
size_t deserializeSize,
ReadHandle** readHandle) = 0;
virtual bool DeserializeWriteHandle(const void* deserializePointer,
size_t deserializeSize,
WriteHandle** writeHandle) = 0;
class DAWN_WIRE_EXPORT ReadHandle {
public: public:
explicit WireServer(const WireServerDescriptor& descriptor); ReadHandle();
~WireServer() override; virtual ~ReadHandle();
const volatile char* HandleCommands(const volatile char* commands, size_t size) final; // Return the size of the command serialized if
// SerializeDataUpdate is called with the same offset/size args
virtual size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) = 0;
bool InjectTexture(WGPUTexture texture, // Gets called when a MapReadCallback resolves.
uint32_t id, // Serialize the data update for the range (offset, offset + size) into
uint32_t generation, // |serializePointer| to the client There could be nothing to be serialized (if
uint32_t deviceId, // using shared memory)
uint32_t deviceGeneration); virtual void SerializeDataUpdate(const void* data,
bool InjectSwapChain(WGPUSwapChain swapchain, size_t offset,
uint32_t id, size_t size,
uint32_t generation, void* serializePointer) = 0;
uint32_t deviceId,
uint32_t deviceGeneration);
bool InjectDevice(WGPUDevice device, uint32_t id, uint32_t generation);
bool InjectInstance(WGPUInstance instance, uint32_t id, uint32_t generation);
// Look up a device by (id, generation) pair. Returns nullptr if the generation
// has expired or the id is not found.
// The Wire does not have destroy hooks to allow an embedder to observe when an object
// has been destroyed, but in Chrome, we need to know the list of live devices so we
// can call device.Tick() on all of them periodically to ensure progress on asynchronous
// work is made. Getting this list can be done by tracking the (id, generation) of
// previously injected devices, and observing if GetDevice(id, generation) returns non-null.
WGPUDevice GetDevice(uint32_t id, uint32_t generation);
private: private:
std::unique_ptr<server::Server> mImpl; ReadHandle(const ReadHandle&) = delete;
ReadHandle& operator=(const ReadHandle&) = delete;
}; };
namespace server { class DAWN_WIRE_EXPORT WriteHandle {
class DAWN_WIRE_EXPORT MemoryTransferService { public:
public: WriteHandle();
MemoryTransferService(); virtual ~WriteHandle();
virtual ~MemoryTransferService();
class ReadHandle; // Set the target for writes from the client. DeserializeFlush should copy data
class WriteHandle; // into the target.
void SetTarget(void* data);
// Set Staging data length for OOB check
void SetDataLength(size_t dataLength);
// Deserialize data to create Read/Write handles. These handles are for the client // This function takes in the serialized result of
// to Read/Write data. // client::MemoryTransferService::WriteHandle::SerializeDataUpdate.
virtual bool DeserializeReadHandle(const void* deserializePointer, // Needs to check potential offset/size OOB and overflow
size_t deserializeSize, virtual bool DeserializeDataUpdate(const void* deserializePointer,
ReadHandle** readHandle) = 0; size_t deserializeSize,
virtual bool DeserializeWriteHandle(const void* deserializePointer, size_t offset,
size_t deserializeSize, size_t size) = 0;
WriteHandle** writeHandle) = 0;
class DAWN_WIRE_EXPORT ReadHandle { protected:
public: void* mTargetData = nullptr;
ReadHandle(); size_t mDataLength = 0;
virtual ~ReadHandle();
// Return the size of the command serialized if private:
// SerializeDataUpdate is called with the same offset/size args WriteHandle(const WriteHandle&) = delete;
virtual size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) = 0; WriteHandle& operator=(const WriteHandle&) = delete;
};
// Gets called when a MapReadCallback resolves. private:
// Serialize the data update for the range (offset, offset + size) into MemoryTransferService(const MemoryTransferService&) = delete;
// |serializePointer| to the client There could be nothing to be serialized (if MemoryTransferService& operator=(const MemoryTransferService&) = delete;
// using shared memory) };
virtual void SerializeDataUpdate(const void* data, } // namespace server
size_t offset,
size_t size,
void* serializePointer) = 0;
private:
ReadHandle(const ReadHandle&) = delete;
ReadHandle& operator=(const ReadHandle&) = delete;
};
class DAWN_WIRE_EXPORT WriteHandle {
public:
WriteHandle();
virtual ~WriteHandle();
// Set the target for writes from the client. DeserializeFlush should copy data
// into the target.
void SetTarget(void* data);
// Set Staging data length for OOB check
void SetDataLength(size_t dataLength);
// This function takes in the serialized result of
// client::MemoryTransferService::WriteHandle::SerializeDataUpdate.
// Needs to check potential offset/size OOB and overflow
virtual bool DeserializeDataUpdate(const void* deserializePointer,
size_t deserializeSize,
size_t offset,
size_t size) = 0;
protected:
void* mTargetData = nullptr;
size_t mDataLength = 0;
private:
WriteHandle(const WriteHandle&) = delete;
WriteHandle& operator=(const WriteHandle&) = delete;
};
private:
MemoryTransferService(const MemoryTransferService&) = delete;
MemoryTransferService& operator=(const MemoryTransferService&) = delete;
};
} // namespace server
} // namespace dawn::wire } // namespace dawn::wire
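
The GetDevice() comment above describes a liveness-tracking pattern; the sketch below is an assumed embedder-side implementation of it (the header path and the surrounding bookkeeping are not part of this patch).

#include <cstdint>
#include <utility>
#include <vector>

#include "dawn/wire/WireServer.h"  // assumed header path

// |injected| holds the (id, generation) pairs previously passed to InjectDevice.
void TickLiveDevices(dawn::wire::WireServer* server,
                     std::vector<std::pair<uint32_t, uint32_t>>* injected) {
    for (auto it = injected->begin(); it != injected->end();) {
        WGPUDevice device = server->GetDevice(it->first, it->second);
        if (device == nullptr) {
            // The generation expired or the id was never known: stop tracking it.
            it = injected->erase(it);
            continue;
        }
        // Make forward progress on asynchronous work here, e.g. by calling the
        // device's Tick entry point through the DawnProcTable given to the server.
        ++it;
    }
}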

View File

@ -16,21 +16,21 @@
#define INCLUDE_DAWN_WIRE_DAWN_WIRE_EXPORT_H_ #define INCLUDE_DAWN_WIRE_DAWN_WIRE_EXPORT_H_
#if defined(DAWN_WIRE_SHARED_LIBRARY) #if defined(DAWN_WIRE_SHARED_LIBRARY)
# if defined(_WIN32) #if defined(_WIN32)
# if defined(DAWN_WIRE_IMPLEMENTATION) #if defined(DAWN_WIRE_IMPLEMENTATION)
# define DAWN_WIRE_EXPORT __declspec(dllexport) #define DAWN_WIRE_EXPORT __declspec(dllexport)
# else #else
# define DAWN_WIRE_EXPORT __declspec(dllimport) #define DAWN_WIRE_EXPORT __declspec(dllimport)
# endif #endif
# else // defined(_WIN32) #else // defined(_WIN32)
# if defined(DAWN_WIRE_IMPLEMENTATION) #if defined(DAWN_WIRE_IMPLEMENTATION)
# define DAWN_WIRE_EXPORT __attribute__((visibility("default"))) #define DAWN_WIRE_EXPORT __attribute__((visibility("default")))
# else #else
# define DAWN_WIRE_EXPORT #define DAWN_WIRE_EXPORT
# endif #endif
# endif // defined(_WIN32) #endif // defined(_WIN32)
#else // defined(DAWN_WIRE_SHARED_LIBRARY) #else // defined(DAWN_WIRE_SHARED_LIBRARY)
# define DAWN_WIRE_EXPORT #define DAWN_WIRE_EXPORT
#endif // defined(DAWN_WIRE_SHARED_LIBRARY) #endif // defined(DAWN_WIRE_SHARED_LIBRARY)
#endif // INCLUDE_DAWN_WIRE_DAWN_WIRE_EXPORT_H_ #endif // INCLUDE_DAWN_WIRE_DAWN_WIRE_EXPORT_H_

View File

@ -1,2 +0,0 @@
# http://clang.llvm.org/docs/ClangFormatStyleOptions.html
BasedOnStyle: Chromium

View File

@ -1 +0,0 @@
filter=-runtime/indentation_namespace

View File

@ -32,32 +32,32 @@
// MSVC triggers a warning in /W4 for do {} while(0). SDL worked around this by using (0,0) and // MSVC triggers a warning in /W4 for do {} while(0). SDL worked around this by using (0,0) and
// points out that it looks like an owl face. // points out that it looks like an owl face.
#if defined(DAWN_COMPILER_MSVC) #if defined(DAWN_COMPILER_MSVC)
# define DAWN_ASSERT_LOOP_CONDITION (0, 0) #define DAWN_ASSERT_LOOP_CONDITION (0, 0)
#else #else
# define DAWN_ASSERT_LOOP_CONDITION (0) #define DAWN_ASSERT_LOOP_CONDITION (0)
#endif #endif
// DAWN_ASSERT_CALLSITE_HELPER generates the actual assert code. In Debug it does what you would // DAWN_ASSERT_CALLSITE_HELPER generates the actual assert code. In Debug it does what you would
// expect of an assert and in release it tries to give hints to make the compiler generate better // expect of an assert and in release it tries to give hints to make the compiler generate better
// code. // code.
#if defined(DAWN_ENABLE_ASSERTS) #if defined(DAWN_ENABLE_ASSERTS)
# define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) \ #define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) \
do { \ do { \
if (!(condition)) { \ if (!(condition)) { \
HandleAssertionFailure(file, func, line, #condition); \ HandleAssertionFailure(file, func, line, #condition); \
} \ } \
} while (DAWN_ASSERT_LOOP_CONDITION) } while (DAWN_ASSERT_LOOP_CONDITION)
#else #else
# if defined(DAWN_COMPILER_MSVC) #if defined(DAWN_COMPILER_MSVC)
# define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) __assume(condition) #define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) __assume(condition)
# elif defined(DAWN_COMPILER_CLANG) && defined(__builtin_assume) #elif defined(DAWN_COMPILER_CLANG) && defined(__builtin_assume)
# define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) __builtin_assume(condition) #define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) __builtin_assume(condition)
# else #else
# define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) \ #define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) \
do { \ do { \
DAWN_UNUSED(sizeof(condition)); \ DAWN_UNUSED(sizeof(condition)); \
} while (DAWN_ASSERT_LOOP_CONDITION) } while (DAWN_ASSERT_LOOP_CONDITION)
# endif #endif
#endif #endif
#define DAWN_ASSERT(condition) DAWN_ASSERT_CALLSITE_HELPER(__FILE__, __func__, __LINE__, condition) #define DAWN_ASSERT(condition) DAWN_ASSERT_CALLSITE_HELPER(__FILE__, __func__, __LINE__, condition)
@ -68,8 +68,8 @@
} while (DAWN_ASSERT_LOOP_CONDITION) } while (DAWN_ASSERT_LOOP_CONDITION)
#if !defined(DAWN_SKIP_ASSERT_SHORTHANDS) #if !defined(DAWN_SKIP_ASSERT_SHORTHANDS)
# define ASSERT DAWN_ASSERT #define ASSERT DAWN_ASSERT
# define UNREACHABLE DAWN_UNREACHABLE #define UNREACHABLE DAWN_UNREACHABLE
#endif #endif
void HandleAssertionFailure(const char* file, void HandleAssertionFailure(const char* file,
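
A small usage sketch for the macros above (the function is illustrative, not from this patch): with DAWN_ENABLE_ASSERTS a failed condition reaches HandleAssertionFailure(), while release builds only turn the condition into a compiler hint.

#include <cstdint>

#include "dawn/common/Assert.h"  // assumed header path

uint32_t DivideExact(uint32_t value, uint32_t divisor) {
    // Fires HandleAssertionFailure() when asserts are enabled; otherwise this
    // becomes __assume/__builtin_assume or a no-op, as selected above.
    ASSERT(divisor != 0 && value % divisor == 0);
    return value / divisor;
}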

View File

@ -62,24 +62,18 @@ class BitSetIterator final {
uint32_t mOffset; uint32_t mOffset;
}; };
Iterator begin() const { Iterator begin() const { return Iterator(mBits); }
return Iterator(mBits); Iterator end() const { return Iterator(std::bitset<N>(0)); }
}
Iterator end() const {
return Iterator(std::bitset<N>(0));
}
private: private:
const std::bitset<N> mBits; const std::bitset<N> mBits;
}; };
template <size_t N, typename T> template <size_t N, typename T>
BitSetIterator<N, T>::BitSetIterator(const std::bitset<N>& bitset) : mBits(bitset) { BitSetIterator<N, T>::BitSetIterator(const std::bitset<N>& bitset) : mBits(bitset) {}
}
template <size_t N, typename T> template <size_t N, typename T>
BitSetIterator<N, T>::BitSetIterator(const BitSetIterator& other) : mBits(other.mBits) { BitSetIterator<N, T>::BitSetIterator(const BitSetIterator& other) : mBits(other.mBits) {}
}
template <size_t N, typename T> template <size_t N, typename T>
BitSetIterator<N, T>& BitSetIterator<N, T>::operator=(const BitSetIterator& other) { BitSetIterator<N, T>& BitSetIterator<N, T>::operator=(const BitSetIterator& other) {

View File

@ -29,50 +29,50 @@
// Clang and GCC, check for __clang__ too to catch clang-cl masquerading as MSVC // Clang and GCC, check for __clang__ too to catch clang-cl masquerading as MSVC
#if defined(__GNUC__) || defined(__clang__) #if defined(__GNUC__) || defined(__clang__)
# if defined(__clang__) #if defined(__clang__)
# define DAWN_COMPILER_CLANG #define DAWN_COMPILER_CLANG
# else #else
# define DAWN_COMPILER_GCC #define DAWN_COMPILER_GCC
# endif #endif
# if defined(__i386__) || defined(__x86_64__) #if defined(__i386__) || defined(__x86_64__)
# define DAWN_BREAKPOINT() __asm__ __volatile__("int $3\n\t") #define DAWN_BREAKPOINT() __asm__ __volatile__("int $3\n\t")
# else #else
// TODO(cwallez@chromium.org): Implement breakpoint on all supported architectures // TODO(cwallez@chromium.org): Implement breakpoint on all supported architectures
# define DAWN_BREAKPOINT() #define DAWN_BREAKPOINT()
# endif #endif
# define DAWN_BUILTIN_UNREACHABLE() __builtin_unreachable() #define DAWN_BUILTIN_UNREACHABLE() __builtin_unreachable()
# define DAWN_LIKELY(x) __builtin_expect(!!(x), 1) #define DAWN_LIKELY(x) __builtin_expect(!!(x), 1)
# define DAWN_UNLIKELY(x) __builtin_expect(!!(x), 0) #define DAWN_UNLIKELY(x) __builtin_expect(!!(x), 0)
# if !defined(__has_cpp_attribute) #if !defined(__has_cpp_attribute)
# define __has_cpp_attribute(name) 0 #define __has_cpp_attribute(name) 0
# endif #endif
# define DAWN_DECLARE_UNUSED __attribute__((unused)) #define DAWN_DECLARE_UNUSED __attribute__((unused))
# if defined(NDEBUG) #if defined(NDEBUG)
# define DAWN_FORCE_INLINE inline __attribute__((always_inline)) #define DAWN_FORCE_INLINE inline __attribute__((always_inline))
# endif #endif
# define DAWN_NOINLINE __attribute__((noinline)) #define DAWN_NOINLINE __attribute__((noinline))
// MSVC // MSVC
#elif defined(_MSC_VER) #elif defined(_MSC_VER)
# define DAWN_COMPILER_MSVC #define DAWN_COMPILER_MSVC
extern void __cdecl __debugbreak(void); extern void __cdecl __debugbreak(void);
# define DAWN_BREAKPOINT() __debugbreak() #define DAWN_BREAKPOINT() __debugbreak()
# define DAWN_BUILTIN_UNREACHABLE() __assume(false) #define DAWN_BUILTIN_UNREACHABLE() __assume(false)
# define DAWN_DECLARE_UNUSED #define DAWN_DECLARE_UNUSED
# if defined(NDEBUG) #if defined(NDEBUG)
# define DAWN_FORCE_INLINE __forceinline #define DAWN_FORCE_INLINE __forceinline
# endif #endif
# define DAWN_NOINLINE __declspec(noinline) #define DAWN_NOINLINE __declspec(noinline)
#else #else
# error "Unsupported compiler" #error "Unsupported compiler"
#endif #endif
// It seems that (void) EXPR works on all compilers to silence the unused variable warning. // It seems that (void) EXPR works on all compilers to silence the unused variable warning.
@ -82,16 +82,16 @@ extern void __cdecl __debugbreak(void);
// Add noop replacements for macros for features that aren't supported by the compiler. // Add noop replacements for macros for features that aren't supported by the compiler.
#if !defined(DAWN_LIKELY) #if !defined(DAWN_LIKELY)
# define DAWN_LIKELY(X) X #define DAWN_LIKELY(X) X
#endif #endif
#if !defined(DAWN_UNLIKELY) #if !defined(DAWN_UNLIKELY)
# define DAWN_UNLIKELY(X) X #define DAWN_UNLIKELY(X) X
#endif #endif
#if !defined(DAWN_FORCE_INLINE) #if !defined(DAWN_FORCE_INLINE)
# define DAWN_FORCE_INLINE inline #define DAWN_FORCE_INLINE inline
#endif #endif
#if !defined(DAWN_NOINLINE) #if !defined(DAWN_NOINLINE)
# define DAWN_NOINLINE #define DAWN_NOINLINE
#endif #endif
#endif // SRC_DAWN_COMMON_COMPILER_H_ #endif // SRC_DAWN_COMMON_COMPILER_H_

View File

@ -22,12 +22,8 @@
template <typename T> template <typename T>
struct CoreFoundationRefTraits { struct CoreFoundationRefTraits {
static constexpr T kNullValue = nullptr; static constexpr T kNullValue = nullptr;
static void Reference(T value) { static void Reference(T value) { CFRetain(value); }
CFRetain(value); static void Release(T value) { CFRelease(value); }
}
static void Release(T value) {
CFRelease(value);
}
}; };
template <typename T> template <typename T>

View File

@ -19,14 +19,14 @@
#include "dawn/common/Platform.h" #include "dawn/common/Platform.h"
#if DAWN_PLATFORM_WINDOWS #if DAWN_PLATFORM_WINDOWS
# include "dawn/common/windows_with_undefs.h" #include "dawn/common/windows_with_undefs.h"
# if DAWN_PLATFORM_WINUWP #if DAWN_PLATFORM_WINUWP
# include "dawn/common/WindowsUtils.h" #include "dawn/common/WindowsUtils.h"
# endif #endif
#elif DAWN_PLATFORM_POSIX #elif DAWN_PLATFORM_POSIX
# include <dlfcn.h> #include <dlfcn.h>
#else #else
# error "Unsupported platform for DynamicLib" #error "Unsupported platform for DynamicLib"
#endif #endif
DynamicLib::~DynamicLib() { DynamicLib::~DynamicLib() {
@ -48,11 +48,11 @@ bool DynamicLib::Valid() const {
bool DynamicLib::Open(const std::string& filename, std::string* error) { bool DynamicLib::Open(const std::string& filename, std::string* error) {
#if DAWN_PLATFORM_WINDOWS #if DAWN_PLATFORM_WINDOWS
# if DAWN_PLATFORM_WINUWP #if DAWN_PLATFORM_WINUWP
mHandle = LoadPackagedLibrary(UTF8ToWStr(filename.c_str()).c_str(), 0); mHandle = LoadPackagedLibrary(UTF8ToWStr(filename.c_str()).c_str(), 0);
# else #else
mHandle = LoadLibraryA(filename.c_str()); mHandle = LoadLibraryA(filename.c_str());
# endif #endif
if (mHandle == nullptr && error != nullptr) { if (mHandle == nullptr && error != nullptr) {
*error = "Windows Error: " + std::to_string(GetLastError()); *error = "Windows Error: " + std::to_string(GetLastError());
} }
@ -63,7 +63,7 @@ bool DynamicLib::Open(const std::string& filename, std::string* error) {
*error = dlerror(); *error = dlerror();
} }
#else #else
# error "Unsupported platform for DynamicLib" #error "Unsupported platform for DynamicLib"
#endif #endif
return mHandle != nullptr; return mHandle != nullptr;
@ -79,7 +79,7 @@ void DynamicLib::Close() {
#elif DAWN_PLATFORM_POSIX #elif DAWN_PLATFORM_POSIX
dlclose(mHandle); dlclose(mHandle);
#else #else
# error "Unsupported platform for DynamicLib" #error "Unsupported platform for DynamicLib"
#endif #endif
mHandle = nullptr; mHandle = nullptr;
@ -101,7 +101,7 @@ void* DynamicLib::GetProc(const std::string& procName, std::string* error) const
*error = dlerror(); *error = dlerror();
} }
#else #else
# error "Unsupported platform for DynamicLib" #error "Unsupported platform for DynamicLib"
#endif #endif
return proc; return proc;
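
A hedged sketch of using DynamicLib as wired up above. The Open() and GetProc() signatures are taken from the hunk headers; the header path and the symbol name are made up for illustration.

#include <string>

#include "dawn/common/DynamicLib.h"  // assumed header path

bool CallInitSymbol(const std::string& libName, std::string* error) {
    DynamicLib lib;
    if (!lib.Open(libName, error)) {
        return false;  // |error| already holds the platform-specific message
    }
    // GetProc() returns nullptr (and fills |error|) when the symbol is missing.
    void* proc = lib.GetProc("Initialize", error);
    if (proc == nullptr) {
        return false;
    }
    reinterpret_cast<void (*)()>(proc)();  // the library stays open while |lib| is in scope
    return true;
}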

View File

@ -20,89 +20,89 @@
#include "dawn/common/Assert.h" #include "dawn/common/Assert.h"
namespace gpu_info { namespace gpu_info {
namespace { namespace {
// Intel // Intel
// Referenced from the following Mesa source code: // Referenced from the following Mesa source code:
// https://github.com/mesa3d/mesa/blob/master/include/pci_ids/i965_pci_ids.h // https://github.com/mesa3d/mesa/blob/master/include/pci_ids/i965_pci_ids.h
// gen9 // gen9
const std::array<uint32_t, 25> Skylake = { const std::array<uint32_t, 25> Skylake = {{0x1902, 0x1906, 0x190A, 0x190B, 0x190E, 0x1912, 0x1913,
{0x1902, 0x1906, 0x190A, 0x190B, 0x190E, 0x1912, 0x1913, 0x1915, 0x1916, 0x1915, 0x1916, 0x1917, 0x191A, 0x191B, 0x191D, 0x191E,
0x1917, 0x191A, 0x191B, 0x191D, 0x191E, 0x1921, 0x1923, 0x1926, 0x1927, 0x1921, 0x1923, 0x1926, 0x1927, 0x192A, 0x192B, 0x192D,
0x192A, 0x192B, 0x192D, 0x1932, 0x193A, 0x193B, 0x193D}}; 0x1932, 0x193A, 0x193B, 0x193D}};
// gen9p5 // gen9p5
const std::array<uint32_t, 20> Kabylake = { const std::array<uint32_t, 20> Kabylake = {{0x5916, 0x5913, 0x5906, 0x5926, 0x5921, 0x5915, 0x590E,
{0x5916, 0x5913, 0x5906, 0x5926, 0x5921, 0x5915, 0x590E, 0x591E, 0x5912, 0x5917, 0x591E, 0x5912, 0x5917, 0x5902, 0x591B, 0x593B, 0x590B,
0x5902, 0x591B, 0x593B, 0x590B, 0x591A, 0x590A, 0x591D, 0x5908, 0x5923, 0x5927}}; 0x591A, 0x590A, 0x591D, 0x5908, 0x5923, 0x5927}};
const std::array<uint32_t, 17> Coffeelake = { const std::array<uint32_t, 17> Coffeelake = {{0x87CA, 0x3E90, 0x3E93, 0x3E99, 0x3E9C, 0x3E91,
{0x87CA, 0x3E90, 0x3E93, 0x3E99, 0x3E9C, 0x3E91, 0x3E92, 0x3E96, 0x3E98, 0x3E9A, 0x3E9B, 0x3E92, 0x3E96, 0x3E98, 0x3E9A, 0x3E9B, 0x3E94,
0x3E94, 0x3EA9, 0x3EA5, 0x3EA6, 0x3EA7, 0x3EA8}}; 0x3EA9, 0x3EA5, 0x3EA6, 0x3EA7, 0x3EA8}};
const std::array<uint32_t, 5> Whiskylake = {{0x3EA1, 0x3EA4, 0x3EA0, 0x3EA3, 0x3EA2}}; const std::array<uint32_t, 5> Whiskylake = {{0x3EA1, 0x3EA4, 0x3EA0, 0x3EA3, 0x3EA2}};
const std::array<uint32_t, 21> Cometlake = { const std::array<uint32_t, 21> Cometlake = {
{0x9B21, 0x9BA0, 0x9BA2, 0x9BA4, 0x9BA5, 0x9BA8, 0x9BAA, 0x9BAB, 0x9BAC, 0x9B41, 0x9BC0, {0x9B21, 0x9BA0, 0x9BA2, 0x9BA4, 0x9BA5, 0x9BA8, 0x9BAA, 0x9BAB, 0x9BAC, 0x9B41, 0x9BC0,
0x9BC2, 0x9BC4, 0x9BC5, 0x9BC6, 0x9BC8, 0x9BCA, 0x9BCB, 0x9BCC, 0x9BE6, 0x9BF6}}; 0x9BC2, 0x9BC4, 0x9BC5, 0x9BC6, 0x9BC8, 0x9BCA, 0x9BCB, 0x9BCC, 0x9BE6, 0x9BF6}};
// According to Intel graphics driver version schema, build number is generated from the // According to Intel graphics driver version schema, build number is generated from the
// last two fields. // last two fields.
// See https://www.intel.com/content/www/us/en/support/articles/000005654/graphics.html for // See https://www.intel.com/content/www/us/en/support/articles/000005654/graphics.html for
// more details. // more details.
uint32_t GetIntelD3DDriverBuildNumber(const D3DDriverVersion& driverVersion) { uint32_t GetIntelD3DDriverBuildNumber(const D3DDriverVersion& driverVersion) {
return driverVersion[2] * 10000 + driverVersion[3]; return driverVersion[2] * 10000 + driverVersion[3];
} }
} // anonymous namespace } // anonymous namespace
bool IsAMD(PCIVendorID vendorId) { bool IsAMD(PCIVendorID vendorId) {
return vendorId == kVendorID_AMD; return vendorId == kVendorID_AMD;
} }
bool IsARM(PCIVendorID vendorId) { bool IsARM(PCIVendorID vendorId) {
return vendorId == kVendorID_ARM; return vendorId == kVendorID_ARM;
} }
bool IsImgTec(PCIVendorID vendorId) { bool IsImgTec(PCIVendorID vendorId) {
return vendorId == kVendorID_ImgTec; return vendorId == kVendorID_ImgTec;
} }
bool IsIntel(PCIVendorID vendorId) { bool IsIntel(PCIVendorID vendorId) {
return vendorId == kVendorID_Intel; return vendorId == kVendorID_Intel;
} }
bool IsMesa(PCIVendorID vendorId) { bool IsMesa(PCIVendorID vendorId) {
return vendorId == kVendorID_Mesa; return vendorId == kVendorID_Mesa;
} }
bool IsNvidia(PCIVendorID vendorId) { bool IsNvidia(PCIVendorID vendorId) {
return vendorId == kVendorID_Nvidia; return vendorId == kVendorID_Nvidia;
} }
bool IsQualcomm(PCIVendorID vendorId) { bool IsQualcomm(PCIVendorID vendorId) {
return vendorId == kVendorID_Qualcomm; return vendorId == kVendorID_Qualcomm;
} }
bool IsSwiftshader(PCIVendorID vendorId, PCIDeviceID deviceId) { bool IsSwiftshader(PCIVendorID vendorId, PCIDeviceID deviceId) {
return vendorId == kVendorID_Google && deviceId == kDeviceID_Swiftshader; return vendorId == kVendorID_Google && deviceId == kDeviceID_Swiftshader;
} }
bool IsWARP(PCIVendorID vendorId, PCIDeviceID deviceId) { bool IsWARP(PCIVendorID vendorId, PCIDeviceID deviceId) {
return vendorId == kVendorID_Microsoft && deviceId == kDeviceID_WARP; return vendorId == kVendorID_Microsoft && deviceId == kDeviceID_WARP;
}
int CompareD3DDriverVersion(PCIVendorID vendorId,
const D3DDriverVersion& version1,
const D3DDriverVersion& version2) {
if (IsIntel(vendorId)) {
uint32_t buildNumber1 = GetIntelD3DDriverBuildNumber(version1);
uint32_t buildNumber2 = GetIntelD3DDriverBuildNumber(version2);
return buildNumber1 < buildNumber2 ? -1 : (buildNumber1 == buildNumber2 ? 0 : 1);
} }
int CompareD3DDriverVersion(PCIVendorID vendorId, // TODO(crbug.com/dawn/823): support other GPU vendors
const D3DDriverVersion& version1, UNREACHABLE();
const D3DDriverVersion& version2) { return 0;
if (IsIntel(vendorId)) { }
uint32_t buildNumber1 = GetIntelD3DDriverBuildNumber(version1);
uint32_t buildNumber2 = GetIntelD3DDriverBuildNumber(version2);
return buildNumber1 < buildNumber2 ? -1 : (buildNumber1 == buildNumber2 ? 0 : 1);
}
// TODO(crbug.com/dawn/823): support other GPU vendors // Intel GPUs
UNREACHABLE(); bool IsSkylake(PCIDeviceID deviceId) {
return 0; return std::find(Skylake.cbegin(), Skylake.cend(), deviceId) != Skylake.cend();
} }
bool IsKabylake(PCIDeviceID deviceId) {
// Intel GPUs return std::find(Kabylake.cbegin(), Kabylake.cend(), deviceId) != Kabylake.cend();
bool IsSkylake(PCIDeviceID deviceId) { }
return std::find(Skylake.cbegin(), Skylake.cend(), deviceId) != Skylake.cend(); bool IsCoffeelake(PCIDeviceID deviceId) {
} return (std::find(Coffeelake.cbegin(), Coffeelake.cend(), deviceId) != Coffeelake.cend()) ||
bool IsKabylake(PCIDeviceID deviceId) { (std::find(Whiskylake.cbegin(), Whiskylake.cend(), deviceId) != Whiskylake.cend()) ||
return std::find(Kabylake.cbegin(), Kabylake.cend(), deviceId) != Kabylake.cend(); (std::find(Cometlake.cbegin(), Cometlake.cend(), deviceId) != Cometlake.cend());
} }
bool IsCoffeelake(PCIDeviceID deviceId) {
return (std::find(Coffeelake.cbegin(), Coffeelake.cend(), deviceId) != Coffeelake.cend()) ||
(std::find(Whiskylake.cbegin(), Whiskylake.cend(), deviceId) != Whiskylake.cend()) ||
(std::find(Cometlake.cbegin(), Cometlake.cend(), deviceId) != Cometlake.cend());
}
} // namespace gpu_info } // namespace gpu_info

View File

@ -23,44 +23,44 @@ using PCIDeviceID = uint32_t;
namespace gpu_info { namespace gpu_info {
static constexpr PCIVendorID kVendorID_AMD = 0x1002; static constexpr PCIVendorID kVendorID_AMD = 0x1002;
static constexpr PCIVendorID kVendorID_ARM = 0x13B5; static constexpr PCIVendorID kVendorID_ARM = 0x13B5;
static constexpr PCIVendorID kVendorID_ImgTec = 0x1010; static constexpr PCIVendorID kVendorID_ImgTec = 0x1010;
static constexpr PCIVendorID kVendorID_Intel = 0x8086; static constexpr PCIVendorID kVendorID_Intel = 0x8086;
static constexpr PCIVendorID kVendorID_Mesa = 0x10005; static constexpr PCIVendorID kVendorID_Mesa = 0x10005;
static constexpr PCIVendorID kVendorID_Nvidia = 0x10DE; static constexpr PCIVendorID kVendorID_Nvidia = 0x10DE;
static constexpr PCIVendorID kVendorID_Qualcomm = 0x5143; static constexpr PCIVendorID kVendorID_Qualcomm = 0x5143;
static constexpr PCIVendorID kVendorID_Google = 0x1AE0; static constexpr PCIVendorID kVendorID_Google = 0x1AE0;
static constexpr PCIVendorID kVendorID_Microsoft = 0x1414; static constexpr PCIVendorID kVendorID_Microsoft = 0x1414;
static constexpr PCIDeviceID kDeviceID_Swiftshader = 0xC0DE; static constexpr PCIDeviceID kDeviceID_Swiftshader = 0xC0DE;
static constexpr PCIDeviceID kDeviceID_WARP = 0x8c; static constexpr PCIDeviceID kDeviceID_WARP = 0x8c;
bool IsAMD(PCIVendorID vendorId); bool IsAMD(PCIVendorID vendorId);
bool IsARM(PCIVendorID vendorId); bool IsARM(PCIVendorID vendorId);
bool IsImgTec(PCIVendorID vendorId); bool IsImgTec(PCIVendorID vendorId);
bool IsIntel(PCIVendorID vendorId); bool IsIntel(PCIVendorID vendorId);
bool IsMesa(PCIVendorID vendorId); bool IsMesa(PCIVendorID vendorId);
bool IsNvidia(PCIVendorID vendorId); bool IsNvidia(PCIVendorID vendorId);
bool IsQualcomm(PCIVendorID vendorId); bool IsQualcomm(PCIVendorID vendorId);
bool IsSwiftshader(PCIVendorID vendorId, PCIDeviceID deviceId); bool IsSwiftshader(PCIVendorID vendorId, PCIDeviceID deviceId);
bool IsWARP(PCIVendorID vendorId, PCIDeviceID deviceId); bool IsWARP(PCIVendorID vendorId, PCIDeviceID deviceId);
using D3DDriverVersion = std::array<uint16_t, 4>; using D3DDriverVersion = std::array<uint16_t, 4>;
// Do comparison between two driver versions. Currently we only support the comparison between // Do comparison between two driver versions. Currently we only support the comparison between
// Intel D3D driver versions. // Intel D3D driver versions.
// - Return -1 if build number of version1 is smaller // - Return -1 if build number of version1 is smaller
// - Return 1 if build number of version1 is bigger // - Return 1 if build number of version1 is bigger
// - Return 0 if version1 and version2 represent same driver version // - Return 0 if version1 and version2 represent same driver version
int CompareD3DDriverVersion(PCIVendorID vendorId, int CompareD3DDriverVersion(PCIVendorID vendorId,
const D3DDriverVersion& version1, const D3DDriverVersion& version1,
const D3DDriverVersion& version2); const D3DDriverVersion& version2);
// Intel architectures // Intel architectures
bool IsSkylake(PCIDeviceID deviceId); bool IsSkylake(PCIDeviceID deviceId);
bool IsKabylake(PCIDeviceID deviceId); bool IsKabylake(PCIDeviceID deviceId);
bool IsCoffeelake(PCIDeviceID deviceId); bool IsCoffeelake(PCIDeviceID deviceId);
} // namespace gpu_info } // namespace gpu_info
#endif // SRC_DAWN_COMMON_GPUINFO_H_ #endif // SRC_DAWN_COMMON_GPUINFO_H_
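
A worked example of the Intel build-number rule documented in GPUInfo.cpp above (the values are illustrative): for a driver version a.b.c.d only the last two fields matter, so 27.20.100.9466 maps to build 1009466 and 30.0.101.1660 to build 1011660.

#include "dawn/common/GPUInfo.h"  // assumed header path

bool OlderIntelDriverComparesLess() {
    gpu_info::D3DDriverVersion older = {27, 20, 100, 9466};  // build 100 * 10000 + 9466 = 1009466
    gpu_info::D3DDriverVersion newer = {30, 0, 101, 1660};   // build 101 * 10000 + 1660 = 1011660
    // Returns -1 because the first build number is smaller, per the contract documented above.
    return gpu_info::CompareD3DDriverVersion(gpu_info::kVendorID_Intel, older, newer) == -1;
}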

View File

@ -50,7 +50,7 @@ void HashCombine(size_t* hash, const T& value) {
#elif defined(DAWN_PLATFORM_32_BIT) #elif defined(DAWN_PLATFORM_32_BIT)
const size_t offset = 0x9e3779b9; const size_t offset = 0x9e3779b9;
#else #else
# error "Unsupported platform" #error "Unsupported platform"
#endif #endif
*hash ^= Hash(value) + offset + (*hash << 6) + (*hash >> 2); *hash ^= Hash(value) + offset + (*hash << 6) + (*hash >> 2);
} }
@ -89,13 +89,13 @@ size_t Hash(const std::bitset<N>& value) {
#endif #endif
namespace std { namespace std {
template <typename Index, size_t N> template <typename Index, size_t N>
struct hash<ityp::bitset<Index, N>> { struct hash<ityp::bitset<Index, N>> {
public: public:
size_t operator()(const ityp::bitset<Index, N>& value) const { size_t operator()(const ityp::bitset<Index, N>& value) const {
return Hash(static_cast<const std::bitset<N>&>(value)); return Hash(static_cast<const std::bitset<N>&>(value));
} }
}; };
} // namespace std } // namespace std
#endif // SRC_DAWN_COMMON_HASHUTILS_H_ #endif // SRC_DAWN_COMMON_HASHUTILS_H_
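
Sketch of the intended use of HashCombine from the hunk above; the header path, the enclosing namespace, and the example key are assumptions. Each call folds one more value into the running hash using the offset constant selected per platform.

#include <cstddef>
#include <cstdint>

#include "dawn/common/HashUtils.h"  // assumed header path

size_t HashSamplerKey(uint32_t addressMode, uint32_t filter, bool compare) {
    size_t hash = 0;
    HashCombine(&hash, addressMode);
    HashCombine(&hash, filter);
    HashCombine(&hash, compare);
    return hash;
}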

View File

@ -22,12 +22,8 @@
template <typename T> template <typename T>
struct IOKitRefTraits { struct IOKitRefTraits {
static constexpr T kNullValue = IO_OBJECT_NULL; static constexpr T kNullValue = IO_OBJECT_NULL;
static void Reference(T value) { static void Reference(T value) { IOObjectRetain(value); }
IOObjectRetain(value); static void Release(T value) { IOObjectRelease(value); }
}
static void Release(T value) {
IOObjectRelease(value);
}
}; };
template <typename T> template <typename T>

View File

@ -99,10 +99,8 @@ class LinkedList;
template <typename T> template <typename T>
class LinkNode { class LinkNode {
public: public:
LinkNode() : previous_(nullptr), next_(nullptr) { LinkNode() : previous_(nullptr), next_(nullptr) {}
} LinkNode(LinkNode<T>* previous, LinkNode<T>* next) : previous_(previous), next_(next) {}
LinkNode(LinkNode<T>* previous, LinkNode<T>* next) : previous_(previous), next_(next) {
}
LinkNode(LinkNode<T>&& rhs) { LinkNode(LinkNode<T>&& rhs) {
next_ = rhs.next_; next_ = rhs.next_;
@ -154,22 +152,14 @@ class LinkNode {
return true; return true;
} }
LinkNode<T>* previous() const { LinkNode<T>* previous() const { return previous_; }
return previous_;
}
LinkNode<T>* next() const { LinkNode<T>* next() const { return next_; }
return next_;
}
// Cast from the node-type to the value type. // Cast from the node-type to the value type.
const T* value() const { const T* value() const { return static_cast<const T*>(this); }
return static_cast<const T*>(this);
}
T* value() { T* value() { return static_cast<T*>(this); }
return static_cast<T*>(this);
}
private: private:
friend class LinkedList<T>; friend class LinkedList<T>;
@ -183,8 +173,7 @@ class LinkedList {
// The "root" node is self-referential, and forms the basis of a circular // The "root" node is self-referential, and forms the basis of a circular
// list (root_.next() will point back to the start of the list, // list (root_.next() will point back to the start of the list,
// and root_->previous() wraps around to the end of the list). // and root_->previous() wraps around to the end of the list).
LinkedList() : root_(&root_, &root_) { LinkedList() : root_(&root_, &root_) {}
}
~LinkedList() { ~LinkedList() {
// If any LinkNodes still exist in the LinkedList, there will be outstanding references to // If any LinkNodes still exist in the LinkedList, there will be outstanding references to
@ -194,9 +183,7 @@ class LinkedList {
} }
// Appends |e| to the end of the linked list. // Appends |e| to the end of the linked list.
void Append(LinkNode<T>* e) { void Append(LinkNode<T>* e) { e->InsertBefore(&root_); }
e->InsertBefore(&root_);
}
// Moves all elements (in order) of the list and appends them into |l| leaving the list empty. // Moves all elements (in order) of the list and appends them into |l| leaving the list empty.
void MoveInto(LinkedList<T>* l) { void MoveInto(LinkedList<T>* l) {
@ -212,21 +199,13 @@ class LinkedList {
root_.previous_ = &root_; root_.previous_ = &root_;
} }
LinkNode<T>* head() const { LinkNode<T>* head() const { return root_.next(); }
return root_.next();
}
LinkNode<T>* tail() const { LinkNode<T>* tail() const { return root_.previous(); }
return root_.previous();
}
const LinkNode<T>* end() const { const LinkNode<T>* end() const { return &root_; }
return &root_;
}
bool empty() const { bool empty() const { return head() == end(); }
return head() == end();
}
private: private:
LinkNode<T> root_; LinkNode<T> root_;
@ -235,8 +214,7 @@ class LinkedList {
template <typename T> template <typename T>
class LinkedListIterator { class LinkedListIterator {
public: public:
explicit LinkedListIterator(LinkNode<T>* node) : current_(node), next_(node->next()) { explicit LinkedListIterator(LinkNode<T>* node) : current_(node), next_(node->next()) {}
}
// We keep an early reference to the next node in the list so that even if the current element // We keep an early reference to the next node in the list so that even if the current element
// is modified or removed from the list, we have a valid next node. // is modified or removed from the list, we have a valid next node.
@ -246,13 +224,9 @@ class LinkedListIterator {
return *this; return *this;
} }
bool operator!=(const LinkedListIterator<T>& other) const { bool operator!=(const LinkedListIterator<T>& other) const { return current_ != other.current_; }
return current_ != other.current_;
}
LinkNode<T>* operator*() const { LinkNode<T>* operator*() const { return current_; }
return current_;
}
private: private:
LinkNode<T>* current_; LinkNode<T>* current_;

View File

@ -21,97 +21,96 @@
#include "dawn/common/Platform.h" #include "dawn/common/Platform.h"
#if defined(DAWN_PLATFORM_ANDROID) #if defined(DAWN_PLATFORM_ANDROID)
# include <android/log.h> #include <android/log.h>
#endif #endif
namespace dawn { namespace dawn {
namespace { namespace {
const char* SeverityName(LogSeverity severity) { const char* SeverityName(LogSeverity severity) {
switch (severity) { switch (severity) {
case LogSeverity::Debug: case LogSeverity::Debug:
return "Debug"; return "Debug";
case LogSeverity::Info: case LogSeverity::Info:
return "Info"; return "Info";
case LogSeverity::Warning: case LogSeverity::Warning:
return "Warning"; return "Warning";
case LogSeverity::Error: case LogSeverity::Error:
return "Error"; return "Error";
default: default:
UNREACHABLE(); UNREACHABLE();
return ""; return "";
} }
} }
#if defined(DAWN_PLATFORM_ANDROID) #if defined(DAWN_PLATFORM_ANDROID)
android_LogPriority AndroidLogPriority(LogSeverity severity) { android_LogPriority AndroidLogPriority(LogSeverity severity) {
switch (severity) { switch (severity) {
case LogSeverity::Debug: case LogSeverity::Debug:
return ANDROID_LOG_INFO; return ANDROID_LOG_INFO;
case LogSeverity::Info: case LogSeverity::Info:
return ANDROID_LOG_INFO; return ANDROID_LOG_INFO;
case LogSeverity::Warning: case LogSeverity::Warning:
return ANDROID_LOG_WARN; return ANDROID_LOG_WARN;
case LogSeverity::Error: case LogSeverity::Error:
return ANDROID_LOG_ERROR; return ANDROID_LOG_ERROR;
default: default:
UNREACHABLE(); UNREACHABLE();
return ANDROID_LOG_ERROR; return ANDROID_LOG_ERROR;
} }
} }
#endif // defined(DAWN_PLATFORM_ANDROID) #endif // defined(DAWN_PLATFORM_ANDROID)
} // anonymous namespace } // anonymous namespace
LogMessage::LogMessage(LogSeverity severity) : mSeverity(severity) { LogMessage::LogMessage(LogSeverity severity) : mSeverity(severity) {}
LogMessage::~LogMessage() {
std::string fullMessage = mStream.str();
// If this message has been moved, its stream is empty.
if (fullMessage.empty()) {
return;
} }
LogMessage::~LogMessage() { const char* severityName = SeverityName(mSeverity);
std::string fullMessage = mStream.str();
// If this message has been moved, its stream is empty.
if (fullMessage.empty()) {
return;
}
const char* severityName = SeverityName(mSeverity);
#if defined(DAWN_PLATFORM_ANDROID) #if defined(DAWN_PLATFORM_ANDROID)
android_LogPriority androidPriority = AndroidLogPriority(mSeverity); android_LogPriority androidPriority = AndroidLogPriority(mSeverity);
__android_log_print(androidPriority, "Dawn", "%s: %s\n", severityName, fullMessage.c_str()); __android_log_print(androidPriority, "Dawn", "%s: %s\n", severityName, fullMessage.c_str());
#else // defined(DAWN_PLATFORM_ANDROID) #else // defined(DAWN_PLATFORM_ANDROID)
FILE* outputStream = stdout; FILE* outputStream = stdout;
if (mSeverity == LogSeverity::Warning || mSeverity == LogSeverity::Error) { if (mSeverity == LogSeverity::Warning || mSeverity == LogSeverity::Error) {
outputStream = stderr; outputStream = stderr;
} }
// Note: we use fprintf because <iostream> includes static initializers. // Note: we use fprintf because <iostream> includes static initializers.
fprintf(outputStream, "%s: %s\n", severityName, fullMessage.c_str()); fprintf(outputStream, "%s: %s\n", severityName, fullMessage.c_str());
fflush(outputStream); fflush(outputStream);
#endif // defined(DAWN_PLATFORM_ANDROID) #endif // defined(DAWN_PLATFORM_ANDROID)
} }
LogMessage DebugLog() { LogMessage DebugLog() {
return LogMessage(LogSeverity::Debug); return LogMessage(LogSeverity::Debug);
} }
LogMessage InfoLog() { LogMessage InfoLog() {
return LogMessage(LogSeverity::Info); return LogMessage(LogSeverity::Info);
} }
LogMessage WarningLog() { LogMessage WarningLog() {
return LogMessage(LogSeverity::Warning); return LogMessage(LogSeverity::Warning);
} }
LogMessage ErrorLog() { LogMessage ErrorLog() {
return LogMessage(LogSeverity::Error); return LogMessage(LogSeverity::Error);
} }
LogMessage DebugLog(const char* file, const char* function, int line) { LogMessage DebugLog(const char* file, const char* function, int line) {
LogMessage message = DebugLog(); LogMessage message = DebugLog();
message << file << ":" << line << "(" << function << ")"; message << file << ":" << line << "(" << function << ")";
return message; return message;
} }
} // namespace dawn } // namespace dawn

View File

@ -47,47 +47,47 @@
namespace dawn { namespace dawn {
// Log levels mostly used to signal intent where the log message is produced and used to route // Log levels mostly used to signal intent where the log message is produced and used to route
// the message to the correct output. // the message to the correct output.
enum class LogSeverity { enum class LogSeverity {
Debug, Debug,
Info, Info,
Warning, Warning,
Error, Error,
}; };
// Essentially an ostringstream that will print itself in its destructor. // Essentially an ostringstream that will print itself in its destructor.
class LogMessage { class LogMessage {
public: public:
explicit LogMessage(LogSeverity severity); explicit LogMessage(LogSeverity severity);
~LogMessage(); ~LogMessage();
LogMessage(LogMessage&& other) = default; LogMessage(LogMessage&& other) = default;
LogMessage& operator=(LogMessage&& other) = default; LogMessage& operator=(LogMessage&& other) = default;
template <typename T> template <typename T>
LogMessage& operator<<(T&& value) { LogMessage& operator<<(T&& value) {
mStream << value; mStream << value;
return *this; return *this;
} }
private: private:
LogMessage(const LogMessage& other) = delete; LogMessage(const LogMessage& other) = delete;
LogMessage& operator=(const LogMessage& other) = delete; LogMessage& operator=(const LogMessage& other) = delete;
LogSeverity mSeverity; LogSeverity mSeverity;
std::ostringstream mStream; std::ostringstream mStream;
}; };
// Short-hands to create a LogMessage with the respective severity. // Short-hands to create a LogMessage with the respective severity.
LogMessage DebugLog(); LogMessage DebugLog();
LogMessage InfoLog(); LogMessage InfoLog();
LogMessage WarningLog(); LogMessage WarningLog();
LogMessage ErrorLog(); LogMessage ErrorLog();
// DAWN_DEBUG is a helper macro that creates a DebugLog and outputs file/line/function // DAWN_DEBUG is a helper macro that creates a DebugLog and outputs file/line/function
// information // information
LogMessage DebugLog(const char* file, const char* function, int line); LogMessage DebugLog(const char* file, const char* function, int line);
#define DAWN_DEBUG() ::dawn::DebugLog(__FILE__, __func__, __LINE__) #define DAWN_DEBUG() ::dawn::DebugLog(__FILE__, __func__, __LINE__)
} // namespace dawn } // namespace dawn
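
Brief usage sketch for the logging helpers above (the function and message are illustrative): a LogMessage buffers the streamed values and prints them, with its severity prefix, when the temporary is destroyed at the end of the statement.

#include "dawn/common/Log.h"  // assumed header path

void ReportFrameTime(double frameMs) {
    if (frameMs > 33.0) {
        dawn::WarningLog() << "Slow frame: " << frameMs << " ms";
    }
    // DAWN_DEBUG() additionally prefixes the file, line and function.
    DAWN_DEBUG() << "frame took " << frameMs << " ms";
}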

View File

@ -22,7 +22,7 @@
#include "dawn/common/Platform.h" #include "dawn/common/Platform.h"
#if defined(DAWN_COMPILER_MSVC) #if defined(DAWN_COMPILER_MSVC)
# include <intrin.h> #include <intrin.h>
#endif #endif
uint32_t ScanForward(uint32_t bits) { uint32_t ScanForward(uint32_t bits) {
@ -54,13 +54,13 @@ uint32_t Log2(uint32_t value) {
uint32_t Log2(uint64_t value) { uint32_t Log2(uint64_t value) {
ASSERT(value != 0); ASSERT(value != 0);
#if defined(DAWN_COMPILER_MSVC) #if defined(DAWN_COMPILER_MSVC)
# if defined(DAWN_PLATFORM_64_BIT) #if defined(DAWN_PLATFORM_64_BIT)
// NOLINTNEXTLINE(runtime/int) // NOLINTNEXTLINE(runtime/int)
unsigned long firstBitIndex = 0ul; unsigned long firstBitIndex = 0ul;
unsigned char ret = _BitScanReverse64(&firstBitIndex, value); unsigned char ret = _BitScanReverse64(&firstBitIndex, value);
ASSERT(ret != 0); ASSERT(ret != 0);
return firstBitIndex; return firstBitIndex;
# else // defined(DAWN_PLATFORM_64_BIT) #else // defined(DAWN_PLATFORM_64_BIT)
// NOLINTNEXTLINE(runtime/int) // NOLINTNEXTLINE(runtime/int)
unsigned long firstBitIndex = 0ul; unsigned long firstBitIndex = 0ul;
if (_BitScanReverse(&firstBitIndex, value >> 32)) { if (_BitScanReverse(&firstBitIndex, value >> 32)) {
@ -69,10 +69,10 @@ uint32_t Log2(uint64_t value) {
unsigned char ret = _BitScanReverse(&firstBitIndex, value & 0xFFFFFFFF); unsigned char ret = _BitScanReverse(&firstBitIndex, value & 0xFFFFFFFF);
ASSERT(ret != 0); ASSERT(ret != 0);
return firstBitIndex; return firstBitIndex;
# endif // defined(DAWN_PLATFORM_64_BIT) #endif // defined(DAWN_PLATFORM_64_BIT)
#else // defined(DAWN_COMPILER_MSVC) #else // defined(DAWN_COMPILER_MSVC)
return 63 - static_cast<uint32_t>(__builtin_clzll(value)); return 63 - static_cast<uint32_t>(__builtin_clzll(value));
#endif // defined(DAWN_COMPILER_MSVC) #endif // defined(DAWN_COMPILER_MSVC)
} }
uint64_t NextPowerOfTwo(uint64_t n) { uint64_t NextPowerOfTwo(uint64_t n) {
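
A few worked values for the helpers above, under the usual bit-scan semantics (ScanForward returns the index of the lowest set bit, Log2 the index of the highest). NextPowerOfTwo rounding up to the next power of two is assumed from its name, and the header path is assumed.

#include <cstdint>

#include "dawn/common/Math.h"  // assumed header path

void MathExamples() {
    uint32_t lowest = ScanForward(0xA0);      // 0xA0 = 0b1010'0000 -> lowest set bit is bit 5
    uint32_t highest = Log2(uint64_t(4096));  // 4096 = 2^12 -> 12
    uint64_t rounded = NextPowerOfTwo(100);   // assumed to round up: 100 -> 128
    (void)lowest;
    (void)highest;
    (void)rounded;
}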

View File

@ -20,7 +20,7 @@
#import <Foundation/NSObject.h> #import <Foundation/NSObject.h>
#if !defined(__OBJC__) #if !defined(__OBJC__)
# error "NSRef can only be used in Objective C/C++ code." #error "NSRef can only be used in Objective C/C++ code."
#endif #endif
// This file contains smart pointers that automatically reference and release Objective C objects // This file contains smart pointers that automatically reference and release Objective C objects
@ -67,12 +67,8 @@
template <typename T> template <typename T>
struct NSRefTraits { struct NSRefTraits {
static constexpr T kNullValue = nullptr; static constexpr T kNullValue = nullptr;
static void Reference(T value) { static void Reference(T value) { [value retain]; }
[value retain]; static void Release(T value) { [value release]; }
}
static void Release(T value) {
[value release];
}
}; };
template <typename T> template <typename T>
@ -80,13 +76,9 @@ class NSRef : public RefBase<T*, NSRefTraits<T*>> {
public: public:
using RefBase<T*, NSRefTraits<T*>>::RefBase; using RefBase<T*, NSRefTraits<T*>>::RefBase;
const T* operator*() const { const T* operator*() const { return this->Get(); }
return this->Get();
}
T* operator*() { T* operator*() { return this->Get(); }
return this->Get();
}
}; };
template <typename T> template <typename T>
@ -104,13 +96,9 @@ class NSPRef : public RefBase<T, NSRefTraits<T>> {
public: public:
using RefBase<T, NSRefTraits<T>>::RefBase; using RefBase<T, NSRefTraits<T>>::RefBase;
const T operator*() const { const T operator*() const { return this->Get(); }
return this->Get();
}
T operator*() { T operator*() { return this->Get(); }
return this->Get();
}
}; };
template <typename T> template <typename T>

View File

@ -22,17 +22,17 @@
namespace detail { namespace detail {
template <typename T> template <typename T>
inline constexpr uint32_t u32_sizeof() { inline constexpr uint32_t u32_sizeof() {
static_assert(sizeof(T) <= std::numeric_limits<uint32_t>::max()); static_assert(sizeof(T) <= std::numeric_limits<uint32_t>::max());
return uint32_t(sizeof(T)); return uint32_t(sizeof(T));
} }
template <typename T> template <typename T>
inline constexpr uint32_t u32_alignof() { inline constexpr uint32_t u32_alignof() {
static_assert(alignof(T) <= std::numeric_limits<uint32_t>::max()); static_assert(alignof(T) <= std::numeric_limits<uint32_t>::max());
return uint32_t(alignof(T)); return uint32_t(alignof(T));
} }
} // namespace detail } // namespace detail

View File

@ -16,67 +16,67 @@
#define SRC_DAWN_COMMON_PLATFORM_H_ #define SRC_DAWN_COMMON_PLATFORM_H_
#if defined(_WIN32) || defined(_WIN64) #if defined(_WIN32) || defined(_WIN64)
# include <winapifamily.h> #include <winapifamily.h>
# define DAWN_PLATFORM_WINDOWS 1 #define DAWN_PLATFORM_WINDOWS 1
# if WINAPI_FAMILY == WINAPI_FAMILY_DESKTOP_APP #if WINAPI_FAMILY == WINAPI_FAMILY_DESKTOP_APP
# define DAWN_PLATFORM_WIN32 1 #define DAWN_PLATFORM_WIN32 1
# elif WINAPI_FAMILY == WINAPI_FAMILY_PC_APP #elif WINAPI_FAMILY == WINAPI_FAMILY_PC_APP
# define DAWN_PLATFORM_WINUWP 1 #define DAWN_PLATFORM_WINUWP 1
# else #else
# error "Unsupported Windows platform." #error "Unsupported Windows platform."
# endif #endif
#elif defined(__linux__) #elif defined(__linux__)
# define DAWN_PLATFORM_LINUX 1 #define DAWN_PLATFORM_LINUX 1
# define DAWN_PLATFORM_POSIX 1 #define DAWN_PLATFORM_POSIX 1
# if defined(__ANDROID__) #if defined(__ANDROID__)
# define DAWN_PLATFORM_ANDROID 1 #define DAWN_PLATFORM_ANDROID 1
# endif #endif
#elif defined(__APPLE__) #elif defined(__APPLE__)
# define DAWN_PLATFORM_APPLE 1 #define DAWN_PLATFORM_APPLE 1
# define DAWN_PLATFORM_POSIX 1 #define DAWN_PLATFORM_POSIX 1
# include <TargetConditionals.h> #include <TargetConditionals.h>
# if TARGET_OS_IPHONE #if TARGET_OS_IPHONE
# define DAWN_PLATFORM_IOS #define DAWN_PLATFORM_IOS
# elif TARGET_OS_MAC #elif TARGET_OS_MAC
# define DAWN_PLATFORM_MACOS #define DAWN_PLATFORM_MACOS
# else #else
# error "Unsupported Apple platform." #error "Unsupported Apple platform."
# endif #endif
#elif defined(__Fuchsia__) #elif defined(__Fuchsia__)
# define DAWN_PLATFORM_FUCHSIA 1 #define DAWN_PLATFORM_FUCHSIA 1
# define DAWN_PLATFORM_POSIX 1 #define DAWN_PLATFORM_POSIX 1
#elif defined(__EMSCRIPTEN__) #elif defined(__EMSCRIPTEN__)
# define DAWN_PLATFORM_EMSCRIPTEN 1 #define DAWN_PLATFORM_EMSCRIPTEN 1
# define DAWN_PLATFORM_POSIX 1 #define DAWN_PLATFORM_POSIX 1
#else #else
# error "Unsupported platform." #error "Unsupported platform."
#endif #endif
// Distinguish mips32. // Distinguish mips32.
#if defined(__mips__) && (_MIPS_SIM == _ABIO32) && !defined(__mips32__) #if defined(__mips__) && (_MIPS_SIM == _ABIO32) && !defined(__mips32__)
# define __mips32__ #define __mips32__
#endif #endif
// Distinguish mips64. // Distinguish mips64.
#if defined(__mips__) && (_MIPS_SIM == _ABI64) && !defined(__mips64__) #if defined(__mips__) && (_MIPS_SIM == _ABI64) && !defined(__mips64__)
# define __mips64__ #define __mips64__
#endif #endif
#if defined(_WIN64) || defined(__aarch64__) || defined(__x86_64__) || defined(__mips64__) || \ #if defined(_WIN64) || defined(__aarch64__) || defined(__x86_64__) || defined(__mips64__) || \
defined(__s390x__) || defined(__PPC64__) defined(__s390x__) || defined(__PPC64__)
# define DAWN_PLATFORM_64_BIT 1 #define DAWN_PLATFORM_64_BIT 1
static_assert(sizeof(sizeof(char)) == 8, "Expect sizeof(size_t) == 8"); static_assert(sizeof(sizeof(char)) == 8, "Expect sizeof(size_t) == 8");
#elif defined(_WIN32) || defined(__arm__) || defined(__i386__) || defined(__mips32__) || \ #elif defined(_WIN32) || defined(__arm__) || defined(__i386__) || defined(__mips32__) || \
defined(__s390__) || defined(__EMSCRIPTEN__) defined(__s390__) || defined(__EMSCRIPTEN__)
# define DAWN_PLATFORM_32_BIT 1 #define DAWN_PLATFORM_32_BIT 1
static_assert(sizeof(sizeof(char)) == 4, "Expect sizeof(size_t) == 4"); static_assert(sizeof(sizeof(char)) == 4, "Expect sizeof(size_t) == 4");
#else #else
# error "Unsupported platform" #error "Unsupported platform"
#endif #endif
#endif // SRC_DAWN_COMMON_PLATFORM_H_ #endif // SRC_DAWN_COMMON_PLATFORM_H_
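
For reference, a minimal sketch of how these platform macros are meant to be consumed; the GetPathSeparator() helper is hypothetical and not part of this change:

#include "dawn/common/Platform.h"

// Hypothetical helper: pick a path separator based on the detected platform.
const char* GetPathSeparator() {
#if defined(DAWN_PLATFORM_WINDOWS)
    return "\\";
#elif defined(DAWN_PLATFORM_POSIX)
    return "/";
#else
#error "Implement GetPathSeparator for your platform."
#endif
}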

View File

@ -36,17 +36,13 @@ template <typename T, typename Traits>
class RefBase { class RefBase {
public: public:
// Default constructor and destructor. // Default constructor and destructor.
RefBase() : mValue(Traits::kNullValue) { RefBase() : mValue(Traits::kNullValue) {}
}
~RefBase() { ~RefBase() { Release(mValue); }
Release(mValue);
}
// Constructors from nullptr. // Constructors from nullptr.
// NOLINTNEXTLINE(runtime/explicit) // NOLINTNEXTLINE(runtime/explicit)
constexpr RefBase(std::nullptr_t) : RefBase() { constexpr RefBase(std::nullptr_t) : RefBase() {}
}
RefBase<T, Traits>& operator=(std::nullptr_t) { RefBase<T, Traits>& operator=(std::nullptr_t) {
Set(Traits::kNullValue); Set(Traits::kNullValue);
@ -55,9 +51,7 @@ class RefBase {
// Constructors from a value T. // Constructors from a value T.
// NOLINTNEXTLINE(runtime/explicit) // NOLINTNEXTLINE(runtime/explicit)
RefBase(T value) : mValue(value) { RefBase(T value) : mValue(value) { Reference(value); }
Reference(value);
}
RefBase<T, Traits>& operator=(const T& value) { RefBase<T, Traits>& operator=(const T& value) {
Set(value); Set(value);
@ -65,18 +59,14 @@ class RefBase {
} }
// Constructors from a RefBase<T> // Constructors from a RefBase<T>
RefBase(const RefBase<T, Traits>& other) : mValue(other.mValue) { RefBase(const RefBase<T, Traits>& other) : mValue(other.mValue) { Reference(other.mValue); }
Reference(other.mValue);
}
RefBase<T, Traits>& operator=(const RefBase<T, Traits>& other) { RefBase<T, Traits>& operator=(const RefBase<T, Traits>& other) {
Set(other.mValue); Set(other.mValue);
return *this; return *this;
} }
RefBase(RefBase<T, Traits>&& other) { RefBase(RefBase<T, Traits>&& other) { mValue = other.Detach(); }
mValue = other.Detach();
}
RefBase<T, Traits>& operator=(RefBase<T, Traits>&& other) { RefBase<T, Traits>& operator=(RefBase<T, Traits>&& other) {
if (&other != this) { if (&other != this) {
@ -113,28 +103,16 @@ class RefBase {
} }
// Comparison operators. // Comparison operators.
bool operator==(const T& other) const { bool operator==(const T& other) const { return mValue == other; }
return mValue == other;
}
bool operator!=(const T& other) const { bool operator!=(const T& other) const { return mValue != other; }
return mValue != other;
}
const T operator->() const { const T operator->() const { return mValue; }
return mValue; T operator->() { return mValue; }
}
T operator->() {
return mValue;
}
// Smart pointer methods. // Smart pointer methods.
const T& Get() const { const T& Get() const { return mValue; }
return mValue; T& Get() { return mValue; }
}
T& Get() {
return mValue;
}
[[nodiscard]] T Detach() { [[nodiscard]] T Detach() {
T value{std::move(mValue)}; T value{std::move(mValue)};

View File

@ -45,12 +45,8 @@ class RefCounted {
template <typename T> template <typename T>
struct RefCountedTraits { struct RefCountedTraits {
static constexpr T* kNullValue = nullptr; static constexpr T* kNullValue = nullptr;
static void Reference(T* value) { static void Reference(T* value) { value->Reference(); }
value->Reference(); static void Release(T* value) { value->Release(); }
}
static void Release(T* value) {
value->Release();
}
}; };
template <typename T> template <typename T>

View File

@ -17,14 +17,14 @@
// Implementation details of the tagged pointer Results // Implementation details of the tagged pointer Results
namespace detail { namespace detail {
intptr_t MakePayload(const void* pointer, PayloadType type) { intptr_t MakePayload(const void* pointer, PayloadType type) {
intptr_t payload = reinterpret_cast<intptr_t>(pointer); intptr_t payload = reinterpret_cast<intptr_t>(pointer);
ASSERT((payload & 3) == 0); ASSERT((payload & 3) == 0);
return payload | type; return payload | type;
} }
PayloadType GetPayloadType(intptr_t payload) { PayloadType GetPayloadType(intptr_t payload) {
return static_cast<PayloadType>(payload & 3); return static_cast<PayloadType>(payload & 3);
} }
} // namespace detail } // namespace detail

View File

@ -63,7 +63,7 @@ class [[nodiscard]] Result<void, E> {
Result(); Result();
Result(std::unique_ptr<E> error); Result(std::unique_ptr<E> error);
Result(Result<void, E> && other); Result(Result<void, E>&& other);
Result<void, E>& operator=(Result<void, E>&& other); Result<void, E>& operator=(Result<void, E>&& other);
~Result(); ~Result();
@ -89,23 +89,23 @@ constexpr size_t alignof_if_defined_else_default<T, Default, decltype(alignof(T)
// tagged pointer. The tag for Success is 0 so that returning the value is fastest. // tagged pointer. The tag for Success is 0 so that returning the value is fastest.
namespace detail { namespace detail {
// Utility functions to manipulate the tagged pointer. Some of them don't need to be templated // Utility functions to manipulate the tagged pointer. Some of them don't need to be templated
// but we really want them inlined so we keep them in the headers // but we really want them inlined so we keep them in the headers
enum PayloadType { enum PayloadType {
Success = 0, Success = 0,
Error = 1, Error = 1,
Empty = 2, Empty = 2,
}; };
intptr_t MakePayload(const void* pointer, PayloadType type); intptr_t MakePayload(const void* pointer, PayloadType type);
PayloadType GetPayloadType(intptr_t payload); PayloadType GetPayloadType(intptr_t payload);
template <typename T> template <typename T>
static T* GetSuccessFromPayload(intptr_t payload); static T* GetSuccessFromPayload(intptr_t payload);
template <typename E> template <typename E>
static E* GetErrorFromPayload(intptr_t payload); static E* GetErrorFromPayload(intptr_t payload);
constexpr static intptr_t kEmptyPayload = Empty; constexpr static intptr_t kEmptyPayload = Empty;
} // namespace detail } // namespace detail
template <typename T, typename E> template <typename T, typename E>
@ -116,12 +116,12 @@ class [[nodiscard]] Result<T*, E> {
static_assert(alignof_if_defined_else_default<E, 4> >= 4, static_assert(alignof_if_defined_else_default<E, 4> >= 4,
"Result<T*, E*> reserves two bits for tagging pointers"); "Result<T*, E*> reserves two bits for tagging pointers");
Result(T * success); Result(T* success);
Result(std::unique_ptr<E> error); Result(std::unique_ptr<E> error);
// Support returning a Result<T*, E*> from a Result<TChild*, E*> // Support returning a Result<T*, E*> from a Result<TChild*, E*>
template <typename TChild> template <typename TChild>
Result(Result<TChild*, E> && other); Result(Result<TChild*, E>&& other);
template <typename TChild> template <typename TChild>
Result<T*, E>& operator=(Result<TChild*, E>&& other); Result<T*, E>& operator=(Result<TChild*, E>&& other);
@ -151,7 +151,7 @@ class [[nodiscard]] Result<const T*, E> {
Result(const T* success); Result(const T* success);
Result(std::unique_ptr<E> error); Result(std::unique_ptr<E> error);
Result(Result<const T*, E> && other); Result(Result<const T*, E>&& other);
Result<const T*, E>& operator=(Result<const T*, E>&& other); Result<const T*, E>& operator=(Result<const T*, E>&& other);
~Result(); ~Result();
@ -178,13 +178,13 @@ class [[nodiscard]] Result<Ref<T>, E> {
"Result<Ref<T>, E> reserves two bits for tagging pointers"); "Result<Ref<T>, E> reserves two bits for tagging pointers");
template <typename U> template <typename U>
Result(Ref<U> && success); Result(Ref<U>&& success);
template <typename U> template <typename U>
Result(const Ref<U>& success); Result(const Ref<U>& success);
Result(std::unique_ptr<E> error); Result(std::unique_ptr<E> error);
template <typename U> template <typename U>
Result(Result<Ref<U>, E> && other); Result(Result<Ref<U>, E>&& other);
template <typename U> template <typename U>
Result<Ref<U>, E>& operator=(Result<Ref<U>, E>&& other); Result<Ref<U>, E>& operator=(Result<Ref<U>, E>&& other);
@ -209,10 +209,10 @@ class [[nodiscard]] Result<Ref<T>, E> {
template <typename T, typename E> template <typename T, typename E>
class [[nodiscard]] Result { class [[nodiscard]] Result {
public: public:
Result(T && success); Result(T&& success);
Result(std::unique_ptr<E> error); Result(std::unique_ptr<E> error);
Result(Result<T, E> && other); Result(Result<T, E>&& other);
Result<T, E>& operator=(Result<T, E>&& other); Result<T, E>& operator=(Result<T, E>&& other);
~Result(); ~Result();
@ -237,16 +237,13 @@ class [[nodiscard]] Result {
// Implementation of Result<void, E> // Implementation of Result<void, E>
template <typename E> template <typename E>
Result<void, E>::Result() { Result<void, E>::Result() {}
}
template <typename E> template <typename E>
Result<void, E>::Result(std::unique_ptr<E> error) : mError(std::move(error)) { Result<void, E>::Result(std::unique_ptr<E> error) : mError(std::move(error)) {}
}
template <typename E> template <typename E>
Result<void, E>::Result(Result<void, E>&& other) : mError(std::move(other.mError)) { Result<void, E>::Result(Result<void, E>&& other) : mError(std::move(other.mError)) {}
}
template <typename E> template <typename E>
Result<void, E>& Result<void, E>::operator=(Result<void, E>&& other) { Result<void, E>& Result<void, E>::operator=(Result<void, E>&& other) {
@ -271,8 +268,7 @@ bool Result<void, E>::IsSuccess() const {
} }
template <typename E> template <typename E>
void Result<void, E>::AcquireSuccess() { void Result<void, E>::AcquireSuccess() {}
}
template <typename E> template <typename E>
std::unique_ptr<E> Result<void, E>::AcquireError() { std::unique_ptr<E> Result<void, E>::AcquireError() {
@ -282,29 +278,27 @@ std::unique_ptr<E> Result<void, E>::AcquireError() {
// Implementation details of the tagged pointer Results // Implementation details of the tagged pointer Results
namespace detail { namespace detail {
template <typename T> template <typename T>
T* GetSuccessFromPayload(intptr_t payload) { T* GetSuccessFromPayload(intptr_t payload) {
ASSERT(GetPayloadType(payload) == Success); ASSERT(GetPayloadType(payload) == Success);
return reinterpret_cast<T*>(payload); return reinterpret_cast<T*>(payload);
} }
template <typename E> template <typename E>
E* GetErrorFromPayload(intptr_t payload) { E* GetErrorFromPayload(intptr_t payload) {
ASSERT(GetPayloadType(payload) == Error); ASSERT(GetPayloadType(payload) == Error);
return reinterpret_cast<E*>(payload ^ 1); return reinterpret_cast<E*>(payload ^ 1);
} }
} // namespace detail } // namespace detail
// Implementation of Result<T*, E> // Implementation of Result<T*, E>
template <typename T, typename E> template <typename T, typename E>
Result<T*, E>::Result(T* success) : mPayload(detail::MakePayload(success, detail::Success)) { Result<T*, E>::Result(T* success) : mPayload(detail::MakePayload(success, detail::Success)) {}
}
template <typename T, typename E> template <typename T, typename E>
Result<T*, E>::Result(std::unique_ptr<E> error) Result<T*, E>::Result(std::unique_ptr<E> error)
: mPayload(detail::MakePayload(error.release(), detail::Error)) { : mPayload(detail::MakePayload(error.release(), detail::Error)) {}
}
template <typename T, typename E> template <typename T, typename E>
template <typename TChild> template <typename TChild>
@ -355,13 +349,11 @@ std::unique_ptr<E> Result<T*, E>::AcquireError() {
// Implementation of Result<const T*, E*> // Implementation of Result<const T*, E*>
template <typename T, typename E> template <typename T, typename E>
Result<const T*, E>::Result(const T* success) Result<const T*, E>::Result(const T* success)
: mPayload(detail::MakePayload(success, detail::Success)) { : mPayload(detail::MakePayload(success, detail::Success)) {}
}
template <typename T, typename E> template <typename T, typename E>
Result<const T*, E>::Result(std::unique_ptr<E> error) Result<const T*, E>::Result(std::unique_ptr<E> error)
: mPayload(detail::MakePayload(error.release(), detail::Error)) { : mPayload(detail::MakePayload(error.release(), detail::Error)) {}
}
template <typename T, typename E> template <typename T, typename E>
Result<const T*, E>::Result(Result<const T*, E>&& other) : mPayload(other.mPayload) { Result<const T*, E>::Result(Result<const T*, E>&& other) : mPayload(other.mPayload) {
@ -415,13 +407,11 @@ Result<Ref<T>, E>::Result(Ref<U>&& success)
template <typename T, typename E> template <typename T, typename E>
template <typename U> template <typename U>
Result<Ref<T>, E>::Result(const Ref<U>& success) : Result(Ref<U>(success)) { Result<Ref<T>, E>::Result(const Ref<U>& success) : Result(Ref<U>(success)) {}
}
template <typename T, typename E> template <typename T, typename E>
Result<Ref<T>, E>::Result(std::unique_ptr<E> error) Result<Ref<T>, E>::Result(std::unique_ptr<E> error)
: mPayload(detail::MakePayload(error.release(), detail::Error)) { : mPayload(detail::MakePayload(error.release(), detail::Error)) {}
}
template <typename T, typename E> template <typename T, typename E>
template <typename U> template <typename U>
@ -473,12 +463,10 @@ std::unique_ptr<E> Result<Ref<T>, E>::AcquireError() {
// Implementation of Result<T, E> // Implementation of Result<T, E>
template <typename T, typename E> template <typename T, typename E>
Result<T, E>::Result(T&& success) : mType(Success), mSuccess(std::move(success)) { Result<T, E>::Result(T&& success) : mType(Success), mSuccess(std::move(success)) {}
}
template <typename T, typename E> template <typename T, typename E>
Result<T, E>::Result(std::unique_ptr<E> error) : mType(Error), mError(std::move(error)) { Result<T, E>::Result(std::unique_ptr<E> error) : mType(Error), mError(std::move(error)) {}
}
template <typename T, typename E> template <typename T, typename E>
Result<T, E>::~Result() { Result<T, E>::~Result() {
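
For reference, a minimal sketch of the intended call pattern for the value specialization Result<T, E>; ParseNumber() is a made-up example, the header path is assumed, and IsSuccess() is assumed to mirror the Result<void, E> query shown above:

#include <cstdlib>
#include <memory>
#include <string>

#include "dawn/common/Result.h"  // assumed path

Result<int, std::string> ParseNumber(const char* text) {
    if (text == nullptr) {
        return {std::make_unique<std::string>("null input")};  // error path takes a unique_ptr<E>
    }
    return {std::atoi(text)};                                  // success path takes a T&&
}

void Example() {
    Result<int, std::string> r = ParseNumber("42");
    if (r.IsSuccess()) {
        int value = r.AcquireSuccess();
        (void)value;
    } else {
        std::unique_ptr<std::string> error = r.AcquireError();
        (void)error;
    }
}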

View File

@ -193,8 +193,7 @@ typename SerialStorage<Derived>::StorageIterator SerialStorage<Derived>::FindUpT
template <typename Derived> template <typename Derived>
SerialStorage<Derived>::BeginEnd::BeginEnd(typename SerialStorage<Derived>::StorageIterator start, SerialStorage<Derived>::BeginEnd::BeginEnd(typename SerialStorage<Derived>::StorageIterator start,
typename SerialStorage<Derived>::StorageIterator end) typename SerialStorage<Derived>::StorageIterator end)
: mStartIt(start), mEndIt(end) { : mStartIt(start), mEndIt(end) {}
}
template <typename Derived> template <typename Derived>
typename SerialStorage<Derived>::Iterator SerialStorage<Derived>::BeginEnd::begin() const { typename SerialStorage<Derived>::Iterator SerialStorage<Derived>::BeginEnd::begin() const {
@ -210,8 +209,7 @@ typename SerialStorage<Derived>::Iterator SerialStorage<Derived>::BeginEnd::end(
template <typename Derived> template <typename Derived>
SerialStorage<Derived>::Iterator::Iterator(typename SerialStorage<Derived>::StorageIterator start) SerialStorage<Derived>::Iterator::Iterator(typename SerialStorage<Derived>::StorageIterator start)
: mStorageIterator(start), mSerialIterator(nullptr) { : mStorageIterator(start), mSerialIterator(nullptr) {}
}
template <typename Derived> template <typename Derived>
typename SerialStorage<Derived>::Iterator& SerialStorage<Derived>::Iterator::operator++() { typename SerialStorage<Derived>::Iterator& SerialStorage<Derived>::Iterator::operator++() {
@ -257,8 +255,7 @@ template <typename Derived>
SerialStorage<Derived>::ConstBeginEnd::ConstBeginEnd( SerialStorage<Derived>::ConstBeginEnd::ConstBeginEnd(
typename SerialStorage<Derived>::ConstStorageIterator start, typename SerialStorage<Derived>::ConstStorageIterator start,
typename SerialStorage<Derived>::ConstStorageIterator end) typename SerialStorage<Derived>::ConstStorageIterator end)
: mStartIt(start), mEndIt(end) { : mStartIt(start), mEndIt(end) {}
}
template <typename Derived> template <typename Derived>
typename SerialStorage<Derived>::ConstIterator SerialStorage<Derived>::ConstBeginEnd::begin() typename SerialStorage<Derived>::ConstIterator SerialStorage<Derived>::ConstBeginEnd::begin()
@ -276,8 +273,7 @@ typename SerialStorage<Derived>::ConstIterator SerialStorage<Derived>::ConstBegi
template <typename Derived> template <typename Derived>
SerialStorage<Derived>::ConstIterator::ConstIterator( SerialStorage<Derived>::ConstIterator::ConstIterator(
typename SerialStorage<Derived>::ConstStorageIterator start) typename SerialStorage<Derived>::ConstStorageIterator start)
: mStorageIterator(start), mSerialIterator(nullptr) { : mStorageIterator(start), mSerialIterator(nullptr) {}
}
template <typename Derived> template <typename Derived>
typename SerialStorage<Derived>::ConstIterator& typename SerialStorage<Derived>::ConstIterator&

View File

@ -25,19 +25,16 @@
// IndexLinkNode // IndexLinkNode
SlabAllocatorImpl::IndexLinkNode::IndexLinkNode(Index index, Index nextIndex) SlabAllocatorImpl::IndexLinkNode::IndexLinkNode(Index index, Index nextIndex)
: index(index), nextIndex(nextIndex) { : index(index), nextIndex(nextIndex) {}
}
// Slab // Slab
SlabAllocatorImpl::Slab::Slab(char allocation[], IndexLinkNode* head) SlabAllocatorImpl::Slab::Slab(char allocation[], IndexLinkNode* head)
: allocation(allocation), freeList(head), prev(nullptr), next(nullptr), blocksInUse(0) { : allocation(allocation), freeList(head), prev(nullptr), next(nullptr), blocksInUse(0) {}
}
SlabAllocatorImpl::Slab::Slab(Slab&& rhs) = default; SlabAllocatorImpl::Slab::Slab(Slab&& rhs) = default;
SlabAllocatorImpl::SentinelSlab::SentinelSlab() : Slab(nullptr, nullptr) { SlabAllocatorImpl::SentinelSlab::SentinelSlab() : Slab(nullptr, nullptr) {}
}
SlabAllocatorImpl::SentinelSlab::SentinelSlab(SentinelSlab&& rhs) = default; SlabAllocatorImpl::SentinelSlab::SentinelSlab(SentinelSlab&& rhs) = default;
@ -83,8 +80,7 @@ SlabAllocatorImpl::SlabAllocatorImpl(SlabAllocatorImpl&& rhs)
mTotalAllocationSize(rhs.mTotalAllocationSize), mTotalAllocationSize(rhs.mTotalAllocationSize),
mAvailableSlabs(std::move(rhs.mAvailableSlabs)), mAvailableSlabs(std::move(rhs.mAvailableSlabs)),
mFullSlabs(std::move(rhs.mFullSlabs)), mFullSlabs(std::move(rhs.mFullSlabs)),
mRecycledSlabs(std::move(rhs.mRecycledSlabs)) { mRecycledSlabs(std::move(rhs.mRecycledSlabs)) {}
}
SlabAllocatorImpl::~SlabAllocatorImpl() = default; SlabAllocatorImpl::~SlabAllocatorImpl() = default;

View File

@ -168,8 +168,7 @@ class SlabAllocator : public SlabAllocatorImpl {
SlabAllocator(size_t totalObjectBytes, SlabAllocator(size_t totalObjectBytes,
uint32_t objectSize = u32_sizeof<T>, uint32_t objectSize = u32_sizeof<T>,
uint32_t objectAlignment = u32_alignof<T>) uint32_t objectAlignment = u32_alignof<T>)
: SlabAllocatorImpl(totalObjectBytes / objectSize, objectSize, objectAlignment) { : SlabAllocatorImpl(totalObjectBytes / objectSize, objectSize, objectAlignment) {}
}
template <typename... Args> template <typename... Args>
T* Allocate(Args&&... args) { T* Allocate(Args&&... args) {
@ -177,9 +176,7 @@ class SlabAllocator : public SlabAllocatorImpl {
return new (ptr) T(std::forward<Args>(args)...); return new (ptr) T(std::forward<Args>(args)...);
} }
void Deallocate(T* object) { void Deallocate(T* object) { SlabAllocatorImpl::Deallocate(object); }
SlabAllocatorImpl::Deallocate(object);
}
}; };
#endif // SRC_DAWN_COMMON_SLABALLOCATOR_H_ #endif // SRC_DAWN_COMMON_SLABALLOCATOR_H_
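
For reference, a minimal sketch of the Allocate/Deallocate pattern above; the Node type and the 4096-byte budget are illustrative, and any extra requirements SlabAllocatorImpl places on T are out of scope here:

#include "dawn/common/SlabAllocator.h"

struct Node {
    explicit Node(int v) : value(v) {}
    int value;
};

void Example() {
    SlabAllocator<Node> allocator(4096);  // total object bytes; size/alignment default to u32_sizeof/u32_alignof
    Node* n = allocator.Allocate(42);     // placement-new constructs Node(42) in a free block
    allocator.Deallocate(n);              // returns the block to the slab's free list
}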

View File

@ -41,16 +41,11 @@ class StackAllocator : public std::allocator<T> {
// maintaining this for as long as any containers using this allocator are // maintaining this for as long as any containers using this allocator are
// live. // live.
struct Source { struct Source {
Source() : used_stack_buffer_(false) { Source() : used_stack_buffer_(false) {}
}
// Casts the buffer in its right type. // Casts the buffer in its right type.
T* stack_buffer() { T* stack_buffer() { return reinterpret_cast<T*>(stack_buffer_); }
return reinterpret_cast<T*>(stack_buffer_); const T* stack_buffer() const { return reinterpret_cast<const T*>(&stack_buffer_); }
}
const T* stack_buffer() const {
return reinterpret_cast<const T*>(&stack_buffer_);
}
// The buffer itself. It is not of type T because we don't want the // The buffer itself. It is not of type T because we don't want the
// constructors and destructors to be automatically called. Define a POD // constructors and destructors to be automatically called. Define a POD
@ -73,8 +68,7 @@ class StackAllocator : public std::allocator<T> {
// For the straight up copy c-tor, we can share storage. // For the straight up copy c-tor, we can share storage.
StackAllocator(const StackAllocator<T, stack_capacity>& rhs) StackAllocator(const StackAllocator<T, stack_capacity>& rhs)
: std::allocator<T>(), source_(rhs.source_) { : std::allocator<T>(), source_(rhs.source_) {}
}
// ISO C++ requires the following constructor to be defined, // ISO C++ requires the following constructor to be defined,
// and std::vector in VC++2008SP1 Release fails with an error // and std::vector in VC++2008SP1 Release fails with an error
@ -84,18 +78,15 @@ class StackAllocator : public std::allocator<T> {
// no guarantee that the Source buffer of Ts is large enough // no guarantee that the Source buffer of Ts is large enough
// for Us. // for Us.
template <typename U, size_t other_capacity> template <typename U, size_t other_capacity>
StackAllocator(const StackAllocator<U, other_capacity>& other) : source_(nullptr) { StackAllocator(const StackAllocator<U, other_capacity>& other) : source_(nullptr) {}
}
// This constructor must exist. It creates a default allocator that doesn't // This constructor must exist. It creates a default allocator that doesn't
// actually have a stack buffer. glibc's std::string() will compare the // actually have a stack buffer. glibc's std::string() will compare the
// current allocator against the default-constructed allocator, so this // current allocator against the default-constructed allocator, so this
// should be fast. // should be fast.
StackAllocator() : source_(nullptr) { StackAllocator() : source_(nullptr) {}
}
explicit StackAllocator(Source* source) : source_(source) { explicit StackAllocator(Source* source) : source_(source) {}
}
// Actually do the allocation. Use the stack buffer if nobody has used it yet // Actually do the allocation. Use the stack buffer if nobody has used it yet
// and the size requested fits. Otherwise, fall through to the standard // and the size requested fits. Otherwise, fall through to the standard
@ -154,28 +145,18 @@ class StackContainer {
// shorter lifetimes than the source. The copy will share the same allocator // shorter lifetimes than the source. The copy will share the same allocator
// and therefore the same stack buffer as the original. Use std::copy to // and therefore the same stack buffer as the original. Use std::copy to
// copy into a "real" container for longer-lived objects. // copy into a "real" container for longer-lived objects.
ContainerType& container() { ContainerType& container() { return container_; }
return container_; const ContainerType& container() const { return container_; }
}
const ContainerType& container() const {
return container_;
}
// Support operator-> to get to the container. This allows nicer syntax like: // Support operator-> to get to the container. This allows nicer syntax like:
// StackContainer<...> foo; // StackContainer<...> foo;
// std::sort(foo->begin(), foo->end()); // std::sort(foo->begin(), foo->end());
ContainerType* operator->() { ContainerType* operator->() { return &container_; }
return &container_; const ContainerType* operator->() const { return &container_; }
}
const ContainerType* operator->() const {
return &container_;
}
// Retrieves the stack source so that unit tests can verify that the // Retrieves the stack source so that unit tests can verify that the
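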
// buffer is being used properly. // buffer is being used properly.
const typename Allocator::Source& stack_data() const { const typename Allocator::Source& stack_data() const { return stack_data_; }
return stack_data_;
}
protected: protected:
typename Allocator::Source stack_data_; typename Allocator::Source stack_data_;
@ -225,8 +206,7 @@ class StackVector
: public StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity> { : public StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity> {
public: public:
StackVector() StackVector()
: StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity>() { : StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity>() {}
}
// We need to put this in STL containers sometimes, which requires a copy // We need to put this in STL containers sometimes, which requires a copy
// constructor. We can't call the regular copy constructor because that will // constructor. We can't call the regular copy constructor because that will
@ -244,12 +224,8 @@ class StackVector
// Vectors are commonly indexed, which isn't very convenient even with // Vectors are commonly indexed, which isn't very convenient even with
// operator-> (using "->at()" does exception stuff we don't want). // operator-> (using "->at()" does exception stuff we don't want).
T& operator[](size_t i) { T& operator[](size_t i) { return this->container().operator[](i); }
return this->container().operator[](i); const T& operator[](size_t i) const { return this->container().operator[](i); }
}
const T& operator[](size_t i) const {
return this->container().operator[](i);
}
private: private:
// StackVector(const StackVector& rhs) = delete; // StackVector(const StackVector& rhs) = delete;
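
For reference, a minimal sketch of the container usage described above; the header path, element type, and capacity of 8 are illustrative:

#include "dawn/common/StackContainer.h"  // assumed path

void Example() {
    StackVector<int, 8> values;
    values->reserve(8);        // operator-> forwards to the wrapped std::vector
    for (int i = 0; i < 8; ++i) {
        values->push_back(i);  // fits in the on-stack buffer, so no heap allocation
    }
    int third = values[2];     // operator[] indexes the underlying container directly
    (void)third;
}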

View File

@ -18,17 +18,17 @@
#include "dawn/common/Log.h" #include "dawn/common/Log.h"
#if defined(DAWN_PLATFORM_WINDOWS) #if defined(DAWN_PLATFORM_WINDOWS)
# include <Windows.h> #include <Windows.h>
# include <vector> #include <vector>
#elif defined(DAWN_PLATFORM_LINUX) #elif defined(DAWN_PLATFORM_LINUX)
# include <dlfcn.h> #include <dlfcn.h>
# include <limits.h> #include <limits.h>
# include <unistd.h> #include <unistd.h>
# include <cstdlib> #include <cstdlib>
#elif defined(DAWN_PLATFORM_MACOS) || defined(DAWN_PLATFORM_IOS) #elif defined(DAWN_PLATFORM_MACOS) || defined(DAWN_PLATFORM_IOS)
# include <dlfcn.h> #include <dlfcn.h>
# include <mach-o/dyld.h> #include <mach-o/dyld.h>
# include <vector> #include <vector>
#endif #endif
#include <array> #include <array>
@ -84,7 +84,7 @@ bool SetEnvironmentVar(const char* variableName, const char* value) {
return setenv(variableName, value, 1) == 0; return setenv(variableName, value, 1) == 0;
} }
#else #else
# error "Implement Get/SetEnvironmentVar for your platform." #error "Implement Get/SetEnvironmentVar for your platform."
#endif #endif
#if defined(DAWN_PLATFORM_WINDOWS) #if defined(DAWN_PLATFORM_WINDOWS)
@ -134,7 +134,7 @@ std::optional<std::string> GetExecutablePath() {
return {}; return {};
} }
#else #else
# error "Implement GetExecutablePath for your platform." #error "Implement GetExecutablePath for your platform."
#endif #endif
std::optional<std::string> GetExecutableDirectory() { std::optional<std::string> GetExecutableDirectory() {
@ -168,15 +168,15 @@ std::optional<std::string> GetModulePath() {
static int placeholderSymbol = 0; static int placeholderSymbol = 0;
HMODULE module = nullptr; HMODULE module = nullptr;
// GetModuleHandleEx is unavailable on UWP // GetModuleHandleEx is unavailable on UWP
# if defined(DAWN_IS_WINUWP) #if defined(DAWN_IS_WINUWP)
return {}; return {};
# else #else
if (!GetModuleHandleExA( if (!GetModuleHandleExA(
GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT, GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
reinterpret_cast<LPCSTR>(&placeholderSymbol), &module)) { reinterpret_cast<LPCSTR>(&placeholderSymbol), &module)) {
return {}; return {};
} }
# endif #endif
return GetHModulePath(module); return GetHModulePath(module);
} }
#elif defined(DAWN_PLATFORM_FUCHSIA) #elif defined(DAWN_PLATFORM_FUCHSIA)
@ -188,7 +188,7 @@ std::optional<std::string> GetModulePath() {
return {}; return {};
} }
#else #else
# error "Implement GetModulePath for your platform." #error "Implement GetModulePath for your platform."
#endif #endif
std::optional<std::string> GetModuleDirectory() { std::optional<std::string> GetModuleDirectory() {
@ -208,8 +208,7 @@ std::optional<std::string> GetModuleDirectory() {
ScopedEnvironmentVar::ScopedEnvironmentVar(const char* variableName, const char* value) ScopedEnvironmentVar::ScopedEnvironmentVar(const char* variableName, const char* value)
: mName(variableName), : mName(variableName),
mOriginalValue(GetEnvironmentVar(variableName)), mOriginalValue(GetEnvironmentVar(variableName)),
mIsSet(SetEnvironmentVar(variableName, value)) { mIsSet(SetEnvironmentVar(variableName, value)) {}
}
ScopedEnvironmentVar::~ScopedEnvironmentVar() { ScopedEnvironmentVar::~ScopedEnvironmentVar() {
if (mIsSet) { if (mIsSet) {
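
For reference, a minimal sketch of the RAII behavior this destructor completes; the variable name and value are placeholders:

void Example() {
    {
        ScopedEnvironmentVar var("DAWN_EXAMPLE_VAR", "1");
        // Code in this scope observes DAWN_EXAMPLE_VAR=1 through GetEnvironmentVar().
    }
    // On scope exit the previous value (or absence) captured in mOriginalValue is restored.
}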

View File

@ -50,8 +50,8 @@
// uint32_t aValue = static_cast<uint32_t>(a); // uint32_t aValue = static_cast<uint32_t>(a);
// //
namespace detail { namespace detail {
template <typename Tag, typename T> template <typename Tag, typename T>
class TypedIntegerImpl; class TypedIntegerImpl;
} // namespace detail } // namespace detail
template <typename Tag, typename T, typename = std::enable_if_t<std::is_integral<T>::value>> template <typename Tag, typename T, typename = std::enable_if_t<std::is_integral<T>::value>>
@ -62,200 +62,198 @@ using TypedInteger = T;
#endif #endif
namespace detail { namespace detail {
template <typename Tag, typename T> template <typename Tag, typename T>
class alignas(T) TypedIntegerImpl { class alignas(T) TypedIntegerImpl {
static_assert(std::is_integral<T>::value, "TypedInteger must be integral"); static_assert(std::is_integral<T>::value, "TypedInteger must be integral");
T mValue; T mValue;
public: public:
constexpr TypedIntegerImpl() : mValue(0) { constexpr TypedIntegerImpl() : mValue(0) {
static_assert(alignof(TypedIntegerImpl) == alignof(T)); static_assert(alignof(TypedIntegerImpl) == alignof(T));
static_assert(sizeof(TypedIntegerImpl) == sizeof(T)); static_assert(sizeof(TypedIntegerImpl) == sizeof(T));
} }
// Construction from non-narrowing integral types. // Construction from non-narrowing integral types.
template <typename I, template <typename I,
typename = std::enable_if_t< typename =
std::is_integral<I>::value && std::enable_if_t<std::is_integral<I>::value &&
std::numeric_limits<I>::max() <= std::numeric_limits<T>::max() && std::numeric_limits<I>::max() <= std::numeric_limits<T>::max() &&
std::numeric_limits<I>::min() >= std::numeric_limits<T>::min()>> std::numeric_limits<I>::min() >= std::numeric_limits<T>::min()>>
explicit constexpr TypedIntegerImpl(I rhs) : mValue(static_cast<T>(rhs)) { explicit constexpr TypedIntegerImpl(I rhs) : mValue(static_cast<T>(rhs)) {}
}
// Allow explicit casts only to the underlying type. If you're casting out of an // Allow explicit casts only to the underlying type. If you're casting out of an
// TypedInteger, you should know what you're doing, and exactly what type you // TypedInteger, you should know what you're doing, and exactly what type you
// expect. // expect.
explicit constexpr operator T() const { explicit constexpr operator T() const { return static_cast<T>(this->mValue); }
return static_cast<T>(this->mValue);
}
// Same-tag TypedInteger comparison operators // Same-tag TypedInteger comparison operators
#define TYPED_COMPARISON(op) \ #define TYPED_COMPARISON(op) \
constexpr bool operator op(const TypedIntegerImpl& rhs) const { \ constexpr bool operator op(const TypedIntegerImpl& rhs) const { return mValue op rhs.mValue; }
return mValue op rhs.mValue; \ TYPED_COMPARISON(<)
} TYPED_COMPARISON(<=)
TYPED_COMPARISON(<) TYPED_COMPARISON(>)
TYPED_COMPARISON(<=) TYPED_COMPARISON(>=)
TYPED_COMPARISON(>) TYPED_COMPARISON(==)
TYPED_COMPARISON(>=) TYPED_COMPARISON(!=)
TYPED_COMPARISON(==)
TYPED_COMPARISON(!=)
#undef TYPED_COMPARISON #undef TYPED_COMPARISON
// Increment / decrement operators for for-loop iteration // Increment / decrement operators for for-loop iteration
constexpr TypedIntegerImpl& operator++() { constexpr TypedIntegerImpl& operator++() {
ASSERT(this->mValue < std::numeric_limits<T>::max()); ASSERT(this->mValue < std::numeric_limits<T>::max());
++this->mValue; ++this->mValue;
return *this; return *this;
}
constexpr TypedIntegerImpl operator++(int) {
TypedIntegerImpl ret = *this;
ASSERT(this->mValue < std::numeric_limits<T>::max());
++this->mValue;
return ret;
}
constexpr TypedIntegerImpl& operator--() {
ASSERT(this->mValue > std::numeric_limits<T>::min());
--this->mValue;
return *this;
}
constexpr TypedIntegerImpl operator--(int) {
TypedIntegerImpl ret = *this;
ASSERT(this->mValue > std::numeric_limits<T>::min());
--this->mValue;
return ret;
}
template <typename T2 = T>
static constexpr std::enable_if_t<std::is_unsigned<T2>::value, decltype(T(0) + T2(0))> AddImpl(
TypedIntegerImpl<Tag, T> lhs,
TypedIntegerImpl<Tag, T2> rhs) {
static_assert(std::is_same<T, T2>::value);
// Overflow would wrap around
ASSERT(lhs.mValue + rhs.mValue >= lhs.mValue);
return lhs.mValue + rhs.mValue;
}
template <typename T2 = T>
static constexpr std::enable_if_t<std::is_signed<T2>::value, decltype(T(0) + T2(0))> AddImpl(
TypedIntegerImpl<Tag, T> lhs,
TypedIntegerImpl<Tag, T2> rhs) {
static_assert(std::is_same<T, T2>::value);
if (lhs.mValue > 0) {
// rhs is positive: |rhs| is at most the distance between max and |lhs|.
// rhs is negative: (positive + negative) won't overflow
ASSERT(rhs.mValue <= std::numeric_limits<T>::max() - lhs.mValue);
} else {
// rhs is positive: (negative + positive) won't underflow // rhs is positive: (negative + positive) won't underflow
// rhs is negative: |rhs| isn't less than the (negative) distance between min
// and |lhs|
ASSERT(rhs.mValue >= std::numeric_limits<T>::min() - lhs.mValue);
} }
return lhs.mValue + rhs.mValue;
}
constexpr TypedIntegerImpl operator++(int) { template <typename T2 = T>
TypedIntegerImpl ret = *this; static constexpr std::enable_if_t<std::is_unsigned<T>::value, decltype(T(0) - T2(0))> SubImpl(
TypedIntegerImpl<Tag, T> lhs,
TypedIntegerImpl<Tag, T2> rhs) {
static_assert(std::is_same<T, T2>::value);
ASSERT(this->mValue < std::numeric_limits<T>::max()); // Overflow would wrap around
++this->mValue; ASSERT(lhs.mValue - rhs.mValue <= lhs.mValue);
return ret; return lhs.mValue - rhs.mValue;
}
template <typename T2 = T>
static constexpr std::enable_if_t<std::is_signed<T>::value, decltype(T(0) - T2(0))> SubImpl(
TypedIntegerImpl<Tag, T> lhs,
TypedIntegerImpl<Tag, T2> rhs) {
static_assert(std::is_same<T, T2>::value);
if (lhs.mValue > 0) {
// rhs is positive: positive minus positive won't overflow
// rhs is negative: |rhs| isn't less than the (negative) distance between |lhs|
// and max.
ASSERT(rhs.mValue >= lhs.mValue - std::numeric_limits<T>::max());
} else {
// rhs is positive: |rhs| is at most the distance between min and |lhs|
// rhs is negative: negative minus negative won't overflow
ASSERT(rhs.mValue <= lhs.mValue - std::numeric_limits<T>::min());
} }
return lhs.mValue - rhs.mValue;
}
constexpr TypedIntegerImpl& operator--() { template <typename T2 = T>
ASSERT(this->mValue > std::numeric_limits<T>::min()); constexpr std::enable_if_t<std::is_signed<T2>::value, TypedIntegerImpl> operator-() const {
--this->mValue; static_assert(std::is_same<T, T2>::value);
return *this; // The negation of the most negative value cannot be represented.
} ASSERT(this->mValue != std::numeric_limits<T>::min());
return TypedIntegerImpl(-this->mValue);
}
constexpr TypedIntegerImpl operator--(int) { constexpr TypedIntegerImpl operator+(TypedIntegerImpl rhs) const {
TypedIntegerImpl ret = *this; auto result = AddImpl(*this, rhs);
static_assert(std::is_same<T, decltype(result)>::value, "Use ityp::Add instead.");
return TypedIntegerImpl(result);
}
ASSERT(this->mValue > std::numeric_limits<T>::min()); constexpr TypedIntegerImpl operator-(TypedIntegerImpl rhs) const {
--this->mValue; auto result = SubImpl(*this, rhs);
return ret; static_assert(std::is_same<T, decltype(result)>::value, "Use ityp::Sub instead.");
} return TypedIntegerImpl(result);
}
template <typename T2 = T> };
static constexpr std::enable_if_t<std::is_unsigned<T2>::value, decltype(T(0) + T2(0))>
AddImpl(TypedIntegerImpl<Tag, T> lhs, TypedIntegerImpl<Tag, T2> rhs) {
static_assert(std::is_same<T, T2>::value);
// Overflow would wrap around
ASSERT(lhs.mValue + rhs.mValue >= lhs.mValue);
return lhs.mValue + rhs.mValue;
}
template <typename T2 = T>
static constexpr std::enable_if_t<std::is_signed<T2>::value, decltype(T(0) + T2(0))>
AddImpl(TypedIntegerImpl<Tag, T> lhs, TypedIntegerImpl<Tag, T2> rhs) {
static_assert(std::is_same<T, T2>::value);
if (lhs.mValue > 0) {
// rhs is positive: |rhs| is at most the distance between max and |lhs|.
// rhs is negative: (positive + negative) won't overflow
ASSERT(rhs.mValue <= std::numeric_limits<T>::max() - lhs.mValue);
} else {
// rhs is positive: (negative + positive) won't underflow ASSERT(rhs.mValue >= std::numeric_limits<T>::min() - lhs.mValue);
// rhs is negative: |rhs| isn't less than the (negative) distance between min
// and |lhs|
ASSERT(rhs.mValue >= std::numeric_limits<T>::min() - lhs.mValue);
}
return lhs.mValue + rhs.mValue;
}
template <typename T2 = T>
static constexpr std::enable_if_t<std::is_unsigned<T>::value, decltype(T(0) - T2(0))>
SubImpl(TypedIntegerImpl<Tag, T> lhs, TypedIntegerImpl<Tag, T2> rhs) {
static_assert(std::is_same<T, T2>::value);
// Overflow would wrap around
ASSERT(lhs.mValue - rhs.mValue <= lhs.mValue);
return lhs.mValue - rhs.mValue;
}
template <typename T2 = T>
static constexpr std::enable_if_t<std::is_signed<T>::value, decltype(T(0) - T2(0))> SubImpl(
TypedIntegerImpl<Tag, T> lhs,
TypedIntegerImpl<Tag, T2> rhs) {
static_assert(std::is_same<T, T2>::value);
if (lhs.mValue > 0) {
// rhs is positive: positive minus positive won't overflow
// rhs is negative: |rhs| isn't less than the (negative) distance between |lhs|
// and max.
ASSERT(rhs.mValue >= lhs.mValue - std::numeric_limits<T>::max());
} else {
// rhs is positive: |rhs| is at most the distance between min and |lhs|
// rhs is negative: negative minus negative won't overflow
ASSERT(rhs.mValue <= lhs.mValue - std::numeric_limits<T>::min());
}
return lhs.mValue - rhs.mValue;
}
template <typename T2 = T>
constexpr std::enable_if_t<std::is_signed<T2>::value, TypedIntegerImpl> operator-() const {
static_assert(std::is_same<T, T2>::value);
// The negation of the most negative value cannot be represented.
ASSERT(this->mValue != std::numeric_limits<T>::min());
return TypedIntegerImpl(-this->mValue);
}
constexpr TypedIntegerImpl operator+(TypedIntegerImpl rhs) const {
auto result = AddImpl(*this, rhs);
static_assert(std::is_same<T, decltype(result)>::value, "Use ityp::Add instead.");
return TypedIntegerImpl(result);
}
constexpr TypedIntegerImpl operator-(TypedIntegerImpl rhs) const {
auto result = SubImpl(*this, rhs);
static_assert(std::is_same<T, decltype(result)>::value, "Use ityp::Sub instead.");
return TypedIntegerImpl(result);
}
};
} // namespace detail } // namespace detail
namespace std { namespace std {
template <typename Tag, typename T> template <typename Tag, typename T>
class numeric_limits<detail::TypedIntegerImpl<Tag, T>> : public numeric_limits<T> { class numeric_limits<detail::TypedIntegerImpl<Tag, T>> : public numeric_limits<T> {
public: public:
static detail::TypedIntegerImpl<Tag, T> max() noexcept { static detail::TypedIntegerImpl<Tag, T> max() noexcept {
return detail::TypedIntegerImpl<Tag, T>(std::numeric_limits<T>::max()); return detail::TypedIntegerImpl<Tag, T>(std::numeric_limits<T>::max());
} }
static detail::TypedIntegerImpl<Tag, T> min() noexcept { static detail::TypedIntegerImpl<Tag, T> min() noexcept {
return detail::TypedIntegerImpl<Tag, T>(std::numeric_limits<T>::min()); return detail::TypedIntegerImpl<Tag, T>(std::numeric_limits<T>::min());
} }
}; };
} // namespace std } // namespace std
namespace ityp { namespace ityp {
// These helpers below are provided since the default arithmetic operators for small integer // These helpers below are provided since the default arithmetic operators for small integer
// types like uint8_t and uint16_t return integers, not their same type. To avoid lots of // types like uint8_t and uint16_t return integers, not their same type. To avoid lots of
// casting or conditional code between Release/Debug, callsites should use ityp::Add(a, b) and // casting or conditional code between Release/Debug, callsites should use ityp::Add(a, b) and
// ityp::Sub(a, b) instead. // ityp::Sub(a, b) instead.
template <typename Tag, typename T> template <typename Tag, typename T>
constexpr ::detail::TypedIntegerImpl<Tag, T> Add(::detail::TypedIntegerImpl<Tag, T> lhs, constexpr ::detail::TypedIntegerImpl<Tag, T> Add(::detail::TypedIntegerImpl<Tag, T> lhs,
::detail::TypedIntegerImpl<Tag, T> rhs) { ::detail::TypedIntegerImpl<Tag, T> rhs) {
return ::detail::TypedIntegerImpl<Tag, T>( return ::detail::TypedIntegerImpl<Tag, T>(
static_cast<T>(::detail::TypedIntegerImpl<Tag, T>::AddImpl(lhs, rhs))); static_cast<T>(::detail::TypedIntegerImpl<Tag, T>::AddImpl(lhs, rhs)));
} }
template <typename Tag, typename T> template <typename Tag, typename T>
constexpr ::detail::TypedIntegerImpl<Tag, T> Sub(::detail::TypedIntegerImpl<Tag, T> lhs, constexpr ::detail::TypedIntegerImpl<Tag, T> Sub(::detail::TypedIntegerImpl<Tag, T> lhs,
::detail::TypedIntegerImpl<Tag, T> rhs) { ::detail::TypedIntegerImpl<Tag, T> rhs) {
return ::detail::TypedIntegerImpl<Tag, T>( return ::detail::TypedIntegerImpl<Tag, T>(
static_cast<T>(::detail::TypedIntegerImpl<Tag, T>::SubImpl(lhs, rhs))); static_cast<T>(::detail::TypedIntegerImpl<Tag, T>::SubImpl(lhs, rhs)));
} }
template <typename T> template <typename T>
constexpr std::enable_if_t<std::is_integral<T>::value, T> Add(T lhs, T rhs) { constexpr std::enable_if_t<std::is_integral<T>::value, T> Add(T lhs, T rhs) {
return static_cast<T>(lhs + rhs); return static_cast<T>(lhs + rhs);
} }
template <typename T> template <typename T>
constexpr std::enable_if_t<std::is_integral<T>::value, T> Sub(T lhs, T rhs) { constexpr std::enable_if_t<std::is_integral<T>::value, T> Sub(T lhs, T rhs) {
return static_cast<T>(lhs - rhs); return static_cast<T>(lhs - rhs);
} }
} // namespace ityp } // namespace ityp
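
For reference, a minimal sketch of the intended pattern; the header path and the BindingNumber alias are assumptions for illustration, and in builds where TypedInteger collapses to the raw type (the "using TypedInteger = T;" branch above) the code compiles unchanged:

#include <cstdint>

#include "dawn/common/TypedInteger.h"  // assumed path

using BindingNumber = TypedInteger<struct BindingNumberTag, uint32_t>;

void Example() {
    BindingNumber a(3u);
    BindingNumber b(4u);
    BindingNumber c = ityp::Add(a, b);        // stays a BindingNumber; overflow is ASSERTed in Debug
    uint32_t raw = static_cast<uint32_t>(c);  // escaping the wrapper must be an explicit cast
    (void)raw;
}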

View File

@ -22,27 +22,27 @@
// template parameter. It includes a specialization for detail::TypedIntegerImpl which yields // template parameter. It includes a specialization for detail::TypedIntegerImpl which yields
// the wrapped integer type. // the wrapped integer type.
namespace detail { namespace detail {
template <typename T, typename Enable = void> template <typename T, typename Enable = void>
struct UnderlyingTypeImpl; struct UnderlyingTypeImpl;
template <typename I> template <typename I>
struct UnderlyingTypeImpl<I, typename std::enable_if_t<std::is_integral<I>::value>> { struct UnderlyingTypeImpl<I, typename std::enable_if_t<std::is_integral<I>::value>> {
using type = I; using type = I;
}; };
template <typename E> template <typename E>
struct UnderlyingTypeImpl<E, typename std::enable_if_t<std::is_enum<E>::value>> { struct UnderlyingTypeImpl<E, typename std::enable_if_t<std::is_enum<E>::value>> {
using type = std::underlying_type_t<E>; using type = std::underlying_type_t<E>;
}; };
// Forward declare the TypedInteger impl. // Forward declare the TypedInteger impl.
template <typename Tag, typename T> template <typename Tag, typename T>
class TypedIntegerImpl; class TypedIntegerImpl;
template <typename Tag, typename I> template <typename Tag, typename I>
struct UnderlyingTypeImpl<TypedIntegerImpl<Tag, I>> { struct UnderlyingTypeImpl<TypedIntegerImpl<Tag, I>> {
using type = typename UnderlyingTypeImpl<I>::type; using type = typename UnderlyingTypeImpl<I>::type;
}; };
} // namespace detail } // namespace detail
template <typename T> template <typename T>

View File

@ -26,75 +26,64 @@
namespace ityp { namespace ityp {
// ityp::array is a helper class that wraps std::array with the restriction that // ityp::array is a helper class that wraps std::array with the restriction that
// indices must be a particular type |Index|. Dawn uses multiple flat maps of // indices must be a particular type |Index|. Dawn uses multiple flat maps of
// index-->data, and this class helps ensure indices cannot be passed interchangeably // index-->data, and this class helps ensure indices cannot be passed interchangeably
// to a flat map of a different type. // to a flat map of a different type.
template <typename Index, typename Value, size_t Size> template <typename Index, typename Value, size_t Size>
class array : private std::array<Value, Size> { class array : private std::array<Value, Size> {
using I = UnderlyingType<Index>; using I = UnderlyingType<Index>;
using Base = std::array<Value, Size>; using Base = std::array<Value, Size>;
static_assert(Size <= std::numeric_limits<I>::max()); static_assert(Size <= std::numeric_limits<I>::max());
public: public:
constexpr array() = default; constexpr array() = default;
template <typename... Values> template <typename... Values>
// NOLINTNEXTLINE(runtime/explicit) // NOLINTNEXTLINE(runtime/explicit)
constexpr array(Values&&... values) : Base{std::forward<Values>(values)...} { constexpr array(Values&&... values) : Base{std::forward<Values>(values)...} {}
}
Value& operator[](Index i) { Value& operator[](Index i) {
I index = static_cast<I>(i); I index = static_cast<I>(i);
ASSERT(index >= 0 && index < I(Size)); ASSERT(index >= 0 && index < I(Size));
return Base::operator[](index); return Base::operator[](index);
} }
constexpr const Value& operator[](Index i) const { constexpr const Value& operator[](Index i) const {
I index = static_cast<I>(i); I index = static_cast<I>(i);
ASSERT(index >= 0 && index < I(Size)); ASSERT(index >= 0 && index < I(Size));
return Base::operator[](index); return Base::operator[](index);
} }
Value& at(Index i) { Value& at(Index i) {
I index = static_cast<I>(i); I index = static_cast<I>(i);
ASSERT(index >= 0 && index < I(Size)); ASSERT(index >= 0 && index < I(Size));
return Base::at(index); return Base::at(index);
} }
constexpr const Value& at(Index i) const { constexpr const Value& at(Index i) const {
I index = static_cast<I>(i); I index = static_cast<I>(i);
ASSERT(index >= 0 && index < I(Size)); ASSERT(index >= 0 && index < I(Size));
return Base::at(index); return Base::at(index);
} }
typename Base::iterator begin() noexcept { typename Base::iterator begin() noexcept { return Base::begin(); }
return Base::begin();
}
typename Base::const_iterator begin() const noexcept { typename Base::const_iterator begin() const noexcept { return Base::begin(); }
return Base::begin();
}
typename Base::iterator end() noexcept { typename Base::iterator end() noexcept { return Base::end(); }
return Base::end();
}
typename Base::const_iterator end() const noexcept { typename Base::const_iterator end() const noexcept { return Base::end(); }
return Base::end();
}
constexpr Index size() const { constexpr Index size() const { return Index(I(Size)); }
return Index(I(Size));
}
using Base::back; using Base::back;
using Base::data; using Base::data;
using Base::empty; using Base::empty;
using Base::fill; using Base::fill;
using Base::front; using Base::front;
}; };
} // namespace ityp } // namespace ityp
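
For reference, a minimal sketch of the type-safe indexing this provides; the header path, the ColorSlot alias, and the array size are assumptions for illustration:

#include <cstdint>

#include "dawn/common/TypedInteger.h"  // assumed path
#include "dawn/common/ityp_array.h"    // assumed path

using ColorSlot = TypedInteger<struct ColorSlotTag, uint8_t>;

void Example() {
    ityp::array<ColorSlot, float, 4> clearValues = {0.f, 0.f, 0.f, 1.f};
    clearValues[ColorSlot(uint8_t(2))] = 0.5f;  // operator[] takes the typed index, not a raw size_t
    ColorSlot count = clearValues.size();       // size() is reported as the index type as well
    (void)count;
}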

View File

@ -21,116 +21,95 @@
namespace ityp { namespace ityp {
// ityp::bitset is a helper class that wraps std::bitset with the restriction that // ityp::bitset is a helper class that wraps std::bitset with the restriction that
// indices must be a particular type |Index|. // indices must be a particular type |Index|.
template <typename Index, size_t N> template <typename Index, size_t N>
class bitset : private std::bitset<N> { class bitset : private std::bitset<N> {
using I = UnderlyingType<Index>; using I = UnderlyingType<Index>;
using Base = std::bitset<N>; using Base = std::bitset<N>;
static_assert(sizeof(I) <= sizeof(size_t)); static_assert(sizeof(I) <= sizeof(size_t));
explicit constexpr bitset(const Base& rhs) : Base(rhs) { explicit constexpr bitset(const Base& rhs) : Base(rhs) {}
}
public: public:
using reference = typename Base::reference; using reference = typename Base::reference;
constexpr bitset() noexcept : Base() { constexpr bitset() noexcept : Base() {}
}
// NOLINTNEXTLINE(runtime/explicit) // NOLINTNEXTLINE(runtime/explicit)
constexpr bitset(uint64_t value) noexcept : Base(value) { constexpr bitset(uint64_t value) noexcept : Base(value) {}
}
constexpr bool operator[](Index i) const { constexpr bool operator[](Index i) const { return Base::operator[](static_cast<I>(i)); }
return Base::operator[](static_cast<I>(i));
}
typename Base::reference operator[](Index i) { typename Base::reference operator[](Index i) { return Base::operator[](static_cast<I>(i)); }
return Base::operator[](static_cast<I>(i));
}
bool test(Index i) const { bool test(Index i) const { return Base::test(static_cast<I>(i)); }
return Base::test(static_cast<I>(i));
}
using Base::all; using Base::all;
using Base::any; using Base::any;
using Base::count; using Base::count;
using Base::none; using Base::none;
using Base::size; using Base::size;
bool operator==(const bitset& other) const noexcept { bool operator==(const bitset& other) const noexcept {
return Base::operator==(static_cast<const Base&>(other)); return Base::operator==(static_cast<const Base&>(other));
} }
bool operator!=(const bitset& other) const noexcept { bool operator!=(const bitset& other) const noexcept {
return Base::operator!=(static_cast<const Base&>(other)); return Base::operator!=(static_cast<const Base&>(other));
} }
bitset& operator&=(const bitset& other) noexcept { bitset& operator&=(const bitset& other) noexcept {
return static_cast<bitset&>(Base::operator&=(static_cast<const Base&>(other))); return static_cast<bitset&>(Base::operator&=(static_cast<const Base&>(other)));
} }
bitset& operator|=(const bitset& other) noexcept { bitset& operator|=(const bitset& other) noexcept {
return static_cast<bitset&>(Base::operator|=(static_cast<const Base&>(other))); return static_cast<bitset&>(Base::operator|=(static_cast<const Base&>(other)));
} }
bitset& operator^=(const bitset& other) noexcept { bitset& operator^=(const bitset& other) noexcept {
return static_cast<bitset&>(Base::operator^=(static_cast<const Base&>(other))); return static_cast<bitset&>(Base::operator^=(static_cast<const Base&>(other)));
} }
bitset operator~() const noexcept { bitset operator~() const noexcept { return bitset(*this).flip(); }
return bitset(*this).flip();
}
bitset& set() noexcept { bitset& set() noexcept { return static_cast<bitset&>(Base::set()); }
return static_cast<bitset&>(Base::set());
}
bitset& set(Index i, bool value = true) { bitset& set(Index i, bool value = true) {
return static_cast<bitset&>(Base::set(static_cast<I>(i), value)); return static_cast<bitset&>(Base::set(static_cast<I>(i), value));
} }
bitset& reset() noexcept { bitset& reset() noexcept { return static_cast<bitset&>(Base::reset()); }
return static_cast<bitset&>(Base::reset());
}
bitset& reset(Index i) { bitset& reset(Index i) { return static_cast<bitset&>(Base::reset(static_cast<I>(i))); }
return static_cast<bitset&>(Base::reset(static_cast<I>(i)));
}
bitset& flip() noexcept { bitset& flip() noexcept { return static_cast<bitset&>(Base::flip()); }
return static_cast<bitset&>(Base::flip());
}
bitset& flip(Index i) { bitset& flip(Index i) { return static_cast<bitset&>(Base::flip(static_cast<I>(i))); }
return static_cast<bitset&>(Base::flip(static_cast<I>(i)));
}
using Base::to_string; using Base::to_string;
using Base::to_ullong; using Base::to_ullong;
using Base::to_ulong; using Base::to_ulong;
friend bitset operator&(const bitset& lhs, const bitset& rhs) noexcept { friend bitset operator&(const bitset& lhs, const bitset& rhs) noexcept {
return bitset(static_cast<const Base&>(lhs) & static_cast<const Base&>(rhs)); return bitset(static_cast<const Base&>(lhs) & static_cast<const Base&>(rhs));
} }
friend bitset operator|(const bitset& lhs, const bitset& rhs) noexcept { friend bitset operator|(const bitset& lhs, const bitset& rhs) noexcept {
return bitset(static_cast<const Base&>(lhs) | static_cast<const Base&>(rhs)); return bitset(static_cast<const Base&>(lhs) | static_cast<const Base&>(rhs));
} }
friend bitset operator^(const bitset& lhs, const bitset& rhs) noexcept { friend bitset operator^(const bitset& lhs, const bitset& rhs) noexcept {
return bitset(static_cast<const Base&>(lhs) ^ static_cast<const Base&>(rhs)); return bitset(static_cast<const Base&>(lhs) ^ static_cast<const Base&>(rhs));
} }
friend BitSetIterator<N, Index> IterateBitSet(const bitset& bitset) { friend BitSetIterator<N, Index> IterateBitSet(const bitset& bitset) {
return BitSetIterator<N, Index>(static_cast<const Base&>(bitset)); return BitSetIterator<N, Index>(static_cast<const Base&>(bitset));
} }
friend struct std::hash<bitset>; friend struct std::hash<bitset>;
}; };
} // namespace ityp } // namespace ityp
@ -147,7 +126,7 @@ Index GetHighestBitIndexPlusOne(const ityp::bitset<Index, N>& bitset) {
using I = UnderlyingType<Index>; using I = UnderlyingType<Index>;
#if defined(DAWN_COMPILER_MSVC) #if defined(DAWN_COMPILER_MSVC)
if constexpr (N > 32) { if constexpr (N > 32) {
# if defined(DAWN_PLATFORM_64_BIT) #if defined(DAWN_PLATFORM_64_BIT)
// NOLINTNEXTLINE(runtime/int) // NOLINTNEXTLINE(runtime/int)
unsigned long firstBitIndex = 0ul; unsigned long firstBitIndex = 0ul;
unsigned char ret = _BitScanReverse64(&firstBitIndex, bitset.to_ullong()); unsigned char ret = _BitScanReverse64(&firstBitIndex, bitset.to_ullong());
@ -155,7 +134,7 @@ Index GetHighestBitIndexPlusOne(const ityp::bitset<Index, N>& bitset) {
return Index(static_cast<I>(0)); return Index(static_cast<I>(0));
} }
return Index(static_cast<I>(firstBitIndex + 1)); return Index(static_cast<I>(firstBitIndex + 1));
# else // defined(DAWN_PLATFORM_64_BIT) #else // defined(DAWN_PLATFORM_64_BIT)
if (bitset.none()) { if (bitset.none()) {
return Index(static_cast<I>(0)); return Index(static_cast<I>(0));
} }
@ -165,7 +144,7 @@ Index GetHighestBitIndexPlusOne(const ityp::bitset<Index, N>& bitset) {
} }
} }
UNREACHABLE(); UNREACHABLE();
# endif // defined(DAWN_PLATFORM_64_BIT) #endif // defined(DAWN_PLATFORM_64_BIT)
} else { } else {
// NOLINTNEXTLINE(runtime/int) // NOLINTNEXTLINE(runtime/int)
unsigned long firstBitIndex = 0ul; unsigned long firstBitIndex = 0ul;

View File

@ -22,81 +22,65 @@
namespace ityp { namespace ityp {
// ityp::span is a helper class that wraps an unowned packed array of type |Value|. // ityp::span is a helper class that wraps an unowned packed array of type |Value|.
// It stores the size and pointer to first element. It has the restriction that // It stores the size and pointer to first element. It has the restriction that
// indices must be a particular type |Index|. This provides a type-safe way to index // indices must be a particular type |Index|. This provides a type-safe way to index
// raw pointers. // raw pointers.
template <typename Index, typename Value> template <typename Index, typename Value>
class span { class span {
using I = UnderlyingType<Index>; using I = UnderlyingType<Index>;
public: public:
constexpr span() : mData(nullptr), mSize(0) { constexpr span() : mData(nullptr), mSize(0) {}
} constexpr span(Value* data, Index size) : mData(data), mSize(size) {}
constexpr span(Value* data, Index size) : mData(data), mSize(size) {
}
constexpr Value& operator[](Index i) const { constexpr Value& operator[](Index i) const {
ASSERT(i < mSize); ASSERT(i < mSize);
return mData[static_cast<I>(i)]; return mData[static_cast<I>(i)];
} }
Value* data() noexcept { Value* data() noexcept { return mData; }
return mData;
}
const Value* data() const noexcept { const Value* data() const noexcept { return mData; }
return mData;
}
Value* begin() noexcept { Value* begin() noexcept { return mData; }
return mData;
}
const Value* begin() const noexcept { const Value* begin() const noexcept { return mData; }
return mData;
}
Value* end() noexcept { Value* end() noexcept { return mData + static_cast<I>(mSize); }
return mData + static_cast<I>(mSize);
}
const Value* end() const noexcept { const Value* end() const noexcept { return mData + static_cast<I>(mSize); }
return mData + static_cast<I>(mSize);
}
Value& front() { Value& front() {
ASSERT(mData != nullptr); ASSERT(mData != nullptr);
ASSERT(static_cast<I>(mSize) >= 0); ASSERT(static_cast<I>(mSize) >= 0);
return *mData; return *mData;
} }
const Value& front() const { const Value& front() const {
ASSERT(mData != nullptr); ASSERT(mData != nullptr);
ASSERT(static_cast<I>(mSize) >= 0); ASSERT(static_cast<I>(mSize) >= 0);
return *mData; return *mData;
} }
Value& back() { Value& back() {
ASSERT(mData != nullptr); ASSERT(mData != nullptr);
ASSERT(static_cast<I>(mSize) >= 0); ASSERT(static_cast<I>(mSize) >= 0);
return *(mData + static_cast<I>(mSize) - 1); return *(mData + static_cast<I>(mSize) - 1);
} }
const Value& back() const { const Value& back() const {
ASSERT(mData != nullptr); ASSERT(mData != nullptr);
ASSERT(static_cast<I>(mSize) >= 0); ASSERT(static_cast<I>(mSize) >= 0);
return *(mData + static_cast<I>(mSize) - 1); return *(mData + static_cast<I>(mSize) - 1);
} }
Index size() const { Index size() const { return mSize; }
return mSize;
}
private: private:
Value* mData; Value* mData;
Index mSize; Index mSize;
}; };
} // namespace ityp } // namespace ityp
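To make the type-safety restriction described in the comment above concrete, here is a hedged, standalone sketch of the same idea (a simplified stand-in, not the Dawn class; RowIndex is a hypothetical strong index type):

#include <cassert>
#include <cstddef>

enum class RowIndex : size_t {};  // hypothetical typed index

template <typename Index, typename Value>
class TypedSpan {
  public:
    TypedSpan(Value* data, Index size) : mData(data), mSize(size) {}
    Value& operator[](Index i) const {
        // Only the strong Index type is accepted, so raw integers cannot be mixed in.
        assert(static_cast<size_t>(i) < static_cast<size_t>(mSize));
        return mData[static_cast<size_t>(i)];
    }

  private:
    Value* mData;
    Index mSize;
};

int main() {
    int rows[3] = {10, 20, 30};
    TypedSpan<RowIndex, int> view(rows, RowIndex{3});
    return view[RowIndex{1}] == 20 ? 0 : 1;  // view[1] would not compile
}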
@ -24,82 +24,53 @@
namespace ityp { namespace ityp {
template <typename Index, typename Value, size_t StaticCapacity> template <typename Index, typename Value, size_t StaticCapacity>
class stack_vec : private StackVector<Value, StaticCapacity> { class stack_vec : private StackVector<Value, StaticCapacity> {
using I = UnderlyingType<Index>; using I = UnderlyingType<Index>;
using Base = StackVector<Value, StaticCapacity>; using Base = StackVector<Value, StaticCapacity>;
using VectorBase = std::vector<Value, StackAllocator<Value, StaticCapacity>>; using VectorBase = std::vector<Value, StackAllocator<Value, StaticCapacity>>;
static_assert(StaticCapacity <= std::numeric_limits<I>::max()); static_assert(StaticCapacity <= std::numeric_limits<I>::max());
public: public:
stack_vec() : Base() { stack_vec() : Base() {}
} explicit stack_vec(Index size) : Base() { this->container().resize(static_cast<I>(size)); }
explicit stack_vec(Index size) : Base() {
this->container().resize(static_cast<I>(size));
}
Value& operator[](Index i) { Value& operator[](Index i) {
ASSERT(i < size()); ASSERT(i < size());
return Base::operator[](static_cast<I>(i)); return Base::operator[](static_cast<I>(i));
} }
constexpr const Value& operator[](Index i) const { constexpr const Value& operator[](Index i) const {
ASSERT(i < size()); ASSERT(i < size());
return Base::operator[](static_cast<I>(i)); return Base::operator[](static_cast<I>(i));
} }
void resize(Index size) { void resize(Index size) { this->container().resize(static_cast<I>(size)); }
this->container().resize(static_cast<I>(size));
}
void reserve(Index size) { void reserve(Index size) { this->container().reserve(static_cast<I>(size)); }
this->container().reserve(static_cast<I>(size));
}
Value* data() { Value* data() { return this->container().data(); }
return this->container().data();
}
const Value* data() const { const Value* data() const { return this->container().data(); }
return this->container().data();
}
typename VectorBase::iterator begin() noexcept { typename VectorBase::iterator begin() noexcept { return this->container().begin(); }
return this->container().begin();
}
typename VectorBase::const_iterator begin() const noexcept { typename VectorBase::const_iterator begin() const noexcept { return this->container().begin(); }
return this->container().begin();
}
typename VectorBase::iterator end() noexcept { typename VectorBase::iterator end() noexcept { return this->container().end(); }
return this->container().end();
}
typename VectorBase::const_iterator end() const noexcept { typename VectorBase::const_iterator end() const noexcept { return this->container().end(); }
return this->container().end();
}
typename VectorBase::reference front() { typename VectorBase::reference front() { return this->container().front(); }
return this->container().front();
}
typename VectorBase::const_reference front() const { typename VectorBase::const_reference front() const { return this->container().front(); }
return this->container().front();
}
typename VectorBase::reference back() { typename VectorBase::reference back() { return this->container().back(); }
return this->container().back();
}
typename VectorBase::const_reference back() const { typename VectorBase::const_reference back() const { return this->container().back(); }
return this->container().back();
}
Index size() const { Index size() const { return Index(static_cast<I>(this->container().size())); }
return Index(static_cast<I>(this->container().size())); };
}
};
} // namespace ityp } // namespace ityp
@ -24,85 +24,75 @@
namespace ityp { namespace ityp {
// ityp::vector is a helper class that wraps std::vector with the restriction that // ityp::vector is a helper class that wraps std::vector with the restriction that
// indices must be a particular type |Index|. // indices must be a particular type |Index|.
template <typename Index, typename Value> template <typename Index, typename Value>
class vector : public std::vector<Value> { class vector : public std::vector<Value> {
using I = UnderlyingType<Index>; using I = UnderlyingType<Index>;
using Base = std::vector<Value>; using Base = std::vector<Value>;
private: private:
// Disallow access to base constructors and untyped index/size-related operators. // Disallow access to base constructors and untyped index/size-related operators.
using Base::Base; using Base::Base;
using Base::operator=; using Base::operator=;
using Base::operator[]; using Base::operator[];
using Base::at; using Base::at;
using Base::reserve; using Base::reserve;
using Base::resize; using Base::resize;
using Base::size; using Base::size;
public: public:
vector() : Base() { vector() : Base() {}
}
explicit vector(Index size) : Base(static_cast<I>(size)) { explicit vector(Index size) : Base(static_cast<I>(size)) {}
}
vector(Index size, const Value& init) : Base(static_cast<I>(size), init) { vector(Index size, const Value& init) : Base(static_cast<I>(size), init) {}
}
vector(const vector& rhs) : Base(static_cast<const Base&>(rhs)) { vector(const vector& rhs) : Base(static_cast<const Base&>(rhs)) {}
}
vector(vector&& rhs) : Base(static_cast<Base&&>(rhs)) { vector(vector&& rhs) : Base(static_cast<Base&&>(rhs)) {}
}
vector(std::initializer_list<Value> init) : Base(init) { vector(std::initializer_list<Value> init) : Base(init) {}
}
vector& operator=(const vector& rhs) { vector& operator=(const vector& rhs) {
Base::operator=(static_cast<const Base&>(rhs)); Base::operator=(static_cast<const Base&>(rhs));
return *this; return *this;
} }
vector& operator=(vector&& rhs) noexcept { vector& operator=(vector&& rhs) noexcept {
Base::operator=(static_cast<Base&&>(rhs)); Base::operator=(static_cast<Base&&>(rhs));
return *this; return *this;
} }
Value& operator[](Index i) { Value& operator[](Index i) {
ASSERT(i >= Index(0) && i < size()); ASSERT(i >= Index(0) && i < size());
return Base::operator[](static_cast<I>(i)); return Base::operator[](static_cast<I>(i));
} }
constexpr const Value& operator[](Index i) const { constexpr const Value& operator[](Index i) const {
ASSERT(i >= Index(0) && i < size()); ASSERT(i >= Index(0) && i < size());
return Base::operator[](static_cast<I>(i)); return Base::operator[](static_cast<I>(i));
} }
Value& at(Index i) { Value& at(Index i) {
ASSERT(i >= Index(0) && i < size()); ASSERT(i >= Index(0) && i < size());
return Base::at(static_cast<I>(i)); return Base::at(static_cast<I>(i));
} }
constexpr const Value& at(Index i) const { constexpr const Value& at(Index i) const {
ASSERT(i >= Index(0) && i < size()); ASSERT(i >= Index(0) && i < size());
return Base::at(static_cast<I>(i)); return Base::at(static_cast<I>(i));
} }
constexpr Index size() const { constexpr Index size() const {
ASSERT(std::numeric_limits<I>::max() >= Base::size()); ASSERT(std::numeric_limits<I>::max() >= Base::size());
return Index(static_cast<I>(Base::size())); return Index(static_cast<I>(Base::size()));
} }
void resize(Index size) { void resize(Index size) { Base::resize(static_cast<I>(size)); }
Base::resize(static_cast<I>(size));
}
void reserve(Index size) { void reserve(Index size) { Base::reserve(static_cast<I>(size)); }
Base::reserve(static_cast<I>(size)); };
}
};
} // namespace ityp } // namespace ityp
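The private using-declarations above are what hide the untyped std::vector interface from callers. A minimal hedged sketch of that C++ technique on its own, independent of Dawn:

#include <vector>

// Re-declaring an inherited member in a private section makes it inaccessible
// through the derived class, while other inherited members stay public.
class IntList : public std::vector<int> {
    using Base = std::vector<int>;
    using Base::operator[];  // hidden: callers must go through the typed accessor

  public:
    int& At(unsigned i) { return Base::operator[](i); }
};

int main() {
    IntList list;
    list.push_back(7);               // push_back is still publicly visible
    return list.At(0) == 7 ? 0 : 1;  // list[0] would not compile here
}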
@ -16,10 +16,10 @@
#define SRC_DAWN_COMMON_VULKAN_PLATFORM_H_ #define SRC_DAWN_COMMON_VULKAN_PLATFORM_H_
#if !defined(DAWN_ENABLE_BACKEND_VULKAN) #if !defined(DAWN_ENABLE_BACKEND_VULKAN)
# error "vulkan_platform.h included without the Vulkan backend enabled" #error "vulkan_platform.h included without the Vulkan backend enabled"
#endif #endif
#if defined(VULKAN_CORE_H_) #if defined(VULKAN_CORE_H_)
# error "vulkan.h included before vulkan_platform.h" #error "vulkan.h included before vulkan_platform.h"
#endif #endif
#include <cstddef> #include <cstddef>
@ -36,7 +36,7 @@
// (like vulkan.h on 64 bit) but makes sure the types are different on 32 bit architectures. // (like vulkan.h on 64 bit) but makes sure the types are different on 32 bit architectures.
#if defined(DAWN_PLATFORM_64_BIT) #if defined(DAWN_PLATFORM_64_BIT)
# define DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) using object = struct object##_T*; #define DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) using object = struct object##_T*;
// This function is needed because MSVC doesn't accept reinterpret_cast from uint64_t from uint64_t // This function is needed because MSVC doesn't accept reinterpret_cast from uint64_t from uint64_t
// TODO(cwallez@chromium.org): Remove this once we rework vulkan_platform.h // TODO(cwallez@chromium.org): Remove this once we rework vulkan_platform.h
template <typename T> template <typename T>
@ -44,13 +44,13 @@ T NativeNonDispatachableHandleFromU64(uint64_t u64) {
return reinterpret_cast<T>(u64); return reinterpret_cast<T>(u64);
} }
#elif defined(DAWN_PLATFORM_32_BIT) #elif defined(DAWN_PLATFORM_32_BIT)
# define DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) using object = uint64_t; #define DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) using object = uint64_t;
template <typename T> template <typename T>
T NativeNonDispatachableHandleFromU64(uint64_t u64) { T NativeNonDispatachableHandleFromU64(uint64_t u64) {
return u64; return u64;
} }
#else #else
# error "Unsupported platform" #error "Unsupported platform"
#endif #endif
// Define a placeholder Vulkan handle for use before we include vulkan.h // Define a placeholder Vulkan handle for use before we include vulkan.h
@ -67,89 +67,73 @@ DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(VkSomeHandle)
namespace dawn::native::vulkan { namespace dawn::native::vulkan {
namespace detail { namespace detail {
template <typename T> template <typename T>
struct WrapperStruct { struct WrapperStruct {
T member; T member;
}; };
template <typename T> template <typename T>
static constexpr size_t AlignOfInStruct = alignof(WrapperStruct<T>); static constexpr size_t AlignOfInStruct = alignof(WrapperStruct<T>);
static constexpr size_t kNativeVkHandleAlignment = AlignOfInStruct<VkSomeHandle>; static constexpr size_t kNativeVkHandleAlignment = AlignOfInStruct<VkSomeHandle>;
static constexpr size_t kUint64Alignment = AlignOfInStruct<uint64_t>; static constexpr size_t kUint64Alignment = AlignOfInStruct<uint64_t>;
// Simple handle types that support "nullptr_t" as a 0 value. // Simple handle types that support "nullptr_t" as a 0 value.
template <typename Tag, typename HandleType> template <typename Tag, typename HandleType>
class alignas(detail::kNativeVkHandleAlignment) VkHandle { class alignas(detail::kNativeVkHandleAlignment) VkHandle {
public: public:
// Default constructor and assigning of VK_NULL_HANDLE // Default constructor and assigning of VK_NULL_HANDLE
VkHandle() = default; VkHandle() = default;
VkHandle(std::nullptr_t) { VkHandle(std::nullptr_t) {}
}
// Use default copy constructor/assignment // Use default copy constructor/assignment
VkHandle(const VkHandle<Tag, HandleType>& other) = default; VkHandle(const VkHandle<Tag, HandleType>& other) = default;
VkHandle& operator=(const VkHandle<Tag, HandleType>&) = default; VkHandle& operator=(const VkHandle<Tag, HandleType>&) = default;
// Comparisons between handles // Comparisons between handles
bool operator==(VkHandle<Tag, HandleType> other) const { bool operator==(VkHandle<Tag, HandleType> other) const { return mHandle == other.mHandle; }
return mHandle == other.mHandle; bool operator!=(VkHandle<Tag, HandleType> other) const { return mHandle != other.mHandle; }
}
bool operator!=(VkHandle<Tag, HandleType> other) const {
return mHandle != other.mHandle;
}
// Comparisons between handles and VK_NULL_HANDLE // Comparisons between handles and VK_NULL_HANDLE
bool operator==(std::nullptr_t) const { bool operator==(std::nullptr_t) const { return mHandle == 0; }
return mHandle == 0; bool operator!=(std::nullptr_t) const { return mHandle != 0; }
}
bool operator!=(std::nullptr_t) const {
return mHandle != 0;
}
// Implicit conversion to real Vulkan types. // Implicit conversion to real Vulkan types.
operator HandleType() const { operator HandleType() const { return GetHandle(); }
return GetHandle();
}
HandleType GetHandle() const { HandleType GetHandle() const { return mHandle; }
return mHandle;
}
HandleType& operator*() { HandleType& operator*() { return mHandle; }
return mHandle;
}
static VkHandle<Tag, HandleType> CreateFromHandle(HandleType handle) { static VkHandle<Tag, HandleType> CreateFromHandle(HandleType handle) {
return VkHandle{handle}; return VkHandle{handle};
}
private:
explicit VkHandle(HandleType handle) : mHandle(handle) {
}
HandleType mHandle = 0;
};
} // namespace detail
static constexpr std::nullptr_t VK_NULL_HANDLE = nullptr;
template <typename Tag, typename HandleType>
HandleType* AsVkArray(detail::VkHandle<Tag, HandleType>* handle) {
return reinterpret_cast<HandleType*>(handle);
} }
private:
explicit VkHandle(HandleType handle) : mHandle(handle) {}
HandleType mHandle = 0;
};
} // namespace detail
static constexpr std::nullptr_t VK_NULL_HANDLE = nullptr;
template <typename Tag, typename HandleType>
HandleType* AsVkArray(detail::VkHandle<Tag, HandleType>* handle) {
return reinterpret_cast<HandleType*>(handle);
}
} // namespace dawn::native::vulkan } // namespace dawn::native::vulkan
#define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) \ #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) \
DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) \ DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) \
namespace dawn::native::vulkan { \ namespace dawn::native::vulkan { \
using object = detail::VkHandle<struct VkTag##object, ::object>; \ using object = detail::VkHandle<struct VkTag##object, ::object>; \
static_assert(sizeof(object) == sizeof(uint64_t)); \ static_assert(sizeof(object) == sizeof(uint64_t)); \
static_assert(alignof(object) == detail::kUint64Alignment); \ static_assert(alignof(object) == detail::kUint64Alignment); \
static_assert(sizeof(object) == sizeof(::object)); \ static_assert(sizeof(object) == sizeof(::object)); \
static_assert(alignof(object) == detail::kNativeVkHandleAlignment); \ static_assert(alignof(object) == detail::kNativeVkHandleAlignment); \
} // namespace dawn::native::vulkan } // namespace dawn::native::vulkan
// Import additional parts of Vulkan that are supported on our architecture and preemptively include // Import additional parts of Vulkan that are supported on our architecture and preemptively include
@ -157,36 +141,36 @@ namespace dawn::native::vulkan {
// defines are defined already in the Vulkan-Header BUILD.gn, but are needed when building with // defines are defined already in the Vulkan-Header BUILD.gn, but are needed when building with
// CMake, hence they cannot be removed at the moment. // CMake, hence they cannot be removed at the moment.
#if defined(DAWN_PLATFORM_WINDOWS) #if defined(DAWN_PLATFORM_WINDOWS)
# ifndef VK_USE_PLATFORM_WIN32_KHR #ifndef VK_USE_PLATFORM_WIN32_KHR
# define VK_USE_PLATFORM_WIN32_KHR #define VK_USE_PLATFORM_WIN32_KHR
# endif #endif
# include "dawn/common/windows_with_undefs.h" #include "dawn/common/windows_with_undefs.h"
#endif // DAWN_PLATFORM_WINDOWS #endif // DAWN_PLATFORM_WINDOWS
#if defined(DAWN_USE_X11) #if defined(DAWN_USE_X11)
# define VK_USE_PLATFORM_XLIB_KHR #define VK_USE_PLATFORM_XLIB_KHR
# ifndef VK_USE_PLATFORM_XCB_KHR #ifndef VK_USE_PLATFORM_XCB_KHR
# define VK_USE_PLATFORM_XCB_KHR #define VK_USE_PLATFORM_XCB_KHR
# endif #endif
# include "dawn/common/xlib_with_undefs.h" #include "dawn/common/xlib_with_undefs.h"
#endif // defined(DAWN_USE_X11) #endif // defined(DAWN_USE_X11)
#if defined(DAWN_ENABLE_BACKEND_METAL) #if defined(DAWN_ENABLE_BACKEND_METAL)
# ifndef VK_USE_PLATFORM_METAL_EXT #ifndef VK_USE_PLATFORM_METAL_EXT
# define VK_USE_PLATFORM_METAL_EXT #define VK_USE_PLATFORM_METAL_EXT
# endif #endif
#endif // defined(DAWN_ENABLE_BACKEND_METAL) #endif // defined(DAWN_ENABLE_BACKEND_METAL)
#if defined(DAWN_PLATFORM_ANDROID) #if defined(DAWN_PLATFORM_ANDROID)
# ifndef VK_USE_PLATFORM_ANDROID_KHR #ifndef VK_USE_PLATFORM_ANDROID_KHR
# define VK_USE_PLATFORM_ANDROID_KHR #define VK_USE_PLATFORM_ANDROID_KHR
# endif #endif
#endif // defined(DAWN_PLATFORM_ANDROID) #endif // defined(DAWN_PLATFORM_ANDROID)
#if defined(DAWN_PLATFORM_FUCHSIA) #if defined(DAWN_PLATFORM_FUCHSIA)
# ifndef VK_USE_PLATFORM_FUCHSIA #ifndef VK_USE_PLATFORM_FUCHSIA
# define VK_USE_PLATFORM_FUCHSIA #define VK_USE_PLATFORM_FUCHSIA
# endif #endif
#endif // defined(DAWN_PLATFORM_FUCHSIA) #endif // defined(DAWN_PLATFORM_FUCHSIA)
// The actual inclusion of vulkan.h! // The actual inclusion of vulkan.h!
@ -200,7 +184,7 @@ static constexpr std::nullptr_t VK_NULL_HANDLE = nullptr;
#elif defined(DAWN_PLATFORM_32_BIT) #elif defined(DAWN_PLATFORM_32_BIT)
static constexpr uint64_t VK_NULL_HANDLE = 0; static constexpr uint64_t VK_NULL_HANDLE = 0;
#else #else
# error "Unsupported platform" #error "Unsupported platform"
#endif #endif
#endif // SRC_DAWN_COMMON_VULKAN_PLATFORM_H_ #endif // SRC_DAWN_COMMON_VULKAN_PLATFORM_H_
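For readability, this is roughly what the VK_DEFINE_NON_DISPATCHABLE_HANDLE macro above produces for a hypothetical handle name VkFoo on a 64-bit platform (hand-expanded sketch, not generated output):

// DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(VkFoo), 64-bit variant:
using VkFoo = struct VkFoo_T*;

namespace dawn::native::vulkan {
// The type-safe wrapper that shadows the native handle inside Dawn's Vulkan backend.
using VkFoo = detail::VkHandle<struct VkTagVkFoo, ::VkFoo>;
static_assert(sizeof(VkFoo) == sizeof(uint64_t));
static_assert(alignof(VkFoo) == detail::kUint64Alignment);
static_assert(sizeof(VkFoo) == sizeof(::VkFoo));
static_assert(alignof(VkFoo) == detail::kNativeVkHandleAlignment);
}  // namespace dawn::native::vulkan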
@ -18,7 +18,7 @@
#include "dawn/common/Platform.h" #include "dawn/common/Platform.h"
#if !defined(DAWN_PLATFORM_WINDOWS) #if !defined(DAWN_PLATFORM_WINDOWS)
# error "windows_with_undefs.h included on non-Windows" #error "windows_with_undefs.h included on non-Windows"
#endif #endif
// This header includes <windows.h> but removes all the extra defines that conflict with identifiers // This header includes <windows.h> but removes all the extra defines that conflict with identifiers
@ -18,7 +18,7 @@
#include "dawn/common/Platform.h" #include "dawn/common/Platform.h"
#if !defined(DAWN_PLATFORM_LINUX) #if !defined(DAWN_PLATFORM_LINUX)
# error "xlib_with_undefs.h included on non-Linux" #error "xlib_with_undefs.h included on non-Linux"
#endif #endif
// This header includes <X11/Xlib.h> but removes all the extra defines that conflict with // This header includes <X11/Xlib.h> but removes all the extra defines that conflict with
@ -29,39 +29,37 @@
namespace { namespace {
class DevNull : public dawn::wire::CommandSerializer { class DevNull : public dawn::wire::CommandSerializer {
public: public:
size_t GetMaximumAllocationSize() const override { size_t GetMaximumAllocationSize() const override {
// Some fuzzer bots have a 2GB allocation limit. Pick a value reasonably below that. // Some fuzzer bots have a 2GB allocation limit. Pick a value reasonably below that.
return 1024 * 1024 * 1024; return 1024 * 1024 * 1024;
}
void* GetCmdSpace(size_t size) override {
if (size > buf.size()) {
buf.resize(size);
}
return buf.data();
}
bool Flush() override {
return true;
}
private:
std::vector<char> buf;
};
std::unique_ptr<dawn::native::Instance> sInstance;
WGPUProcDeviceCreateSwapChain sOriginalDeviceCreateSwapChain = nullptr;
bool sCommandsComplete = false;
WGPUSwapChain ErrorDeviceCreateSwapChain(WGPUDevice device,
WGPUSurface surface,
const WGPUSwapChainDescriptor*) {
WGPUSwapChainDescriptor desc = {};
// A 0 implementation will trigger a swapchain creation error.
desc.implementation = 0;
return sOriginalDeviceCreateSwapChain(device, surface, &desc);
} }
void* GetCmdSpace(size_t size) override {
if (size > buf.size()) {
buf.resize(size);
}
return buf.data();
}
bool Flush() override { return true; }
private:
std::vector<char> buf;
};
std::unique_ptr<dawn::native::Instance> sInstance;
WGPUProcDeviceCreateSwapChain sOriginalDeviceCreateSwapChain = nullptr;
bool sCommandsComplete = false;
WGPUSwapChain ErrorDeviceCreateSwapChain(WGPUDevice device,
WGPUSurface surface,
const WGPUSwapChainDescriptor*) {
WGPUSwapChainDescriptor desc = {};
// A 0 implementation will trigger a swapchain creation error.
desc.implementation = 0;
return sOriginalDeviceCreateSwapChain(device, surface, &desc);
}
} // namespace } // namespace
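A hedged sketch of how a dawn::wire::CommandSerializer such as DevNull above is driven: the wire asks for command space, writes encoded commands into it, then flushes. The helper below is illustrative only and not part of the fuzzer.

#include <cstring>

void WriteCommand(dawn::wire::CommandSerializer* serializer, const void* cmd, size_t size) {
    void* space = serializer->GetCmdSpace(size);  // DevNull grows |buf| as needed
    memcpy(space, cmd, size);
    serializer->Flush();  // DevNull simply returns true, discarding the data
}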
@ -22,17 +22,17 @@
namespace dawn::native { namespace dawn::native {
class Instance; class Instance;
} // namespace dawn::native } // namespace dawn::native
namespace DawnWireServerFuzzer { namespace DawnWireServerFuzzer {
using MakeDeviceFn = std::function<wgpu::Device(dawn::native::Instance*)>; using MakeDeviceFn = std::function<wgpu::Device(dawn::native::Instance*)>;
int Initialize(int* argc, char*** argv); int Initialize(int* argc, char*** argv);
int Run(const uint8_t* data, size_t size, MakeDeviceFn MakeDevice, bool supportsErrorInjection); int Run(const uint8_t* data, size_t size, MakeDeviceFn MakeDevice, bool supportsErrorInjection);
} // namespace DawnWireServerFuzzer } // namespace DawnWireServerFuzzer
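A hedged sketch of how these entry points are typically wired into a libFuzzer target; the MakeDeviceFn body is a placeholder assumption, since a real fuzzer selects an adapter from the instance before creating a device.

extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
    return DawnWireServerFuzzer::Run(
        data, size,
        [](dawn::native::Instance* instance) {
            // Hypothetical placeholder: a real fuzzer creates a device from |instance| here.
            return wgpu::Device();
        },
        /*supportsErrorInjection=*/false);
}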
@ -24,207 +24,206 @@
namespace dawn::native { namespace dawn::native {
AdapterBase::AdapterBase(InstanceBase* instance, wgpu::BackendType backend) AdapterBase::AdapterBase(InstanceBase* instance, wgpu::BackendType backend)
: mInstance(instance), mBackend(backend) { : mInstance(instance), mBackend(backend) {
mSupportedFeatures.EnableFeature(Feature::DawnNative); mSupportedFeatures.EnableFeature(Feature::DawnNative);
mSupportedFeatures.EnableFeature(Feature::DawnInternalUsages); mSupportedFeatures.EnableFeature(Feature::DawnInternalUsages);
}
MaybeError AdapterBase::Initialize() {
DAWN_TRY_CONTEXT(InitializeImpl(), "initializing adapter (backend=%s)", mBackend);
DAWN_TRY_CONTEXT(
InitializeSupportedFeaturesImpl(),
"gathering supported features for \"%s\" - \"%s\" (vendorId=%#06x deviceId=%#06x "
"backend=%s type=%s)",
mName, mDriverDescription, mVendorId, mDeviceId, mBackend, mAdapterType);
DAWN_TRY_CONTEXT(
InitializeSupportedLimitsImpl(&mLimits),
"gathering supported limits for \"%s\" - \"%s\" (vendorId=%#06x deviceId=%#06x "
"backend=%s type=%s)",
mName, mDriverDescription, mVendorId, mDeviceId, mBackend, mAdapterType);
// Enforce internal Dawn constants.
mLimits.v1.maxVertexBufferArrayStride =
std::min(mLimits.v1.maxVertexBufferArrayStride, kMaxVertexBufferArrayStride);
mLimits.v1.maxBindGroups = std::min(mLimits.v1.maxBindGroups, kMaxBindGroups);
mLimits.v1.maxVertexAttributes =
std::min(mLimits.v1.maxVertexAttributes, uint32_t(kMaxVertexAttributes));
mLimits.v1.maxVertexBuffers =
std::min(mLimits.v1.maxVertexBuffers, uint32_t(kMaxVertexBuffers));
mLimits.v1.maxInterStageShaderComponents =
std::min(mLimits.v1.maxInterStageShaderComponents, kMaxInterStageShaderComponents);
mLimits.v1.maxSampledTexturesPerShaderStage =
std::min(mLimits.v1.maxSampledTexturesPerShaderStage, kMaxSampledTexturesPerShaderStage);
mLimits.v1.maxSamplersPerShaderStage =
std::min(mLimits.v1.maxSamplersPerShaderStage, kMaxSamplersPerShaderStage);
mLimits.v1.maxStorageBuffersPerShaderStage =
std::min(mLimits.v1.maxStorageBuffersPerShaderStage, kMaxStorageBuffersPerShaderStage);
mLimits.v1.maxStorageTexturesPerShaderStage =
std::min(mLimits.v1.maxStorageTexturesPerShaderStage, kMaxStorageTexturesPerShaderStage);
mLimits.v1.maxUniformBuffersPerShaderStage =
std::min(mLimits.v1.maxUniformBuffersPerShaderStage, kMaxUniformBuffersPerShaderStage);
mLimits.v1.maxDynamicUniformBuffersPerPipelineLayout =
std::min(mLimits.v1.maxDynamicUniformBuffersPerPipelineLayout,
kMaxDynamicUniformBuffersPerPipelineLayout);
mLimits.v1.maxDynamicStorageBuffersPerPipelineLayout =
std::min(mLimits.v1.maxDynamicStorageBuffersPerPipelineLayout,
kMaxDynamicStorageBuffersPerPipelineLayout);
return {};
}
bool AdapterBase::APIGetLimits(SupportedLimits* limits) const {
return GetLimits(limits);
}
void AdapterBase::APIGetProperties(AdapterProperties* properties) const {
properties->vendorID = mVendorId;
properties->deviceID = mDeviceId;
properties->name = mName.c_str();
properties->driverDescription = mDriverDescription.c_str();
properties->adapterType = mAdapterType;
properties->backendType = mBackend;
}
bool AdapterBase::APIHasFeature(wgpu::FeatureName feature) const {
return mSupportedFeatures.IsEnabled(feature);
}
size_t AdapterBase::APIEnumerateFeatures(wgpu::FeatureName* features) const {
return mSupportedFeatures.EnumerateFeatures(features);
}
DeviceBase* AdapterBase::APICreateDevice(const DeviceDescriptor* descriptor) {
DeviceDescriptor defaultDesc = {};
if (descriptor == nullptr) {
descriptor = &defaultDesc;
} }
auto result = CreateDeviceInternal(descriptor);
MaybeError AdapterBase::Initialize() { if (result.IsError()) {
DAWN_TRY_CONTEXT(InitializeImpl(), "initializing adapter (backend=%s)", mBackend); mInstance->ConsumedError(result.AcquireError());
DAWN_TRY_CONTEXT( return nullptr;
InitializeSupportedFeaturesImpl(),
"gathering supported features for \"%s\" - \"%s\" (vendorId=%#06x deviceId=%#06x "
"backend=%s type=%s)",
mName, mDriverDescription, mVendorId, mDeviceId, mBackend, mAdapterType);
DAWN_TRY_CONTEXT(
InitializeSupportedLimitsImpl(&mLimits),
"gathering supported limits for \"%s\" - \"%s\" (vendorId=%#06x deviceId=%#06x "
"backend=%s type=%s)",
mName, mDriverDescription, mVendorId, mDeviceId, mBackend, mAdapterType);
// Enforce internal Dawn constants.
mLimits.v1.maxVertexBufferArrayStride =
std::min(mLimits.v1.maxVertexBufferArrayStride, kMaxVertexBufferArrayStride);
mLimits.v1.maxBindGroups = std::min(mLimits.v1.maxBindGroups, kMaxBindGroups);
mLimits.v1.maxVertexAttributes =
std::min(mLimits.v1.maxVertexAttributes, uint32_t(kMaxVertexAttributes));
mLimits.v1.maxVertexBuffers =
std::min(mLimits.v1.maxVertexBuffers, uint32_t(kMaxVertexBuffers));
mLimits.v1.maxInterStageShaderComponents =
std::min(mLimits.v1.maxInterStageShaderComponents, kMaxInterStageShaderComponents);
mLimits.v1.maxSampledTexturesPerShaderStage = std::min(
mLimits.v1.maxSampledTexturesPerShaderStage, kMaxSampledTexturesPerShaderStage);
mLimits.v1.maxSamplersPerShaderStage =
std::min(mLimits.v1.maxSamplersPerShaderStage, kMaxSamplersPerShaderStage);
mLimits.v1.maxStorageBuffersPerShaderStage =
std::min(mLimits.v1.maxStorageBuffersPerShaderStage, kMaxStorageBuffersPerShaderStage);
mLimits.v1.maxStorageTexturesPerShaderStage = std::min(
mLimits.v1.maxStorageTexturesPerShaderStage, kMaxStorageTexturesPerShaderStage);
mLimits.v1.maxUniformBuffersPerShaderStage =
std::min(mLimits.v1.maxUniformBuffersPerShaderStage, kMaxUniformBuffersPerShaderStage);
mLimits.v1.maxDynamicUniformBuffersPerPipelineLayout =
std::min(mLimits.v1.maxDynamicUniformBuffersPerPipelineLayout,
kMaxDynamicUniformBuffersPerPipelineLayout);
mLimits.v1.maxDynamicStorageBuffersPerPipelineLayout =
std::min(mLimits.v1.maxDynamicStorageBuffersPerPipelineLayout,
kMaxDynamicStorageBuffersPerPipelineLayout);
return {};
} }
return result.AcquireSuccess().Detach();
}
bool AdapterBase::APIGetLimits(SupportedLimits* limits) const { void AdapterBase::APIRequestDevice(const DeviceDescriptor* descriptor,
return GetLimits(limits); WGPURequestDeviceCallback callback,
void* userdata) {
static constexpr DeviceDescriptor kDefaultDescriptor = {};
if (descriptor == nullptr) {
descriptor = &kDefaultDescriptor;
} }
auto result = CreateDeviceInternal(descriptor);
void AdapterBase::APIGetProperties(AdapterProperties* properties) const { if (result.IsError()) {
properties->vendorID = mVendorId; std::unique_ptr<ErrorData> errorData = result.AcquireError();
properties->deviceID = mDeviceId;
properties->name = mName.c_str();
properties->driverDescription = mDriverDescription.c_str();
properties->adapterType = mAdapterType;
properties->backendType = mBackend;
}
bool AdapterBase::APIHasFeature(wgpu::FeatureName feature) const {
return mSupportedFeatures.IsEnabled(feature);
}
size_t AdapterBase::APIEnumerateFeatures(wgpu::FeatureName* features) const {
return mSupportedFeatures.EnumerateFeatures(features);
}
DeviceBase* AdapterBase::APICreateDevice(const DeviceDescriptor* descriptor) {
DeviceDescriptor defaultDesc = {};
if (descriptor == nullptr) {
descriptor = &defaultDesc;
}
auto result = CreateDeviceInternal(descriptor);
if (result.IsError()) {
mInstance->ConsumedError(result.AcquireError());
return nullptr;
}
return result.AcquireSuccess().Detach();
}
void AdapterBase::APIRequestDevice(const DeviceDescriptor* descriptor,
WGPURequestDeviceCallback callback,
void* userdata) {
static constexpr DeviceDescriptor kDefaultDescriptor = {};
if (descriptor == nullptr) {
descriptor = &kDefaultDescriptor;
}
auto result = CreateDeviceInternal(descriptor);
if (result.IsError()) {
std::unique_ptr<ErrorData> errorData = result.AcquireError();
// TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
callback(WGPURequestDeviceStatus_Error, nullptr,
errorData->GetFormattedMessage().c_str(), userdata);
return;
}
Ref<DeviceBase> device = result.AcquireSuccess();
WGPURequestDeviceStatus status =
device == nullptr ? WGPURequestDeviceStatus_Unknown : WGPURequestDeviceStatus_Success;
// TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
callback(status, ToAPI(device.Detach()), nullptr, userdata); callback(WGPURequestDeviceStatus_Error, nullptr, errorData->GetFormattedMessage().c_str(),
userdata);
return;
} }
uint32_t AdapterBase::GetVendorId() const { Ref<DeviceBase> device = result.AcquireSuccess();
return mVendorId;
}
uint32_t AdapterBase::GetDeviceId() const { WGPURequestDeviceStatus status =
return mDeviceId; device == nullptr ? WGPURequestDeviceStatus_Unknown : WGPURequestDeviceStatus_Success;
} // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
callback(status, ToAPI(device.Detach()), nullptr, userdata);
}
wgpu::BackendType AdapterBase::GetBackendType() const { uint32_t AdapterBase::GetVendorId() const {
return mBackend; return mVendorId;
} }
InstanceBase* AdapterBase::GetInstance() const { uint32_t AdapterBase::GetDeviceId() const {
return mInstance; return mDeviceId;
} }
FeaturesSet AdapterBase::GetSupportedFeatures() const { wgpu::BackendType AdapterBase::GetBackendType() const {
return mSupportedFeatures; return mBackend;
} }
bool AdapterBase::SupportsAllRequiredFeatures( InstanceBase* AdapterBase::GetInstance() const {
const ityp::span<size_t, const wgpu::FeatureName>& features) const { return mInstance;
for (wgpu::FeatureName f : features) { }
if (!mSupportedFeatures.IsEnabled(f)) {
return false;
}
}
return true;
}
WGPUDeviceProperties AdapterBase::GetAdapterProperties() const { FeaturesSet AdapterBase::GetSupportedFeatures() const {
WGPUDeviceProperties adapterProperties = {}; return mSupportedFeatures;
adapterProperties.deviceID = mDeviceId; }
adapterProperties.vendorID = mVendorId;
adapterProperties.adapterType = static_cast<WGPUAdapterType>(mAdapterType);
mSupportedFeatures.InitializeDeviceProperties(&adapterProperties); bool AdapterBase::SupportsAllRequiredFeatures(
// This is OK for now because there are no limit feature structs. const ityp::span<size_t, const wgpu::FeatureName>& features) const {
// If we add additional structs, the caller will need to provide memory for (wgpu::FeatureName f : features) {
// to store them (ex. by calling GetLimits directly instead). Currently, if (!mSupportedFeatures.IsEnabled(f)) {
// we keep this function as it's only used internally in Chromium to
// send the adapter properties across the wire.
GetLimits(FromAPI(&adapterProperties.limits));
return adapterProperties;
}
bool AdapterBase::GetLimits(SupportedLimits* limits) const {
ASSERT(limits != nullptr);
if (limits->nextInChain != nullptr) {
return false; return false;
} }
if (mUseTieredLimits) { }
limits->limits = ApplyLimitTiers(mLimits.v1); return true;
} else { }
limits->limits = mLimits.v1;
} WGPUDeviceProperties AdapterBase::GetAdapterProperties() const {
return true; WGPUDeviceProperties adapterProperties = {};
adapterProperties.deviceID = mDeviceId;
adapterProperties.vendorID = mVendorId;
adapterProperties.adapterType = static_cast<WGPUAdapterType>(mAdapterType);
mSupportedFeatures.InitializeDeviceProperties(&adapterProperties);
// This is OK for now because there are no limit feature structs.
// If we add additional structs, the caller will need to provide memory
// to store them (ex. by calling GetLimits directly instead). Currently,
// we keep this function as it's only used internally in Chromium to
// send the adapter properties across the wire.
GetLimits(FromAPI(&adapterProperties.limits));
return adapterProperties;
}
bool AdapterBase::GetLimits(SupportedLimits* limits) const {
ASSERT(limits != nullptr);
if (limits->nextInChain != nullptr) {
return false;
}
if (mUseTieredLimits) {
limits->limits = ApplyLimitTiers(mLimits.v1);
} else {
limits->limits = mLimits.v1;
}
return true;
}
ResultOrError<Ref<DeviceBase>> AdapterBase::CreateDeviceInternal(
const DeviceDescriptor* descriptor) {
ASSERT(descriptor != nullptr);
for (uint32_t i = 0; i < descriptor->requiredFeaturesCount; ++i) {
wgpu::FeatureName f = descriptor->requiredFeatures[i];
DAWN_TRY(ValidateFeatureName(f));
DAWN_INVALID_IF(!mSupportedFeatures.IsEnabled(f), "Requested feature %s is not supported.",
f);
} }
ResultOrError<Ref<DeviceBase>> AdapterBase::CreateDeviceInternal( if (descriptor->requiredLimits != nullptr) {
const DeviceDescriptor* descriptor) { DAWN_TRY_CONTEXT(ValidateLimits(mUseTieredLimits ? ApplyLimitTiers(mLimits.v1) : mLimits.v1,
ASSERT(descriptor != nullptr); descriptor->requiredLimits->limits),
"validating required limits");
for (uint32_t i = 0; i < descriptor->requiredFeaturesCount; ++i) { DAWN_INVALID_IF(descriptor->requiredLimits->nextInChain != nullptr,
wgpu::FeatureName f = descriptor->requiredFeatures[i]; "nextInChain is not nullptr.");
DAWN_TRY(ValidateFeatureName(f));
DAWN_INVALID_IF(!mSupportedFeatures.IsEnabled(f),
"Requested feature %s is not supported.", f);
}
if (descriptor->requiredLimits != nullptr) {
DAWN_TRY_CONTEXT(
ValidateLimits(mUseTieredLimits ? ApplyLimitTiers(mLimits.v1) : mLimits.v1,
descriptor->requiredLimits->limits),
"validating required limits");
DAWN_INVALID_IF(descriptor->requiredLimits->nextInChain != nullptr,
"nextInChain is not nullptr.");
}
return CreateDeviceImpl(descriptor);
} }
return CreateDeviceImpl(descriptor);
}
void AdapterBase::SetUseTieredLimits(bool useTieredLimits) { void AdapterBase::SetUseTieredLimits(bool useTieredLimits) {
mUseTieredLimits = useTieredLimits; mUseTieredLimits = useTieredLimits;
} }
void AdapterBase::ResetInternalDeviceForTesting() { void AdapterBase::ResetInternalDeviceForTesting() {
mInstance->ConsumedError(ResetInternalDeviceForTestingImpl()); mInstance->ConsumedError(ResetInternalDeviceForTestingImpl());
} }
MaybeError AdapterBase::ResetInternalDeviceForTestingImpl() { MaybeError AdapterBase::ResetInternalDeviceForTestingImpl() {
return DAWN_INTERNAL_ERROR( return DAWN_INTERNAL_ERROR(
"ResetInternalDeviceForTesting should only be used with the D3D12 backend."); "ResetInternalDeviceForTesting should only be used with the D3D12 backend.");
} }
} // namespace dawn::native } // namespace dawn::native
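From the caller's side, the APIRequestDevice contract implemented above looks roughly like this through the C API (hedged sketch; the callback signature follows WGPURequestDeviceCallback from webgpu.h):

// Receives either a device (Success) or a formatted error message, never both.
void OnDeviceRequested(WGPURequestDeviceStatus status,
                       WGPUDevice device,
                       const char* message,
                       void* userdata) {
    if (status != WGPURequestDeviceStatus_Success) {
        // |message| carries the error produced by CreateDeviceInternal.
        return;
    }
    *static_cast<WGPUDevice*>(userdata) = device;
}

// Usage sketch:
// wgpuAdapterRequestDevice(adapter, /*descriptor=*/nullptr, OnDeviceRequested, &outDevice);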
@ -28,71 +28,70 @@
namespace dawn::native { namespace dawn::native {
class DeviceBase; class DeviceBase;
class AdapterBase : public RefCounted { class AdapterBase : public RefCounted {
public: public:
AdapterBase(InstanceBase* instance, wgpu::BackendType backend); AdapterBase(InstanceBase* instance, wgpu::BackendType backend);
virtual ~AdapterBase() = default; virtual ~AdapterBase() = default;
MaybeError Initialize(); MaybeError Initialize();
// WebGPU API // WebGPU API
bool APIGetLimits(SupportedLimits* limits) const; bool APIGetLimits(SupportedLimits* limits) const;
void APIGetProperties(AdapterProperties* properties) const; void APIGetProperties(AdapterProperties* properties) const;
bool APIHasFeature(wgpu::FeatureName feature) const; bool APIHasFeature(wgpu::FeatureName feature) const;
size_t APIEnumerateFeatures(wgpu::FeatureName* features) const; size_t APIEnumerateFeatures(wgpu::FeatureName* features) const;
void APIRequestDevice(const DeviceDescriptor* descriptor, void APIRequestDevice(const DeviceDescriptor* descriptor,
WGPURequestDeviceCallback callback, WGPURequestDeviceCallback callback,
void* userdata); void* userdata);
DeviceBase* APICreateDevice(const DeviceDescriptor* descriptor = nullptr); DeviceBase* APICreateDevice(const DeviceDescriptor* descriptor = nullptr);
uint32_t GetVendorId() const; uint32_t GetVendorId() const;
uint32_t GetDeviceId() const; uint32_t GetDeviceId() const;
wgpu::BackendType GetBackendType() const; wgpu::BackendType GetBackendType() const;
InstanceBase* GetInstance() const; InstanceBase* GetInstance() const;
void ResetInternalDeviceForTesting(); void ResetInternalDeviceForTesting();
FeaturesSet GetSupportedFeatures() const; FeaturesSet GetSupportedFeatures() const;
bool SupportsAllRequiredFeatures( bool SupportsAllRequiredFeatures(
const ityp::span<size_t, const wgpu::FeatureName>& features) const; const ityp::span<size_t, const wgpu::FeatureName>& features) const;
WGPUDeviceProperties GetAdapterProperties() const; WGPUDeviceProperties GetAdapterProperties() const;
bool GetLimits(SupportedLimits* limits) const; bool GetLimits(SupportedLimits* limits) const;
void SetUseTieredLimits(bool useTieredLimits); void SetUseTieredLimits(bool useTieredLimits);
virtual bool SupportsExternalImages() const = 0; virtual bool SupportsExternalImages() const = 0;
protected: protected:
uint32_t mVendorId = 0xFFFFFFFF; uint32_t mVendorId = 0xFFFFFFFF;
uint32_t mDeviceId = 0xFFFFFFFF; uint32_t mDeviceId = 0xFFFFFFFF;
std::string mName; std::string mName;
wgpu::AdapterType mAdapterType = wgpu::AdapterType::Unknown; wgpu::AdapterType mAdapterType = wgpu::AdapterType::Unknown;
std::string mDriverDescription; std::string mDriverDescription;
FeaturesSet mSupportedFeatures; FeaturesSet mSupportedFeatures;
private: private:
virtual ResultOrError<Ref<DeviceBase>> CreateDeviceImpl( virtual ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(const DeviceDescriptor* descriptor) = 0;
const DeviceDescriptor* descriptor) = 0;
virtual MaybeError InitializeImpl() = 0; virtual MaybeError InitializeImpl() = 0;
// Check base WebGPU features and discover supported features. // Check base WebGPU features and discover supported features.
virtual MaybeError InitializeSupportedFeaturesImpl() = 0; virtual MaybeError InitializeSupportedFeaturesImpl() = 0;
// Check base WebGPU limits and populate supported limits. // Check base WebGPU limits and populate supported limits.
virtual MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) = 0; virtual MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) = 0;
ResultOrError<Ref<DeviceBase>> CreateDeviceInternal(const DeviceDescriptor* descriptor); ResultOrError<Ref<DeviceBase>> CreateDeviceInternal(const DeviceDescriptor* descriptor);
virtual MaybeError ResetInternalDeviceForTestingImpl(); virtual MaybeError ResetInternalDeviceForTestingImpl();
InstanceBase* mInstance = nullptr; InstanceBase* mInstance = nullptr;
wgpu::BackendType mBackend; wgpu::BackendType mBackend;
CombinedLimits mLimits; CombinedLimits mLimits;
bool mUseTieredLimits = false; bool mUseTieredLimits = false;
}; };
} // namespace dawn::native } // namespace dawn::native
@ -20,62 +20,61 @@
namespace dawn::native { namespace dawn::native {
AsyncTaskManager::AsyncTaskManager(dawn::platform::WorkerTaskPool* workerTaskPool) AsyncTaskManager::AsyncTaskManager(dawn::platform::WorkerTaskPool* workerTaskPool)
: mWorkerTaskPool(workerTaskPool) { : mWorkerTaskPool(workerTaskPool) {}
}
void AsyncTaskManager::PostTask(AsyncTask asyncTask) { void AsyncTaskManager::PostTask(AsyncTask asyncTask) {
// If these allocations become expensive, we can slab-allocate tasks. // If these allocations become expensive, we can slab-allocate tasks.
Ref<WaitableTask> waitableTask = AcquireRef(new WaitableTask()); Ref<WaitableTask> waitableTask = AcquireRef(new WaitableTask());
waitableTask->taskManager = this; waitableTask->taskManager = this;
waitableTask->asyncTask = std::move(asyncTask); waitableTask->asyncTask = std::move(asyncTask);
{ {
// We insert new waitableTask objects into mPendingTasks in main thread (PostTask()), // We insert new waitableTask objects into mPendingTasks in main thread (PostTask()),
// and we may remove waitableTask objects from mPendingTasks in either main thread // and we may remove waitableTask objects from mPendingTasks in either main thread
// (WaitAllPendingTasks()) or sub-thread (TaskCompleted), so mPendingTasks should be // (WaitAllPendingTasks()) or sub-thread (TaskCompleted), so mPendingTasks should be
// protected by a mutex. // protected by a mutex.
std::lock_guard<std::mutex> lock(mPendingTasksMutex);
mPendingTasks.emplace(waitableTask.Get(), waitableTask);
}
// Ref the task since it is accessed inside the worker function.
// The worker function will acquire and release the task upon completion.
waitableTask->Reference();
waitableTask->waitableEvent =
mWorkerTaskPool->PostWorkerTask(DoWaitableTask, waitableTask.Get());
}
void AsyncTaskManager::HandleTaskCompletion(WaitableTask* task) {
std::lock_guard<std::mutex> lock(mPendingTasksMutex); std::lock_guard<std::mutex> lock(mPendingTasksMutex);
auto iter = mPendingTasks.find(task); mPendingTasks.emplace(waitableTask.Get(), waitableTask);
if (iter != mPendingTasks.end()) {
mPendingTasks.erase(iter);
}
} }
void AsyncTaskManager::WaitAllPendingTasks() { // Ref the task since it is accessed inside the worker function.
std::unordered_map<WaitableTask*, Ref<WaitableTask>> allPendingTasks; // The worker function will acquire and release the task upon completion.
waitableTask->Reference();
waitableTask->waitableEvent =
mWorkerTaskPool->PostWorkerTask(DoWaitableTask, waitableTask.Get());
}
{ void AsyncTaskManager::HandleTaskCompletion(WaitableTask* task) {
std::lock_guard<std::mutex> lock(mPendingTasksMutex); std::lock_guard<std::mutex> lock(mPendingTasksMutex);
allPendingTasks.swap(mPendingTasks); auto iter = mPendingTasks.find(task);
} if (iter != mPendingTasks.end()) {
mPendingTasks.erase(iter);
for (auto& [_, task] : allPendingTasks) {
task->waitableEvent->Wait();
}
} }
}
bool AsyncTaskManager::HasPendingTasks() { void AsyncTaskManager::WaitAllPendingTasks() {
std::unordered_map<WaitableTask*, Ref<WaitableTask>> allPendingTasks;
{
std::lock_guard<std::mutex> lock(mPendingTasksMutex); std::lock_guard<std::mutex> lock(mPendingTasksMutex);
return !mPendingTasks.empty(); allPendingTasks.swap(mPendingTasks);
} }
void AsyncTaskManager::DoWaitableTask(void* task) { for (auto& [_, task] : allPendingTasks) {
Ref<WaitableTask> waitableTask = AcquireRef(static_cast<WaitableTask*>(task)); task->waitableEvent->Wait();
waitableTask->asyncTask();
waitableTask->taskManager->HandleTaskCompletion(waitableTask.Get());
} }
}
bool AsyncTaskManager::HasPendingTasks() {
std::lock_guard<std::mutex> lock(mPendingTasksMutex);
return !mPendingTasks.empty();
}
void AsyncTaskManager::DoWaitableTask(void* task) {
Ref<WaitableTask> waitableTask = AcquireRef(static_cast<WaitableTask*>(task));
waitableTask->asyncTask();
waitableTask->taskManager->HandleTaskCompletion(waitableTask.Get());
}
} // namespace dawn::native } // namespace dawn::native
@ -23,43 +23,43 @@
#include "dawn/common/RefCounted.h" #include "dawn/common/RefCounted.h"
namespace dawn::platform { namespace dawn::platform {
class WaitableEvent; class WaitableEvent;
class WorkerTaskPool; class WorkerTaskPool;
} // namespace dawn::platform } // namespace dawn::platform
namespace dawn::native { namespace dawn::native {
// TODO(crbug.com/dawn/826): we'll add additional things to AsyncTask in the future, like // TODO(crbug.com/dawn/826): we'll add additional things to AsyncTask in the future, like
// Cancel() and RunNow(). Cancelling helps avoid running the task's body when we are just // Cancel() and RunNow(). Cancelling helps avoid running the task's body when we are just
// shutting down the device. RunNow() could be used for more advanced scenarios, for example // shutting down the device. RunNow() could be used for more advanced scenarios, for example
// always doing ShaderModule initial compilation asynchronously, but being able to steal the // always doing ShaderModule initial compilation asynchronously, but being able to steal the
// task if we need it for synchronous pipeline compilation. // task if we need it for synchronous pipeline compilation.
using AsyncTask = std::function<void()>; using AsyncTask = std::function<void()>;
class AsyncTaskManager { class AsyncTaskManager {
public:
explicit AsyncTaskManager(dawn::platform::WorkerTaskPool* workerTaskPool);
void PostTask(AsyncTask asyncTask);
void WaitAllPendingTasks();
bool HasPendingTasks();
private:
class WaitableTask : public RefCounted {
public: public:
explicit AsyncTaskManager(dawn::platform::WorkerTaskPool* workerTaskPool); AsyncTask asyncTask;
AsyncTaskManager* taskManager;
void PostTask(AsyncTask asyncTask); std::unique_ptr<dawn::platform::WaitableEvent> waitableEvent;
void WaitAllPendingTasks();
bool HasPendingTasks();
private:
class WaitableTask : public RefCounted {
public:
AsyncTask asyncTask;
AsyncTaskManager* taskManager;
std::unique_ptr<dawn::platform::WaitableEvent> waitableEvent;
};
static void DoWaitableTask(void* task);
void HandleTaskCompletion(WaitableTask* task);
std::mutex mPendingTasksMutex;
std::unordered_map<WaitableTask*, Ref<WaitableTask>> mPendingTasks;
dawn::platform::WorkerTaskPool* mWorkerTaskPool;
}; };
static void DoWaitableTask(void* task);
void HandleTaskCompletion(WaitableTask* task);
std::mutex mPendingTasksMutex;
std::unordered_map<WaitableTask*, Ref<WaitableTask>> mPendingTasks;
dawn::platform::WorkerTaskPool* mWorkerTaskPool;
};
} // namespace dawn::native } // namespace dawn::native
#endif // SRC_DAWN_NATIVE_ASYNCTASK_H_ #endif // SRC_DAWN_NATIVE_ASYNCTASK_H_
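A hedged usage sketch of the AsyncTaskManager API above; |pool| stands for an embedder-provided dawn::platform::WorkerTaskPool implementation and the task body is illustrative.

dawn::native::AsyncTaskManager taskManager(&pool);
taskManager.PostTask([] {
    // e.g. compile a shader variant off the main thread
});
// Later, for example during device shutdown:
taskManager.WaitAllPendingTasks();  // blocks until every posted task has run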
@ -21,155 +21,148 @@
namespace dawn::native { namespace dawn::native {
AttachmentStateBlueprint::AttachmentStateBlueprint( AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderBundleEncoderDescriptor* descriptor)
const RenderBundleEncoderDescriptor* descriptor) : mSampleCount(descriptor->sampleCount) {
: mSampleCount(descriptor->sampleCount) { ASSERT(descriptor->colorFormatsCount <= kMaxColorAttachments);
ASSERT(descriptor->colorFormatsCount <= kMaxColorAttachments); for (ColorAttachmentIndex i(uint8_t(0));
i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->colorFormatsCount)); ++i) {
wgpu::TextureFormat format = descriptor->colorFormats[static_cast<uint8_t>(i)];
if (format != wgpu::TextureFormat::Undefined) {
mColorAttachmentsSet.set(i);
mColorFormats[i] = format;
}
}
mDepthStencilFormat = descriptor->depthStencilFormat;
}
AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderPipelineDescriptor* descriptor)
: mSampleCount(descriptor->multisample.count) {
if (descriptor->fragment != nullptr) {
ASSERT(descriptor->fragment->targetCount <= kMaxColorAttachments);
for (ColorAttachmentIndex i(uint8_t(0)); for (ColorAttachmentIndex i(uint8_t(0));
i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->colorFormatsCount)); ++i) { i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->fragment->targetCount));
wgpu::TextureFormat format = descriptor->colorFormats[static_cast<uint8_t>(i)]; ++i) {
wgpu::TextureFormat format =
descriptor->fragment->targets[static_cast<uint8_t>(i)].format;
if (format != wgpu::TextureFormat::Undefined) { if (format != wgpu::TextureFormat::Undefined) {
mColorAttachmentsSet.set(i); mColorAttachmentsSet.set(i);
mColorFormats[i] = format; mColorFormats[i] = format;
} }
} }
mDepthStencilFormat = descriptor->depthStencilFormat;
} }
if (descriptor->depthStencil != nullptr) {
mDepthStencilFormat = descriptor->depthStencil->format;
}
}
AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderPipelineDescriptor* descriptor) AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderPassDescriptor* descriptor) {
: mSampleCount(descriptor->multisample.count) { for (ColorAttachmentIndex i(uint8_t(0));
if (descriptor->fragment != nullptr) { i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->colorAttachmentCount)); ++i) {
ASSERT(descriptor->fragment->targetCount <= kMaxColorAttachments); TextureViewBase* attachment = descriptor->colorAttachments[static_cast<uint8_t>(i)].view;
for (ColorAttachmentIndex i(uint8_t(0)); if (attachment == nullptr) {
i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->fragment->targetCount)); continue;
++i) {
wgpu::TextureFormat format =
descriptor->fragment->targets[static_cast<uint8_t>(i)].format;
if (format != wgpu::TextureFormat::Undefined) {
mColorAttachmentsSet.set(i);
mColorFormats[i] = format;
}
}
} }
if (descriptor->depthStencil != nullptr) { mColorAttachmentsSet.set(i);
mDepthStencilFormat = descriptor->depthStencil->format; mColorFormats[i] = attachment->GetFormat().format;
if (mSampleCount == 0) {
mSampleCount = attachment->GetTexture()->GetSampleCount();
} else {
ASSERT(mSampleCount == attachment->GetTexture()->GetSampleCount());
} }
} }
if (descriptor->depthStencilAttachment != nullptr) {
TextureViewBase* attachment = descriptor->depthStencilAttachment->view;
mDepthStencilFormat = attachment->GetFormat().format;
if (mSampleCount == 0) {
mSampleCount = attachment->GetTexture()->GetSampleCount();
} else {
ASSERT(mSampleCount == attachment->GetTexture()->GetSampleCount());
}
}
ASSERT(mSampleCount > 0);
}
AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderPassDescriptor* descriptor) { AttachmentStateBlueprint::AttachmentStateBlueprint(const AttachmentStateBlueprint& rhs) = default;
for (ColorAttachmentIndex i(uint8_t(0));
i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->colorAttachmentCount)); size_t AttachmentStateBlueprint::HashFunc::operator()(
++i) { const AttachmentStateBlueprint* attachmentState) const {
TextureViewBase* attachment = size_t hash = 0;
descriptor->colorAttachments[static_cast<uint8_t>(i)].view;
if (attachment == nullptr) { // Hash color formats
continue; HashCombine(&hash, attachmentState->mColorAttachmentsSet);
} for (ColorAttachmentIndex i : IterateBitSet(attachmentState->mColorAttachmentsSet)) {
mColorAttachmentsSet.set(i); HashCombine(&hash, attachmentState->mColorFormats[i]);
mColorFormats[i] = attachment->GetFormat().format;
if (mSampleCount == 0) {
mSampleCount = attachment->GetTexture()->GetSampleCount();
} else {
ASSERT(mSampleCount == attachment->GetTexture()->GetSampleCount());
}
}
if (descriptor->depthStencilAttachment != nullptr) {
TextureViewBase* attachment = descriptor->depthStencilAttachment->view;
mDepthStencilFormat = attachment->GetFormat().format;
if (mSampleCount == 0) {
mSampleCount = attachment->GetTexture()->GetSampleCount();
} else {
ASSERT(mSampleCount == attachment->GetTexture()->GetSampleCount());
}
}
ASSERT(mSampleCount > 0);
} }
AttachmentStateBlueprint::AttachmentStateBlueprint(const AttachmentStateBlueprint& rhs) = // Hash depth stencil attachment
default; HashCombine(&hash, attachmentState->mDepthStencilFormat);
size_t AttachmentStateBlueprint::HashFunc::operator()( // Hash sample count
const AttachmentStateBlueprint* attachmentState) const { HashCombine(&hash, attachmentState->mSampleCount);
size_t hash = 0;
// Hash color formats return hash;
HashCombine(&hash, attachmentState->mColorAttachmentsSet); }
for (ColorAttachmentIndex i : IterateBitSet(attachmentState->mColorAttachmentsSet)) {
HashCombine(&hash, attachmentState->mColorFormats[i]);
}
// Hash depth stencil attachment bool AttachmentStateBlueprint::EqualityFunc::operator()(const AttachmentStateBlueprint* a,
HashCombine(&hash, attachmentState->mDepthStencilFormat); const AttachmentStateBlueprint* b) const {
// Check set attachments
// Hash sample count if (a->mColorAttachmentsSet != b->mColorAttachmentsSet) {
HashCombine(&hash, attachmentState->mSampleCount); return false;
return hash;
} }
bool AttachmentStateBlueprint::EqualityFunc::operator()( // Check color formats
const AttachmentStateBlueprint* a, for (ColorAttachmentIndex i : IterateBitSet(a->mColorAttachmentsSet)) {
const AttachmentStateBlueprint* b) const { if (a->mColorFormats[i] != b->mColorFormats[i]) {
// Check set attachments
if (a->mColorAttachmentsSet != b->mColorAttachmentsSet) {
return false; return false;
} }
// Check color formats
for (ColorAttachmentIndex i : IterateBitSet(a->mColorAttachmentsSet)) {
if (a->mColorFormats[i] != b->mColorFormats[i]) {
return false;
}
}
// Check depth stencil format
if (a->mDepthStencilFormat != b->mDepthStencilFormat) {
return false;
}
// Check sample count
if (a->mSampleCount != b->mSampleCount) {
return false;
}
return true;
} }
AttachmentState::AttachmentState(DeviceBase* device, const AttachmentStateBlueprint& blueprint) // Check depth stencil format
: AttachmentStateBlueprint(blueprint), ObjectBase(device) { if (a->mDepthStencilFormat != b->mDepthStencilFormat) {
return false;
} }
AttachmentState::~AttachmentState() { // Check sample count
GetDevice()->UncacheAttachmentState(this); if (a->mSampleCount != b->mSampleCount) {
return false;
} }
size_t AttachmentState::ComputeContentHash() { return true;
// TODO(dawn:549): skip this traversal and reuse the blueprint. }
return AttachmentStateBlueprint::HashFunc()(this);
}
ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> AttachmentState::AttachmentState(DeviceBase* device, const AttachmentStateBlueprint& blueprint)
AttachmentState::GetColorAttachmentsMask() const { : AttachmentStateBlueprint(blueprint), ObjectBase(device) {}
return mColorAttachmentsSet;
}
wgpu::TextureFormat AttachmentState::GetColorAttachmentFormat( AttachmentState::~AttachmentState() {
ColorAttachmentIndex index) const { GetDevice()->UncacheAttachmentState(this);
ASSERT(mColorAttachmentsSet[index]); }
return mColorFormats[index];
}
bool AttachmentState::HasDepthStencilAttachment() const { size_t AttachmentState::ComputeContentHash() {
return mDepthStencilFormat != wgpu::TextureFormat::Undefined; // TODO(dawn:549): skip this traversal and reuse the blueprint.
} return AttachmentStateBlueprint::HashFunc()(this);
}
wgpu::TextureFormat AttachmentState::GetDepthStencilFormat() const { ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> AttachmentState::GetColorAttachmentsMask()
ASSERT(HasDepthStencilAttachment()); const {
return mDepthStencilFormat; return mColorAttachmentsSet;
} }
uint32_t AttachmentState::GetSampleCount() const { wgpu::TextureFormat AttachmentState::GetColorAttachmentFormat(ColorAttachmentIndex index) const {
return mSampleCount; ASSERT(mColorAttachmentsSet[index]);
} return mColorFormats[index];
}
bool AttachmentState::HasDepthStencilAttachment() const {
return mDepthStencilFormat != wgpu::TextureFormat::Undefined;
}
wgpu::TextureFormat AttachmentState::GetDepthStencilFormat() const {
ASSERT(HasDepthStencilAttachment());
return mDepthStencilFormat;
}
uint32_t AttachmentState::GetSampleCount() const {
return mSampleCount;
}
} // namespace dawn::native } // namespace dawn::native
@@ -29,54 +29,53 @@

namespace dawn::native {

class DeviceBase;

// AttachmentStateBlueprint and AttachmentState are separated so the AttachmentState
// can be constructed by copying the blueprint state instead of traversing descriptors.
// Also, AttachmentStateBlueprint does not need a refcount like AttachmentState.
class AttachmentStateBlueprint {
  public:
    // Note: Descriptors must be validated before the AttachmentState is constructed.
    explicit AttachmentStateBlueprint(const RenderBundleEncoderDescriptor* descriptor);
    explicit AttachmentStateBlueprint(const RenderPipelineDescriptor* descriptor);
    explicit AttachmentStateBlueprint(const RenderPassDescriptor* descriptor);

    AttachmentStateBlueprint(const AttachmentStateBlueprint& rhs);

    // Functors necessary for the unordered_set<AttachmentState*>-based cache.
    struct HashFunc {
        size_t operator()(const AttachmentStateBlueprint* attachmentState) const;
    };
    struct EqualityFunc {
        bool operator()(const AttachmentStateBlueprint* a, const AttachmentStateBlueprint* b) const;
    };

  protected:
    ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> mColorAttachmentsSet;
    ityp::array<ColorAttachmentIndex, wgpu::TextureFormat, kMaxColorAttachments> mColorFormats;
    // Default (texture format Undefined) indicates there is no depth stencil attachment.
    wgpu::TextureFormat mDepthStencilFormat = wgpu::TextureFormat::Undefined;
    uint32_t mSampleCount = 0;
};

class AttachmentState final : public AttachmentStateBlueprint,
                              public ObjectBase,
                              public CachedObject {
  public:
    AttachmentState(DeviceBase* device, const AttachmentStateBlueprint& blueprint);

    ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> GetColorAttachmentsMask() const;
    wgpu::TextureFormat GetColorAttachmentFormat(ColorAttachmentIndex index) const;
    bool HasDepthStencilAttachment() const;
    wgpu::TextureFormat GetDepthStencilFormat() const;
    uint32_t GetSampleCount() const;

    size_t ComputeContentHash() override;

  private:
    ~AttachmentState() override;
};

}  // namespace dawn::native
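The blueprint/cache split above relies on custom hash and equality functors over an unordered_set. As a rough standalone illustration of that deduplication pattern (not Dawn's actual code; every name below is invented for the example), a minimal sketch in standard C++ might look like this:

#include <cstdint>
#include <functional>
#include <unordered_set>

struct Blueprint {
    uint32_t sampleCount = 1;
    uint32_t depthStencilFormat = 0;  // 0 stands in for "Undefined"

    struct HashFunc {
        size_t operator()(const Blueprint* b) const {
            // Combine the fields that EqualityFunc compares.
            return std::hash<uint64_t>()((uint64_t(b->sampleCount) << 32) | b->depthStencilFormat);
        }
    };
    struct EqualityFunc {
        bool operator()(const Blueprint* a, const Blueprint* b) const {
            return a->sampleCount == b->sampleCount &&
                   a->depthStencilFormat == b->depthStencilFormat;
        }
    };
};

using BlueprintCache =
    std::unordered_set<Blueprint*, Blueprint::HashFunc, Blueprint::EqualityFunc>;

// Returns the cached instance if an equivalent blueprint already exists,
// otherwise inserts and returns the candidate.
inline Blueprint* GetOrCache(BlueprintCache& cache, Blueprint* candidate) {
    auto [iter, inserted] = cache.insert(candidate);
    return *iter;
}

Because equality only looks at the attachment-describing fields, two logically identical states always resolve to one cached object, which is what makes later pointer comparisons of AttachmentState cheap.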


@@ -16,21 +16,20 @@

namespace dawn::native {

BackendConnection::BackendConnection(InstanceBase* instance, wgpu::BackendType type)
    : mInstance(instance), mType(type) {}

wgpu::BackendType BackendConnection::GetType() const {
    return mType;
}

InstanceBase* BackendConnection::GetInstance() const {
    return mInstance;
}

ResultOrError<std::vector<Ref<AdapterBase>>> BackendConnection::DiscoverAdapters(
    const AdapterDiscoveryOptionsBase* options) {
    return DAWN_FORMAT_VALIDATION_ERROR("DiscoverAdapters not implemented for this backend.");
}

}  // namespace dawn::native


@@ -23,28 +23,28 @@

namespace dawn::native {

// An common interface for all backends. Mostly used to create adapters for a particular
// backend.
class BackendConnection {
  public:
    BackendConnection(InstanceBase* instance, wgpu::BackendType type);
    virtual ~BackendConnection() = default;

    wgpu::BackendType GetType() const;
    InstanceBase* GetInstance() const;

    // Returns all the adapters for the system that can be created by the backend, without extra
    // options (such as debug adapters, custom driver libraries, etc.)
    virtual std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() = 0;

    // Returns new adapters created with the backend-specific options.
    virtual ResultOrError<std::vector<Ref<AdapterBase>>> DiscoverAdapters(
        const AdapterDiscoveryOptionsBase* options);

  private:
    InstanceBase* mInstance = nullptr;
    wgpu::BackendType mType;
};

}  // namespace dawn::native
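As a loose illustration of this "backend hands out adapters through a small virtual interface" shape (a standalone sketch, not Dawn's API; all names here are invented), one could write:

#include <memory>
#include <string>
#include <vector>

struct Adapter {
    std::string name;
};

class Backend {
  public:
    virtual ~Backend() = default;
    // Default adapters require no backend-specific options.
    virtual std::vector<Adapter> DiscoverDefaultAdapters() = 0;
};

class NullBackend final : public Backend {
  public:
    std::vector<Adapter> DiscoverDefaultAdapters() override { return {{"null"}}; }
};

// The instance-level enumeration simply concatenates what each backend reports.
inline std::vector<Adapter> EnumerateAll(const std::vector<std::unique_ptr<Backend>>& backends) {
    std::vector<Adapter> all;
    for (const auto& backend : backends) {
        auto adapters = backend->DiscoverDefaultAdapters();
        all.insert(all.end(), adapters.begin(), adapters.end());
    }
    return all;
}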


@@ -29,517 +29,498 @@

namespace dawn::native {

namespace {

// Helper functions to perform binding-type specific validation
MaybeError ValidateBufferBinding(const DeviceBase* device,
                                 const BindGroupEntry& entry,
                                 const BindingInfo& bindingInfo) {
    DAWN_INVALID_IF(entry.buffer == nullptr, "Binding entry buffer not set.");
    DAWN_INVALID_IF(entry.sampler != nullptr || entry.textureView != nullptr,
                    "Expected only buffer to be set for binding entry.");
    DAWN_INVALID_IF(entry.nextInChain != nullptr, "nextInChain must be nullptr.");
    DAWN_TRY(device->ValidateObject(entry.buffer));
    ASSERT(bindingInfo.bindingType == BindingInfoType::Buffer);

    wgpu::BufferUsage requiredUsage;
    uint64_t maxBindingSize;
    uint64_t requiredBindingAlignment;
    switch (bindingInfo.buffer.type) {
        case wgpu::BufferBindingType::Uniform:
            requiredUsage = wgpu::BufferUsage::Uniform;
            maxBindingSize = device->GetLimits().v1.maxUniformBufferBindingSize;
            requiredBindingAlignment = device->GetLimits().v1.minUniformBufferOffsetAlignment;
            break;
        case wgpu::BufferBindingType::Storage:
        case wgpu::BufferBindingType::ReadOnlyStorage:
            requiredUsage = wgpu::BufferUsage::Storage;
            maxBindingSize = device->GetLimits().v1.maxStorageBufferBindingSize;
            requiredBindingAlignment = device->GetLimits().v1.minStorageBufferOffsetAlignment;
            break;
        case kInternalStorageBufferBinding:
            requiredUsage = kInternalStorageBuffer;
            maxBindingSize = device->GetLimits().v1.maxStorageBufferBindingSize;
            requiredBindingAlignment = device->GetLimits().v1.minStorageBufferOffsetAlignment;
            break;
        case wgpu::BufferBindingType::Undefined:
            UNREACHABLE();
    }

    uint64_t bufferSize = entry.buffer->GetSize();

    // Handle wgpu::WholeSize, avoiding overflows.
    DAWN_INVALID_IF(entry.offset > bufferSize,
                    "Binding offset (%u) is larger than the size (%u) of %s.", entry.offset,
                    bufferSize, entry.buffer);

    uint64_t bindingSize =
        (entry.size == wgpu::kWholeSize) ? bufferSize - entry.offset : entry.size;

    DAWN_INVALID_IF(bindingSize > bufferSize,
                    "Binding size (%u) is larger than the size (%u) of %s.", bindingSize,
                    bufferSize, entry.buffer);

    DAWN_INVALID_IF(bindingSize == 0, "Binding size is zero");

    // Note that no overflow can happen because we already checked that
    // bufferSize >= bindingSize
    DAWN_INVALID_IF(entry.offset > bufferSize - bindingSize,
                    "Binding range (offset: %u, size: %u) doesn't fit in the size (%u) of %s.",
                    entry.offset, bufferSize, bindingSize, entry.buffer);

    DAWN_INVALID_IF(!IsAligned(entry.offset, requiredBindingAlignment),
                    "Offset (%u) does not satisfy the minimum %s alignment (%u).", entry.offset,
                    bindingInfo.buffer.type, requiredBindingAlignment);

    DAWN_INVALID_IF(!(entry.buffer->GetUsage() & requiredUsage),
                    "Binding usage (%s) of %s doesn't match expected usage (%s).",
                    entry.buffer->GetUsageExternalOnly(), entry.buffer, requiredUsage);

    DAWN_INVALID_IF(bindingSize < bindingInfo.buffer.minBindingSize,
                    "Binding size (%u) is smaller than the minimum binding size (%u).", bindingSize,
                    bindingInfo.buffer.minBindingSize);

    DAWN_INVALID_IF(bindingSize > maxBindingSize,
                    "Binding size (%u) is larger than the maximum binding size (%u).", bindingSize,
                    maxBindingSize);

    return {};
}

MaybeError ValidateTextureBinding(DeviceBase* device,
                                  const BindGroupEntry& entry,
                                  const BindingInfo& bindingInfo) {
    DAWN_INVALID_IF(entry.textureView == nullptr, "Binding entry textureView not set.");
    DAWN_INVALID_IF(entry.sampler != nullptr || entry.buffer != nullptr,
                    "Expected only textureView to be set for binding entry.");
    DAWN_INVALID_IF(entry.nextInChain != nullptr, "nextInChain must be nullptr.");
    DAWN_TRY(device->ValidateObject(entry.textureView));

    TextureViewBase* view = entry.textureView;

    Aspect aspect = view->GetAspects();
    DAWN_INVALID_IF(!HasOneBit(aspect), "Multiple aspects (%s) selected in %s.", aspect, view);

    TextureBase* texture = view->GetTexture();
    switch (bindingInfo.bindingType) {
        case BindingInfoType::Texture: {
            SampleTypeBit supportedTypes =
                texture->GetFormat().GetAspectInfo(aspect).supportedSampleTypes;
            SampleTypeBit requiredType = SampleTypeToSampleTypeBit(bindingInfo.texture.sampleType);

            DAWN_INVALID_IF(!(texture->GetUsage() & wgpu::TextureUsage::TextureBinding),
                            "Usage (%s) of %s doesn't include TextureUsage::TextureBinding.",
                            texture->GetUsage(), texture);

            DAWN_INVALID_IF(texture->IsMultisampledTexture() != bindingInfo.texture.multisampled,
                            "Sample count (%u) of %s doesn't match expectation (multisampled: %d).",
                            texture->GetSampleCount(), texture, bindingInfo.texture.multisampled);

            DAWN_INVALID_IF(
                (supportedTypes & requiredType) == 0,
                "None of the supported sample types (%s) of %s match the expected sample "
                "types (%s).",
                supportedTypes, texture, requiredType);

            DAWN_INVALID_IF(entry.textureView->GetDimension() != bindingInfo.texture.viewDimension,
                            "Dimension (%s) of %s doesn't match the expected dimension (%s).",
                            entry.textureView->GetDimension(), entry.textureView,
                            bindingInfo.texture.viewDimension);
            break;
        }
        case BindingInfoType::StorageTexture: {
            DAWN_INVALID_IF(!(texture->GetUsage() & wgpu::TextureUsage::StorageBinding),
                            "Usage (%s) of %s doesn't include TextureUsage::StorageBinding.",
                            texture->GetUsage(), texture);

            ASSERT(!texture->IsMultisampledTexture());

            DAWN_INVALID_IF(texture->GetFormat().format != bindingInfo.storageTexture.format,
                            "Format (%s) of %s expected to be (%s).", texture->GetFormat().format,
                            texture, bindingInfo.storageTexture.format);

            DAWN_INVALID_IF(
                entry.textureView->GetDimension() != bindingInfo.storageTexture.viewDimension,
                "Dimension (%s) of %s doesn't match the expected dimension (%s).",
                entry.textureView->GetDimension(), entry.textureView,
                bindingInfo.storageTexture.viewDimension);

            DAWN_INVALID_IF(entry.textureView->GetLevelCount() != 1,
                            "mipLevelCount (%u) of %s expected to be 1.",
                            entry.textureView->GetLevelCount(), entry.textureView);
            break;
        }
        default:
            UNREACHABLE();
            break;
    }

    return {};
}

MaybeError ValidateSamplerBinding(const DeviceBase* device,
                                  const BindGroupEntry& entry,
                                  const BindingInfo& bindingInfo) {
    DAWN_INVALID_IF(entry.sampler == nullptr, "Binding entry sampler not set.");
    DAWN_INVALID_IF(entry.textureView != nullptr || entry.buffer != nullptr,
                    "Expected only sampler to be set for binding entry.");
    DAWN_INVALID_IF(entry.nextInChain != nullptr, "nextInChain must be nullptr.");
    DAWN_TRY(device->ValidateObject(entry.sampler));

    ASSERT(bindingInfo.bindingType == BindingInfoType::Sampler);

    switch (bindingInfo.sampler.type) {
        case wgpu::SamplerBindingType::NonFiltering:
            DAWN_INVALID_IF(entry.sampler->IsFiltering(),
                            "Filtering sampler %s is incompatible with non-filtering sampler "
                            "binding.",
                            entry.sampler);
            [[fallthrough]];
        case wgpu::SamplerBindingType::Filtering:
            DAWN_INVALID_IF(entry.sampler->IsComparison(),
                            "Comparison sampler %s is incompatible with non-comparison sampler "
                            "binding.",
                            entry.sampler);
            break;
        case wgpu::SamplerBindingType::Comparison:
            DAWN_INVALID_IF(!entry.sampler->IsComparison(),
                            "Non-comparison sampler %s is imcompatible with comparison sampler "
                            "binding.",
                            entry.sampler);
            break;
        default:
            UNREACHABLE();
            break;
    }

    return {};
}

MaybeError ValidateExternalTextureBinding(
    const DeviceBase* device,
    const BindGroupEntry& entry,
    const ExternalTextureBindingEntry* externalTextureBindingEntry,
    const ExternalTextureBindingExpansionMap& expansions) {
    DAWN_INVALID_IF(externalTextureBindingEntry == nullptr,
                    "Binding entry external texture not set.");

    DAWN_INVALID_IF(
        entry.sampler != nullptr || entry.textureView != nullptr || entry.buffer != nullptr,
        "Expected only external texture to be set for binding entry.");

    DAWN_INVALID_IF(expansions.find(BindingNumber(entry.binding)) == expansions.end(),
                    "External texture binding entry %u is not present in the bind group layout.",
                    entry.binding);

    DAWN_TRY(ValidateSingleSType(externalTextureBindingEntry->nextInChain,
                                 wgpu::SType::ExternalTextureBindingEntry));

    DAWN_TRY(device->ValidateObject(externalTextureBindingEntry->externalTexture));

    return {};
}

}  // anonymous namespace

MaybeError ValidateBindGroupDescriptor(DeviceBase* device, const BindGroupDescriptor* descriptor) {
    DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");

    DAWN_TRY(device->ValidateObject(descriptor->layout));

    DAWN_INVALID_IF(
        descriptor->entryCount != descriptor->layout->GetUnexpandedBindingCount(),
        "Number of entries (%u) did not match the number of entries (%u) specified in %s."
        "\nExpected layout: %s",
        descriptor->entryCount, static_cast<uint32_t>(descriptor->layout->GetBindingCount()),
        descriptor->layout, descriptor->layout->EntriesToString());

    const BindGroupLayoutBase::BindingMap& bindingMap = descriptor->layout->GetBindingMap();
    ASSERT(bindingMap.size() <= kMaxBindingsPerPipelineLayout);

    ityp::bitset<BindingIndex, kMaxBindingsPerPipelineLayout> bindingsSet;
    for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
        const BindGroupEntry& entry = descriptor->entries[i];

        const auto& it = bindingMap.find(BindingNumber(entry.binding));
        DAWN_INVALID_IF(it == bindingMap.end(),
                        "In entries[%u], binding index %u not present in the bind group layout."
                        "\nExpected layout: %s",
                        i, entry.binding, descriptor->layout->EntriesToString());

        BindingIndex bindingIndex = it->second;
        ASSERT(bindingIndex < descriptor->layout->GetBindingCount());

        DAWN_INVALID_IF(bindingsSet[bindingIndex],
                        "In entries[%u], binding index %u already used by a previous entry", i,
                        entry.binding);

        bindingsSet.set(bindingIndex);

        // Below this block we validate entries based on the bind group layout, in which
        // external textures have been expanded into their underlying contents. For this reason
        // we must identify external texture binding entries by checking the bind group entry
        // itself.
        // TODO(dawn:1293): Store external textures in
        // BindGroupLayoutBase::BindingDataPointers::bindings so checking external textures can
        // be moved in the switch below.
        const ExternalTextureBindingEntry* externalTextureBindingEntry = nullptr;
        FindInChain(entry.nextInChain, &externalTextureBindingEntry);
        if (externalTextureBindingEntry != nullptr) {
            DAWN_TRY(ValidateExternalTextureBinding(
                device, entry, externalTextureBindingEntry,
                descriptor->layout->GetExternalTextureBindingExpansionMap()));
            continue;
        }

        const BindingInfo& bindingInfo = descriptor->layout->GetBindingInfo(bindingIndex);

        // Perform binding-type specific validation.
        switch (bindingInfo.bindingType) {
            case BindingInfoType::Buffer:
                DAWN_TRY_CONTEXT(ValidateBufferBinding(device, entry, bindingInfo),
                                 "validating entries[%u] as a Buffer."
                                 "\nExpected entry layout: %s",
                                 i, bindingInfo);
                break;
            case BindingInfoType::Texture:
            case BindingInfoType::StorageTexture:
                DAWN_TRY_CONTEXT(ValidateTextureBinding(device, entry, bindingInfo),
                                 "validating entries[%u] as a Texture."
                                 "\nExpected entry layout: %s",
                                 i, bindingInfo);
                break;
            case BindingInfoType::Sampler:
                DAWN_TRY_CONTEXT(ValidateSamplerBinding(device, entry, bindingInfo),
                                 "validating entries[%u] as a Sampler."
                                 "\nExpected entry layout: %s",
                                 i, bindingInfo);
                break;
            case BindingInfoType::ExternalTexture:
                UNREACHABLE();
                break;
        }
    }

    // This should always be true because
    //  - numBindings has to match between the bind group and its layout.
    //  - Each binding must be set at most once
    //
    // We don't validate the equality because it wouldn't be possible to cover it with a test.
    ASSERT(bindingsSet.count() == descriptor->layout->GetUnexpandedBindingCount());

    return {};
}  // anonymous namespace

// BindGroup

BindGroupBase::BindGroupBase(DeviceBase* device,
                             const BindGroupDescriptor* descriptor,
                             void* bindingDataStart)
    : ApiObjectBase(device, descriptor->label),
      mLayout(descriptor->layout),
      mBindingData(mLayout->ComputeBindingDataPointers(bindingDataStart)) {
    for (BindingIndex i{0}; i < mLayout->GetBindingCount(); ++i) {
        // TODO(enga): Shouldn't be needed when bindings are tightly packed.
        // This is to fill Ref<ObjectBase> holes with nullptrs.
        new (&mBindingData.bindings[i]) Ref<ObjectBase>();
    }

    for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
        const BindGroupEntry& entry = descriptor->entries[i];

        BindingIndex bindingIndex =
            descriptor->layout->GetBindingIndex(BindingNumber(entry.binding));
        ASSERT(bindingIndex < mLayout->GetBindingCount());

        // Only a single binding type should be set, so once we found it we can skip to the
        // next loop iteration.

        if (entry.buffer != nullptr) {
            ASSERT(mBindingData.bindings[bindingIndex] == nullptr);
            mBindingData.bindings[bindingIndex] = entry.buffer;
            mBindingData.bufferData[bindingIndex].offset = entry.offset;
            uint64_t bufferSize = (entry.size == wgpu::kWholeSize)
                                      ? entry.buffer->GetSize() - entry.offset
                                      : entry.size;
            mBindingData.bufferData[bindingIndex].size = bufferSize;
            continue;
        }

        if (entry.textureView != nullptr) {
            ASSERT(mBindingData.bindings[bindingIndex] == nullptr);
            mBindingData.bindings[bindingIndex] = entry.textureView;
            continue;
        }

        if (entry.sampler != nullptr) {
            ASSERT(mBindingData.bindings[bindingIndex] == nullptr);
            mBindingData.bindings[bindingIndex] = entry.sampler;
            continue;
        }

        // Here we unpack external texture bindings into multiple additional bindings for the
        // external texture's contents. New binding locations previously determined in the bind
        // group layout are created in this bind group and filled with the external texture's
        // underlying resources.
        const ExternalTextureBindingEntry* externalTextureBindingEntry = nullptr;
        FindInChain(entry.nextInChain, &externalTextureBindingEntry);
        if (externalTextureBindingEntry != nullptr) {
            mBoundExternalTextures.push_back(externalTextureBindingEntry->externalTexture);

            ExternalTextureBindingExpansionMap expansions =
                mLayout->GetExternalTextureBindingExpansionMap();
            ExternalTextureBindingExpansionMap::iterator it =
                expansions.find(BindingNumber(entry.binding));

            ASSERT(it != expansions.end());

            BindingIndex plane0BindingIndex =
                descriptor->layout->GetBindingIndex(it->second.plane0);
            BindingIndex plane1BindingIndex =
                descriptor->layout->GetBindingIndex(it->second.plane1);
            BindingIndex paramsBindingIndex =
                descriptor->layout->GetBindingIndex(it->second.params);

            ASSERT(mBindingData.bindings[plane0BindingIndex] == nullptr);
            mBindingData.bindings[plane0BindingIndex] =
                externalTextureBindingEntry->externalTexture->GetTextureViews()[0];

            ASSERT(mBindingData.bindings[plane1BindingIndex] == nullptr);
            mBindingData.bindings[plane1BindingIndex] =
                externalTextureBindingEntry->externalTexture->GetTextureViews()[1];

            ASSERT(mBindingData.bindings[paramsBindingIndex] == nullptr);
            mBindingData.bindings[paramsBindingIndex] =
                externalTextureBindingEntry->externalTexture->GetParamsBuffer();
            mBindingData.bufferData[paramsBindingIndex].offset = 0;
            mBindingData.bufferData[paramsBindingIndex].size =
                sizeof(dawn_native::ExternalTextureParams);

            continue;
        }
    }

    uint32_t packedIdx = 0;
    for (BindingIndex bindingIndex{0}; bindingIndex < descriptor->layout->GetBufferCount();
         ++bindingIndex) {
        if (descriptor->layout->GetBindingInfo(bindingIndex).buffer.minBindingSize == 0) {
            mBindingData.unverifiedBufferSizes[packedIdx] =
                mBindingData.bufferData[bindingIndex].size;
            ++packedIdx;
        }
    }

    TrackInDevice();
}

BindGroupBase::BindGroupBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
    TrackInDevice();
}

BindGroupBase::~BindGroupBase() = default;

void BindGroupBase::DestroyImpl() {
    if (mLayout != nullptr) {
        ASSERT(!IsError());
        for (BindingIndex i{0}; i < mLayout->GetBindingCount(); ++i) {
            mBindingData.bindings[i].~Ref<ObjectBase>();
        }
    }
}

void BindGroupBase::DeleteThis() {
    // Add another ref to the layout so that if this is the last ref, the layout
    // is destroyed after the bind group. The bind group is slab-allocated inside
    // memory owned by the layout (except for the null backend).
    Ref<BindGroupLayoutBase> layout = mLayout;
    ApiObjectBase::DeleteThis();
}

BindGroupBase::BindGroupBase(DeviceBase* device, ObjectBase::ErrorTag tag)
    : ApiObjectBase(device, tag), mBindingData() {}

// static
BindGroupBase* BindGroupBase::MakeError(DeviceBase* device) {
    return new BindGroupBase(device, ObjectBase::kError);
}

ObjectType BindGroupBase::GetType() const {
    return ObjectType::BindGroup;
}

BindGroupLayoutBase* BindGroupBase::GetLayout() {
    ASSERT(!IsError());
    return mLayout.Get();
}

const BindGroupLayoutBase* BindGroupBase::GetLayout() const {
    ASSERT(!IsError());
    return mLayout.Get();
}

const ityp::span<uint32_t, uint64_t>& BindGroupBase::GetUnverifiedBufferSizes() const {
    ASSERT(!IsError());
    return mBindingData.unverifiedBufferSizes;
}

BufferBinding BindGroupBase::GetBindingAsBufferBinding(BindingIndex bindingIndex) {
    ASSERT(!IsError());
    ASSERT(bindingIndex < mLayout->GetBindingCount());
    ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Buffer);
    BufferBase* buffer = static_cast<BufferBase*>(mBindingData.bindings[bindingIndex].Get());
    return {buffer, mBindingData.bufferData[bindingIndex].offset,
            mBindingData.bufferData[bindingIndex].size};
}

SamplerBase* BindGroupBase::GetBindingAsSampler(BindingIndex bindingIndex) const {
    ASSERT(!IsError());
    ASSERT(bindingIndex < mLayout->GetBindingCount());
    ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Sampler);
    return static_cast<SamplerBase*>(mBindingData.bindings[bindingIndex].Get());
}

TextureViewBase* BindGroupBase::GetBindingAsTextureView(BindingIndex bindingIndex) {
    ASSERT(!IsError());
    ASSERT(bindingIndex < mLayout->GetBindingCount());
    ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Texture ||
           mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::StorageTexture);
    return static_cast<TextureViewBase*>(mBindingData.bindings[bindingIndex].Get());
}

const std::vector<Ref<ExternalTextureBase>>& BindGroupBase::GetBoundExternalTextures() const {
    return mBoundExternalTextures;
}

}  // namespace dawn::native
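For reference, the descriptor shape that this validation accepts can be illustrated with the public WebGPU C++ API (webgpu_cpp.h). This is only a usage sketch: it assumes `device`, `layout`, `buffer`, `sampler`, and `view` were created elsewhere and already match the layout; each entry sets exactly one resource, and buffer entries respect the offset-alignment limits checked above.

#include <array>
#include <webgpu/webgpu_cpp.h>

wgpu::BindGroup MakeExampleBindGroup(const wgpu::Device& device,
                                     const wgpu::BindGroupLayout& layout,
                                     const wgpu::Buffer& buffer,
                                     const wgpu::Sampler& sampler,
                                     const wgpu::TextureView& view) {
    std::array<wgpu::BindGroupEntry, 3> entries = {};

    entries[0].binding = 0;               // a buffer binding
    entries[0].buffer = buffer;
    entries[0].offset = 0;                // must satisfy min*BufferOffsetAlignment
    entries[0].size = wgpu::kWholeSize;   // resolved against the buffer size

    entries[1].binding = 1;               // a sampler binding
    entries[1].sampler = sampler;

    entries[2].binding = 2;               // a sampled-texture binding
    entries[2].textureView = view;

    wgpu::BindGroupDescriptor descriptor = {};
    descriptor.layout = layout;
    descriptor.entryCount = static_cast<uint32_t>(entries.size());
    descriptor.entries = entries.data();
    return device.CreateBindGroup(&descriptor);
}

Setting more than one resource on an entry, or leaving all of them null, trips the "Expected only ... to be set for binding entry" errors in the helpers above.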


@@ -29,68 +29,67 @@

namespace dawn::native {

class DeviceBase;

MaybeError ValidateBindGroupDescriptor(DeviceBase* device, const BindGroupDescriptor* descriptor);

struct BufferBinding {
    BufferBase* buffer;
    uint64_t offset;
    uint64_t size;
};

class BindGroupBase : public ApiObjectBase {
  public:
    static BindGroupBase* MakeError(DeviceBase* device);

    ObjectType GetType() const override;

    BindGroupLayoutBase* GetLayout();
    const BindGroupLayoutBase* GetLayout() const;
    BufferBinding GetBindingAsBufferBinding(BindingIndex bindingIndex);
    SamplerBase* GetBindingAsSampler(BindingIndex bindingIndex) const;
    TextureViewBase* GetBindingAsTextureView(BindingIndex bindingIndex);
    const ityp::span<uint32_t, uint64_t>& GetUnverifiedBufferSizes() const;
    const std::vector<Ref<ExternalTextureBase>>& GetBoundExternalTextures() const;

  protected:
    // To save memory, the size of a bind group is dynamically determined and the bind group is
    // placement-allocated into memory big enough to hold the bind group with its
    // dynamically-sized bindings after it. The pointer of the memory of the beginning of the
    // binding data should be passed as |bindingDataStart|.
    BindGroupBase(DeviceBase* device,
                  const BindGroupDescriptor* descriptor,
                  void* bindingDataStart);

    // Helper to instantiate BindGroupBase. We pass in |derived| because BindGroupBase may not
    // be first in the allocation. The binding data is stored after the Derived class.
    template <typename Derived>
    BindGroupBase(Derived* derived, DeviceBase* device, const BindGroupDescriptor* descriptor)
        : BindGroupBase(device,
                        descriptor,
                        AlignPtr(reinterpret_cast<char*>(derived) + sizeof(Derived),
                                 descriptor->layout->GetBindingDataAlignment())) {
        static_assert(std::is_base_of<BindGroupBase, Derived>::value);
    }

    // Constructor used only for mocking and testing.
    explicit BindGroupBase(DeviceBase* device);

    void DestroyImpl() override;

    ~BindGroupBase() override;

  private:
    BindGroupBase(DeviceBase* device, ObjectBase::ErrorTag tag);
    void DeleteThis() override;

    Ref<BindGroupLayoutBase> mLayout;
    BindGroupLayoutBase::BindingDataPointers mBindingData;

    // TODO(dawn:1293): Store external textures in
    // BindGroupLayoutBase::BindingDataPointers::bindings
    std::vector<Ref<ExternalTextureBase>> mBoundExternalTextures;
};

}  // namespace dawn::native
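The "placement-allocated object followed by its dynamically-sized binding data" trick described in the comments above can be sketched in standalone C++ as follows. This is not Dawn's implementation; the AlignPtr helper and Header type are invented for the example, and error handling is omitted.

#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <new>

// Round a pointer up to the next multiple of `alignment` (power of two assumed).
inline char* AlignPtr(char* ptr, size_t alignment) {
    auto value = reinterpret_cast<uintptr_t>(ptr);
    value = (value + alignment - 1) & ~uintptr_t(alignment - 1);
    return reinterpret_cast<char*>(value);
}

struct Header {
    uint32_t bindingCount;
};

// One allocation holds the Header immediately followed by `bindingCount`
// aligned uint64_t slots; the caller frees it with std::free.
inline Header* AllocateWithTrailingData(uint32_t bindingCount) {
    const size_t dataAlignment = alignof(uint64_t);
    const size_t size = sizeof(Header) + dataAlignment + bindingCount * sizeof(uint64_t);
    char* memory = static_cast<char*>(std::malloc(size));
    Header* header = new (memory) Header{bindingCount};
    auto* data = reinterpret_cast<uint64_t*>(AlignPtr(memory + sizeof(Header), dataAlignment));
    for (uint32_t i = 0; i < bindingCount; ++i) {
        data[i] = 0;  // "placement-construct" the trailing slots
    }
    return header;
}

One allocation per bind group keeps the object and its bindings adjacent in memory, which is the memory-saving and locality argument the header comment makes.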

(File diff suppressed because it is too large.)


@@ -34,139 +34,137 @@

#include "dawn/native/dawn_platform.h"

namespace dawn::native {

// TODO(dawn:1082): Minor optimization to use BindingIndex instead of BindingNumber
struct ExternalTextureBindingExpansion {
    BindingNumber plane0;
    BindingNumber plane1;
    BindingNumber params;
};

using ExternalTextureBindingExpansionMap = std::map<BindingNumber, ExternalTextureBindingExpansion>;

MaybeError ValidateBindGroupLayoutDescriptor(DeviceBase* device,
                                             const BindGroupLayoutDescriptor* descriptor,
                                             bool allowInternalBinding = false);

// Bindings are specified as a |BindingNumber| in the BindGroupLayoutDescriptor.
// These numbers may be arbitrary and sparse. Internally, Dawn packs these numbers
// into a packed range of |BindingIndex| integers.
class BindGroupLayoutBase : public ApiObjectBase, public CachedObject {
  public:
    BindGroupLayoutBase(DeviceBase* device,
                        const BindGroupLayoutDescriptor* descriptor,
                        PipelineCompatibilityToken pipelineCompatibilityToken,
                        ApiObjectBase::UntrackedByDeviceTag tag);
    BindGroupLayoutBase(DeviceBase* device,
                        const BindGroupLayoutDescriptor* descriptor,
                        PipelineCompatibilityToken pipelineCompatibilityToken);
    ~BindGroupLayoutBase() override;

    static BindGroupLayoutBase* MakeError(DeviceBase* device);

    ObjectType GetType() const override;

    // A map from the BindingNumber to its packed BindingIndex.
    using BindingMap = std::map<BindingNumber, BindingIndex>;

    const BindingInfo& GetBindingInfo(BindingIndex bindingIndex) const {
        ASSERT(!IsError());
        ASSERT(bindingIndex < mBindingInfo.size());
        return mBindingInfo[bindingIndex];
    }
    const BindingMap& GetBindingMap() const;
    bool HasBinding(BindingNumber bindingNumber) const;
    BindingIndex GetBindingIndex(BindingNumber bindingNumber) const;

    // Functions necessary for the unordered_set<BGLBase*>-based cache.
    size_t ComputeContentHash() override;

    struct EqualityFunc {
        bool operator()(const BindGroupLayoutBase* a, const BindGroupLayoutBase* b) const;
    };

    BindingIndex GetBindingCount() const;
    // Returns |BindingIndex| because buffers are packed at the front.
    BindingIndex GetBufferCount() const;
    // Returns |BindingIndex| because dynamic buffers are packed at the front.
    BindingIndex GetDynamicBufferCount() const;
    uint32_t GetUnverifiedBufferCount() const;

    // Used to get counts and validate them in pipeline layout creation. Other getters
    // should be used to get typed integer counts.
    const BindingCounts& GetBindingCountInfo() const;

    uint32_t GetExternalTextureBindingCount() const;

    // Used to specify unpacked external texture binding slots when transforming shader modules.
    const ExternalTextureBindingExpansionMap& GetExternalTextureBindingExpansionMap() const;

    uint32_t GetUnexpandedBindingCount() const;

    // Tests that the BindingInfo of two bind groups are equal,
    // ignoring their compatibility groups.
    bool IsLayoutEqual(const BindGroupLayoutBase* other,
                       bool excludePipelineCompatibiltyToken = false) const;
    PipelineCompatibilityToken GetPipelineCompatibilityToken() const;

    struct BufferBindingData {
        uint64_t offset;
        uint64_t size;
    };

    struct BindingDataPointers {
        ityp::span<BindingIndex, BufferBindingData> const bufferData = {};
        ityp::span<BindingIndex, Ref<ObjectBase>> const bindings = {};
        ityp::span<uint32_t, uint64_t> const unverifiedBufferSizes = {};
    };

    // Compute the amount of space / alignment required to store bindings for a bind group of
    // this layout.
    size_t GetBindingDataSize() const;
    static constexpr size_t GetBindingDataAlignment() {
        static_assert(alignof(Ref<ObjectBase>) <= alignof(BufferBindingData));
        return alignof(BufferBindingData);
    }

    BindingDataPointers ComputeBindingDataPointers(void* dataStart) const;

    bool IsStorageBufferBinding(BindingIndex bindingIndex) const;

    // Returns a detailed string representation of the layout entries for use in error messages.
    std::string EntriesToString() const;

  protected:
    // Constructor used only for mocking and testing.
    explicit BindGroupLayoutBase(DeviceBase* device);

    void DestroyImpl() override;

    template <typename BindGroup>
    SlabAllocator<BindGroup> MakeFrontendBindGroupAllocator(size_t size) {
        return SlabAllocator<BindGroup>(
            size,  // bytes
            Align(sizeof(BindGroup), GetBindingDataAlignment()) + GetBindingDataSize(),  // size
            std::max(alignof(BindGroup), GetBindingDataAlignment())  // alignment
        );
    }

  private:
    BindGroupLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag);

    BindingCounts mBindingCounts = {};
    ityp::vector<BindingIndex, BindingInfo> mBindingInfo;

    // Map from BindGroupLayoutEntry.binding to packed indices.
    BindingMap mBindingMap;

    ExternalTextureBindingExpansionMap mExternalTextureBindingExpansionMap;

    // Non-0 if this BindGroupLayout was created as part of a default PipelineLayout.
    const PipelineCompatibilityToken mPipelineCompatibilityToken = PipelineCompatibilityToken(0);

    uint32_t mUnexpandedBindingCount;
};

}  // namespace dawn::native

#endif  // SRC_DAWN_NATIVE_BINDGROUPLAYOUT_H_
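The BindingNumber-to-BindingIndex packing described in the class comment (sparse, user-chosen numbers mapped to a dense 0..N-1 range) can be illustrated with a small standalone sketch. This is not Dawn's code; the real layout also orders buffers and dynamic buffers at the front, which is omitted here.

#include <cstdint>
#include <map>
#include <vector>

using BindingNumber = uint32_t;  // arbitrary and possibly sparse (e.g. 0, 7, 42)
using BindingIndex = uint32_t;   // dense, assigned by the layout

struct PackedLayout {
    std::map<BindingNumber, BindingIndex> bindingMap;
};

inline PackedLayout PackBindings(const std::vector<BindingNumber>& bindingNumbers) {
    PackedLayout layout;
    for (BindingNumber number : bindingNumbers) {
        layout.bindingMap.emplace(number, 0);  // placeholder index, filled in below
    }
    // std::map iterates in ascending BindingNumber order, so indices come out dense
    // and deterministic regardless of the order entries were declared in.
    BindingIndex next = 0;
    for (auto& [number, index] : layout.bindingMap) {
        index = next++;
    }
    return layout;
}

With PackBindings({42, 0, 7}), binding number 0 maps to index 0, 7 to index 1, and 42 to index 2, which is the lookup GetBindingIndex performs for every bind group entry.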


@ -25,119 +25,117 @@
namespace dawn::native {
// Keeps track of the dirty bind groups so they can be lazily applied when we know the
// pipeline state or it changes.
// |DynamicOffset| is a template parameter because offsets in Vulkan are uint32_t but uint64_t
// in other backends.
template <bool CanInheritBindGroups, typename DynamicOffset>
class BindGroupTrackerBase {
public:
void OnSetBindGroup(BindGroupIndex index,
BindGroupBase* bindGroup,
uint32_t dynamicOffsetCount,
uint32_t* dynamicOffsets) {
ASSERT(index < kMaxBindGroupsTyped);
if (mBindGroupLayoutsMask[index]) {
// It is okay to only dirty bind groups that are used by the current pipeline
// layout. If the pipeline layout changes, then the bind groups it uses will
// become dirty.
if (mBindGroups[index] != bindGroup) {
mDirtyBindGroups.set(index);
mDirtyBindGroupsObjectChangedOrIsDynamic.set(index);
}
if (dynamicOffsetCount > 0) {
mDirtyBindGroupsObjectChangedOrIsDynamic.set(index);
}
}
mBindGroups[index] = bindGroup;
mDynamicOffsetCounts[index] = dynamicOffsetCount;
SetDynamicOffsets(mDynamicOffsets[index].data(), dynamicOffsetCount, dynamicOffsets);
}
void OnSetPipeline(PipelineBase* pipeline) { mPipelineLayout = pipeline->GetLayout(); }
protected:
// The Derived class should call this before it applies bind groups.
void BeforeApply() {
if (mLastAppliedPipelineLayout == mPipelineLayout) {
return;
}
// Use the bind group layout mask to avoid marking unused bind groups as dirty.
mBindGroupLayoutsMask = mPipelineLayout->GetBindGroupLayoutsMask();
// Changing the pipeline layout sets bind groups as dirty. If CanInheritBindGroups,
// the first |k| matching bind groups may be inherited.
if (CanInheritBindGroups && mLastAppliedPipelineLayout != nullptr) {
// Dirty bind groups that cannot be inherited.
BindGroupLayoutMask dirtiedGroups =
~mPipelineLayout->InheritedGroupsMask(mLastAppliedPipelineLayout);
mDirtyBindGroups |= dirtiedGroups;
mDirtyBindGroupsObjectChangedOrIsDynamic |= dirtiedGroups;
// Clear any bind groups not in the mask.
mDirtyBindGroups &= mBindGroupLayoutsMask;
mDirtyBindGroupsObjectChangedOrIsDynamic &= mBindGroupLayoutsMask;
} else {
mDirtyBindGroups = mBindGroupLayoutsMask;
mDirtyBindGroupsObjectChangedOrIsDynamic = mBindGroupLayoutsMask;
}
}
// The Derived class should call this after it applies bind groups.
void AfterApply() {
// Reset all dirty bind groups. Dirty bind groups not in the bind group layout mask
// will be dirtied again by the next pipeline change.
mDirtyBindGroups.reset();
mDirtyBindGroupsObjectChangedOrIsDynamic.reset();
// Keep track of the last applied pipeline layout. This allows us to avoid computing
// the intersection of the dirty bind groups and bind group layout mask in next Draw
// or Dispatch (which is very hot code) until the layout is changed again.
mLastAppliedPipelineLayout = mPipelineLayout;
}
BindGroupLayoutMask mDirtyBindGroups = 0;
BindGroupLayoutMask mDirtyBindGroupsObjectChangedOrIsDynamic = 0;
BindGroupLayoutMask mBindGroupLayoutsMask = 0;
ityp::array<BindGroupIndex, BindGroupBase*, kMaxBindGroups> mBindGroups = {};
ityp::array<BindGroupIndex, uint32_t, kMaxBindGroups> mDynamicOffsetCounts = {};
ityp::array<BindGroupIndex,
std::array<DynamicOffset, kMaxDynamicBuffersPerPipelineLayout>,
kMaxBindGroups>
mDynamicOffsets = {};
// |mPipelineLayout| is the current pipeline layout set on the command buffer.
// |mLastAppliedPipelineLayout| is the last pipeline layout for which we applied changes
// to the bind group bindings.
PipelineLayoutBase* mPipelineLayout = nullptr;
PipelineLayoutBase* mLastAppliedPipelineLayout = nullptr;
private:
// We have two overloads here because offsets in Vulkan are uint32_t but uint64_t
// in other backends.
static void SetDynamicOffsets(uint64_t* data,
uint32_t dynamicOffsetCount,
uint32_t* dynamicOffsets) {
for (uint32_t i = 0; i < dynamicOffsetCount; ++i) {
data[i] = static_cast<uint64_t>(dynamicOffsets[i]);
}
}
static void SetDynamicOffsets(uint32_t* data,
uint32_t dynamicOffsetCount,
uint32_t* dynamicOffsets) {
if (dynamicOffsetCount > 0) {
memcpy(data, dynamicOffsets, sizeof(uint32_t) * dynamicOffsetCount);
}
}
};
} // namespace dawn::native
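The tracker above leans on cheap bitset arithmetic: binding a different group sets its dirty bit, and a pipeline layout change re-dirties only the groups that cannot be inherited, masked by the groups the new layout actually uses. A standalone sketch of that masking with std::bitset (the group count and masks are made up for illustration):

#include <bitset>
#include <cstdio>
constexpr size_t kNumGroups = 4;  // illustrative only
using GroupMask = std::bitset<kNumGroups>;
int main() {
    GroupMask dirty;                // groups that must be re-applied at the next draw
    GroupMask layoutsMask("0111");  // groups used by the newly set pipeline layout
    dirty.set(1);                   // a different bind group was bound at index 1
    // Suppose only group 0 matches the previously applied layout and may be inherited.
    GroupMask inherited("0001");
    dirty |= ~inherited;   // everything that cannot be inherited becomes dirty
    dirty &= layoutsMask;  // but only groups the new layout uses stay tracked
    std::printf("dirty mask = %s\n", dirty.to_string().c_str());  // prints 0110
    return 0;
}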
@ -18,178 +18,172 @@
namespace dawn::native {
void IncrementBindingCounts(BindingCounts* bindingCounts, const BindGroupLayoutEntry& entry) {
bindingCounts->totalCount += 1;
uint32_t PerStageBindingCounts::*perStageBindingCountMember = nullptr;
if (entry.buffer.type != wgpu::BufferBindingType::Undefined) {
++bindingCounts->bufferCount;
const BufferBindingLayout& buffer = entry.buffer;
if (buffer.minBindingSize == 0) {
++bindingCounts->unverifiedBufferCount;
}
switch (buffer.type) {
case wgpu::BufferBindingType::Uniform:
if (buffer.hasDynamicOffset) {
++bindingCounts->dynamicUniformBufferCount;
}
perStageBindingCountMember = &PerStageBindingCounts::uniformBufferCount;
break;
case wgpu::BufferBindingType::Storage:
case kInternalStorageBufferBinding:
case wgpu::BufferBindingType::ReadOnlyStorage:
if (buffer.hasDynamicOffset) {
++bindingCounts->dynamicStorageBufferCount;
}
perStageBindingCountMember = &PerStageBindingCounts::storageBufferCount;
break;
case wgpu::BufferBindingType::Undefined:
// Can't get here due to the enclosing if statement.
UNREACHABLE();
break;
}
} else if (entry.sampler.type != wgpu::SamplerBindingType::Undefined) {
perStageBindingCountMember = &PerStageBindingCounts::samplerCount;
} else if (entry.texture.sampleType != wgpu::TextureSampleType::Undefined) {
perStageBindingCountMember = &PerStageBindingCounts::sampledTextureCount;
} else if (entry.storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
perStageBindingCountMember = &PerStageBindingCounts::storageTextureCount;
} else {
const ExternalTextureBindingLayout* externalTextureBindingLayout;
FindInChain(entry.nextInChain, &externalTextureBindingLayout);
if (externalTextureBindingLayout != nullptr) {
perStageBindingCountMember = &PerStageBindingCounts::externalTextureCount;
}
}
ASSERT(perStageBindingCountMember != nullptr);
for (SingleShaderStage stage : IterateStages(entry.visibility)) {
++(bindingCounts->perStage[stage].*perStageBindingCountMember);
}
}
void AccumulateBindingCounts(BindingCounts* bindingCounts, const BindingCounts& rhs) {
bindingCounts->totalCount += rhs.totalCount;
bindingCounts->bufferCount += rhs.bufferCount;
bindingCounts->unverifiedBufferCount += rhs.unverifiedBufferCount;
bindingCounts->dynamicUniformBufferCount += rhs.dynamicUniformBufferCount;
bindingCounts->dynamicStorageBufferCount += rhs.dynamicStorageBufferCount;
for (SingleShaderStage stage : IterateStages(kAllStages)) {
bindingCounts->perStage[stage].sampledTextureCount +=
rhs.perStage[stage].sampledTextureCount;
bindingCounts->perStage[stage].samplerCount += rhs.perStage[stage].samplerCount;
bindingCounts->perStage[stage].storageBufferCount += rhs.perStage[stage].storageBufferCount;
bindingCounts->perStage[stage].storageTextureCount +=
rhs.perStage[stage].storageTextureCount;
bindingCounts->perStage[stage].uniformBufferCount += rhs.perStage[stage].uniformBufferCount;
bindingCounts->perStage[stage].externalTextureCount +=
rhs.perStage[stage].externalTextureCount;
}
}
MaybeError ValidateBindingCounts(const BindingCounts& bindingCounts) {
DAWN_INVALID_IF(
bindingCounts.dynamicUniformBufferCount > kMaxDynamicUniformBuffersPerPipelineLayout,
"The number of dynamic uniform buffers (%u) exceeds the maximum per-pipeline-layout "
"limit (%u).",
bindingCounts.dynamicUniformBufferCount, kMaxDynamicUniformBuffersPerPipelineLayout);
DAWN_INVALID_IF(
bindingCounts.dynamicStorageBufferCount > kMaxDynamicStorageBuffersPerPipelineLayout,
"The number of dynamic storage buffers (%u) exceeds the maximum per-pipeline-layout "
"limit (%u).",
bindingCounts.dynamicStorageBufferCount, kMaxDynamicStorageBuffersPerPipelineLayout);
for (SingleShaderStage stage : IterateStages(kAllStages)) {
DAWN_INVALID_IF(
bindingCounts.perStage[stage].sampledTextureCount > kMaxSampledTexturesPerShaderStage,
"The number of sampled textures (%u) in the %s stage exceeds the maximum "
"per-stage limit (%u).",
bindingCounts.perStage[stage].sampledTextureCount, stage,
kMaxSampledTexturesPerShaderStage);
// The per-stage number of external textures is bound by the maximum sampled textures
// per stage.
DAWN_INVALID_IF(bindingCounts.perStage[stage].externalTextureCount >
kMaxSampledTexturesPerShaderStage / kSampledTexturesPerExternalTexture,
"The number of external textures (%u) in the %s stage exceeds the maximum "
"per-stage limit (%u).",
bindingCounts.perStage[stage].externalTextureCount, stage,
kMaxSampledTexturesPerShaderStage / kSampledTexturesPerExternalTexture);
DAWN_INVALID_IF(
bindingCounts.perStage[stage].sampledTextureCount +
(bindingCounts.perStage[stage].externalTextureCount *
kSampledTexturesPerExternalTexture) >
kMaxSampledTexturesPerShaderStage,
"The combination of sampled textures (%u) and external textures (%u) in the %s "
"stage exceeds the maximum per-stage limit (%u).",
bindingCounts.perStage[stage].sampledTextureCount,
bindingCounts.perStage[stage].externalTextureCount, stage,
kMaxSampledTexturesPerShaderStage);
DAWN_INVALID_IF(
bindingCounts.perStage[stage].samplerCount > kMaxSamplersPerShaderStage,
"The number of samplers (%u) in the %s stage exceeds the maximum per-stage limit "
"(%u).",
bindingCounts.perStage[stage].samplerCount, stage, kMaxSamplersPerShaderStage);
DAWN_INVALID_IF(
bindingCounts.perStage[stage].samplerCount +
(bindingCounts.perStage[stage].externalTextureCount *
kSamplersPerExternalTexture) >
kMaxSamplersPerShaderStage,
"The combination of samplers (%u) and external textures (%u) in the %s stage "
"exceeds the maximum per-stage limit (%u).",
bindingCounts.perStage[stage].samplerCount,
bindingCounts.perStage[stage].externalTextureCount, stage, kMaxSamplersPerShaderStage);
DAWN_INVALID_IF(
bindingCounts.perStage[stage].storageBufferCount > kMaxStorageBuffersPerShaderStage,
"The number of storage buffers (%u) in the %s stage exceeds the maximum per-stage "
"limit (%u).",
bindingCounts.perStage[stage].storageBufferCount, stage,
kMaxStorageBuffersPerShaderStage);
DAWN_INVALID_IF(
bindingCounts.perStage[stage].storageTextureCount > kMaxStorageTexturesPerShaderStage,
"The number of storage textures (%u) in the %s stage exceeds the maximum per-stage "
"limit (%u).",
bindingCounts.perStage[stage].storageTextureCount, stage,
kMaxStorageTexturesPerShaderStage);
DAWN_INVALID_IF(
bindingCounts.perStage[stage].uniformBufferCount > kMaxUniformBuffersPerShaderStage,
"The number of uniform buffers (%u) in the %s stage exceeds the maximum per-stage "
"limit (%u).",
bindingCounts.perStage[stage].uniformBufferCount, stage,
kMaxUniformBuffersPerShaderStage);
DAWN_INVALID_IF(
bindingCounts.perStage[stage].uniformBufferCount +
(bindingCounts.perStage[stage].externalTextureCount *
kUniformsPerExternalTexture) >
kMaxUniformBuffersPerShaderStage,
"The combination of uniform buffers (%u) and external textures (%u) in the %s "
"stage exceeds the maximum per-stage limit (%u).",
bindingCounts.perStage[stage].uniformBufferCount,
bindingCounts.perStage[stage].externalTextureCount, stage,
kMaxUniformBuffersPerShaderStage);
}
return {};
}
} // namespace dawn::native
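IncrementBindingCounts above picks the per-stage counter to bump once, by storing a pointer-to-member of PerStageBindingCounts, and then applies it to every stage the entry is visible in. A standalone sketch of that idiom with simplified types (the stage list and struct fields are invented for illustration):

#include <cstdint>
#include <cstdio>
#include <vector>
struct PerStageCounts {
    uint32_t samplerCount = 0;
    uint32_t sampledTextureCount = 0;
    uint32_t uniformBufferCount = 0;
};
enum class Stage { Vertex, Fragment, Compute };
int main() {
    PerStageCounts perStage[3] = {};  // one counter struct per stage
    // Decide once which counter this binding contributes to...
    uint32_t PerStageCounts::*member = &PerStageCounts::uniformBufferCount;
    // ...then bump that counter for every stage the binding is visible in.
    std::vector<Stage> visibleStages = {Stage::Vertex, Stage::Fragment};
    for (Stage stage : visibleStages) {
        ++(perStage[static_cast<int>(stage)].*member);
    }
    std::printf("vertex-stage uniform buffers: %u\n", perStage[0].uniformBufferCount);  // 1
    return 0;
}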
@ -29,70 +29,70 @@
namespace dawn::native {
// Not a real WebGPU limit, but the sum of the two limits is useful for internal optimizations.
static constexpr uint32_t kMaxDynamicBuffersPerPipelineLayout =
kMaxDynamicUniformBuffersPerPipelineLayout + kMaxDynamicStorageBuffersPerPipelineLayout;
static constexpr BindingIndex kMaxDynamicBuffersPerPipelineLayoutTyped =
BindingIndex(kMaxDynamicBuffersPerPipelineLayout);
// Not a real WebGPU limit, but used to optimize parts of Dawn which expect valid usage of the
// API. There should never be more bindings than the max per stage, for each stage.
static constexpr uint32_t kMaxBindingsPerPipelineLayout =
3 * (kMaxSampledTexturesPerShaderStage + kMaxSamplersPerShaderStage +
kMaxStorageBuffersPerShaderStage + kMaxStorageTexturesPerShaderStage +
kMaxUniformBuffersPerShaderStage);
static constexpr BindingIndex kMaxBindingsPerPipelineLayoutTyped =
BindingIndex(kMaxBindingsPerPipelineLayout);
// TODO(enga): Figure out a good number for this.
static constexpr uint32_t kMaxOptimalBindingsPerGroup = 32;
enum class BindingInfoType { Buffer, Sampler, Texture, StorageTexture, ExternalTexture };
struct BindingInfo {
BindingNumber binding;
wgpu::ShaderStage visibility;
BindingInfoType bindingType;
// TODO(dawn:527): These four values could be made into a union.
BufferBindingLayout buffer;
SamplerBindingLayout sampler;
TextureBindingLayout texture;
StorageTextureBindingLayout storageTexture;
};
struct BindingSlot {
BindGroupIndex group;
BindingNumber binding;
};
struct PerStageBindingCounts {
uint32_t sampledTextureCount;
uint32_t samplerCount;
uint32_t storageBufferCount;
uint32_t storageTextureCount;
uint32_t uniformBufferCount;
uint32_t externalTextureCount;
};
struct BindingCounts {
uint32_t totalCount;
uint32_t bufferCount;
uint32_t unverifiedBufferCount; // Buffers with minimum buffer size unspecified
uint32_t dynamicUniformBufferCount;
uint32_t dynamicStorageBufferCount;
PerStage<PerStageBindingCounts> perStage;
};
void IncrementBindingCounts(BindingCounts* bindingCounts, const BindGroupLayoutEntry& entry);
void AccumulateBindingCounts(BindingCounts* bindingCounts, const BindingCounts& rhs);
MaybeError ValidateBindingCounts(const BindingCounts& bindingCounts);
// For buffer size validation
using RequiredBufferSizes = ityp::array<BindGroupIndex, std::vector<uint64_t>, kMaxBindGroups>;
} // namespace dawn::native
@ -21,73 +21,72 @@
namespace dawn::native {
CachedBlob::CachedBlob(size_t size) {
if (size != 0) {
Reset(size);
}
}
bool CachedBlob::Empty() const {
return mSize == 0;
}
const uint8_t* CachedBlob::Data() const {
return mData.get();
}
uint8_t* CachedBlob::Data() {
return mData.get();
}
size_t CachedBlob::Size() const {
return mSize;
}
void CachedBlob::Reset(size_t size) {
mSize = size;
mData = std::make_unique<uint8_t[]>(size);
}
BlobCache::BlobCache(dawn::platform::CachingInterface* cachingInterface)
: mCache(cachingInterface) {}
CachedBlob BlobCache::Load(const CacheKey& key) {
std::lock_guard<std::mutex> lock(mMutex);
return LoadInternal(key);
}
void BlobCache::Store(const CacheKey& key, size_t valueSize, const void* value) {
std::lock_guard<std::mutex> lock(mMutex);
StoreInternal(key, valueSize, value);
}
void BlobCache::Store(const CacheKey& key, const CachedBlob& value) {
Store(key, value.Size(), value.Data());
}
CachedBlob BlobCache::LoadInternal(const CacheKey& key) {
CachedBlob result;
if (mCache == nullptr) {
return result;
}
const size_t expectedSize = mCache->LoadData(nullptr, key.data(), key.size(), nullptr, 0);
if (expectedSize > 0) {
result.Reset(expectedSize);
const size_t actualSize =
mCache->LoadData(nullptr, key.data(), key.size(), result.Data(), expectedSize);
ASSERT(expectedSize == actualSize);
}
return result;
}
void BlobCache::StoreInternal(const CacheKey& key, size_t valueSize, const void* value) {
ASSERT(value != nullptr);
ASSERT(valueSize > 0);
if (mCache == nullptr) {
return;
}
mCache->StoreData(nullptr, key.data(), key.size(), value, valueSize);
}
} // namespace dawn::native
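LoadInternal above uses the usual two-call pattern for size-returning cache interfaces: the first LoadData call passes a null buffer just to learn the entry size, the second loads into a buffer of exactly that size. A standalone sketch of the same pattern against a toy in-memory cache (ToyCache and its LoadData/StoreData signatures are invented for illustration, not Dawn's CachingInterface):

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <map>
#include <string>
#include <vector>
// Toy stand-in for a platform cache: returns the stored size and copies the value out only
// when a sufficiently large destination buffer is provided.
class ToyCache {
  public:
    size_t LoadData(const std::string& key, void* valueOut, size_t valueSize) const {
        auto it = mEntries.find(key);
        if (it == mEntries.end()) {
            return 0;
        }
        if (valueOut != nullptr && valueSize >= it->second.size()) {
            std::memcpy(valueOut, it->second.data(), it->second.size());
        }
        return it->second.size();
    }
    void StoreData(const std::string& key, const void* value, size_t valueSize) {
        const uint8_t* bytes = static_cast<const uint8_t*>(value);
        mEntries[key].assign(bytes, bytes + valueSize);
    }
  private:
    std::map<std::string, std::vector<uint8_t>> mEntries;
};
int main() {
    ToyCache cache;
    const char blob[] = "compiled-shader-bytes";
    cache.StoreData("pipeline-key", blob, sizeof(blob));
    // First call: query the size only.
    const size_t expectedSize = cache.LoadData("pipeline-key", nullptr, 0);
    if (expectedSize > 0) {
        // Second call: load into a buffer of exactly that size.
        std::vector<uint8_t> result(expectedSize);
        const size_t actualSize = cache.LoadData("pipeline-key", result.data(), result.size());
        std::printf("loaded %zu of %zu bytes\n", actualSize, expectedSize);
    }
    return 0;
}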
@ -19,57 +19,57 @@
#include <mutex>
namespace dawn::platform {
class CachingInterface;
}
namespace dawn::native {
class BlobCache;
class CacheKey;
class InstanceBase;
class CachedBlob {
public:
explicit CachedBlob(size_t size = 0);
bool Empty() const;
const uint8_t* Data() const;
uint8_t* Data();
size_t Size() const;
void Reset(size_t size);
private:
std::unique_ptr<uint8_t[]> mData = nullptr;
size_t mSize = 0;
};
// This class should always be thread-safe because it may be called asynchronously. Its purpose
// is to wrap the CachingInterface provided via a platform.
class BlobCache {
public:
explicit BlobCache(dawn::platform::CachingInterface* cachingInterface = nullptr);
// Returns empty blob if the key is not found in the cache.
CachedBlob Load(const CacheKey& key);
// Value to store must be non-empty/non-null.
void Store(const CacheKey& key, size_t valueSize, const void* value);
void Store(const CacheKey& key, const CachedBlob& value);
private:
// Non-thread safe internal implementations of load and store. Exposed callers that use
// these helpers need to make sure that these are entered with `mMutex` held.
CachedBlob LoadInternal(const CacheKey& key);
void StoreInternal(const CacheKey& key, size_t valueSize, const void* value);
// Protects thread safety of access to mCache.
std::mutex mMutex;
// TODO(dawn:549): Current CachingInterface declaration requires passing a device to each
// call, but this might be unnecessary. This class just passes nullptr for those calls
// right now. Eventually we can just change the interface to be more generic.
dawn::platform::CachingInterface* mCache;
};
} // namespace dawn::native
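The public Load/Store methods above take mMutex and forward to *Internal helpers that assume the lock is already held, keeping the locking policy in one place. A standalone sketch of that wrapper pattern (the Counter class is invented for illustration):

#include <cstdio>
#include <mutex>
class Counter {
  public:
    void Add(int amount) {
        std::lock_guard<std::mutex> lock(mMutex);
        AddInternal(amount);
    }
    int Get() {
        std::lock_guard<std::mutex> lock(mMutex);
        return mValue;
    }
  private:
    // Non-thread-safe helper; callers must hold mMutex.
    void AddInternal(int amount) { mValue += amount; }
    std::mutex mMutex;
    int mValue = 0;
};
int main() {
    Counter counter;
    counter.Add(2);
    std::printf("value = %d\n", counter.Get());
    return 0;
}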
@ -19,246 +19,246 @@
namespace dawn::native {
BuddyAllocator::BuddyAllocator(uint64_t maxSize) : mMaxBlockSize(maxSize) {
ASSERT(IsPowerOfTwo(maxSize));
mFreeLists.resize(Log2(mMaxBlockSize) + 1);
// Insert the level0 free block.
mRoot = new BuddyBlock(maxSize, /*offset*/ 0);
mFreeLists[0] = {mRoot};
}
BuddyAllocator::~BuddyAllocator() {
if (mRoot) {
DeleteBlock(mRoot);
}
}
uint64_t BuddyAllocator::ComputeTotalNumOfFreeBlocksForTesting() const {
return ComputeNumOfFreeBlocks(mRoot);
}
uint64_t BuddyAllocator::ComputeNumOfFreeBlocks(BuddyBlock* block) const {
if (block->mState == BlockState::Free) {
return 1;
} else if (block->mState == BlockState::Split) {
return ComputeNumOfFreeBlocks(block->split.pLeft) +
ComputeNumOfFreeBlocks(block->split.pLeft->pBuddy);
}
return 0;
}
uint32_t BuddyAllocator::ComputeLevelFromBlockSize(uint64_t blockSize) const {
// Every level in the buddy system can be indexed by order-n where n = log2(blockSize).
// However, mFreeList is zero-indexed by level.
// For example, blockSize=4 is Level1 if MAX_BLOCK is 8.
return Log2(mMaxBlockSize) - Log2(blockSize);
}
uint64_t BuddyAllocator::GetNextFreeAlignedBlock(size_t allocationBlockLevel,
uint64_t alignment) const {
ASSERT(IsPowerOfTwo(alignment));
// The current level is the level that corresponds to the allocation size. The free list may
// not contain a block at that level until a larger one gets allocated (and splits).
// Continue to go up the tree until such a larger block exists.
//
// Even if the block exists at the level, it cannot be used if its offset is unaligned.
// When the alignment is also a power-of-two, we simply use the next free block whose size
// is greater than or equal to the alignment value.
//
// After one 8-byte allocation:
//
// Level --------------------------------
// 0 32 | S |
// --------------------------------
// 1 16 | S | F2 | S - split
// -------------------------------- F - free
// 2 8 | Aa | F1 | | A - allocated
// --------------------------------
//
// Allocate(size=8, alignment=8) will be satisfied by using F1.
// Allocate(size=8, alignment=4) will be satisfied by using F1.
// Allocate(size=8, alignment=16) will be satisfied by using F2.
//
for (size_t ii = 0; ii <= allocationBlockLevel; ++ii) {
size_t currLevel = allocationBlockLevel - ii;
BuddyBlock* freeBlock = mFreeLists[currLevel].head;
if (freeBlock && (freeBlock->mOffset % alignment == 0)) {
return currLevel;
}
}
return kInvalidOffset; // No free block exists at any level.
}
// Inserts existing free block into the free-list.
// Called by allocate upon splitting to insert a child block into a free-list.
// Note: Always insert into the head of the free-list. As when a larger free block at a lower
// level was split, there were no smaller free blocks at a higher level to allocate.
void BuddyAllocator::InsertFreeBlock(BuddyBlock* block, size_t level) {
ASSERT(block->mState == BlockState::Free);
// Inserted block is now the front (no prev).
block->free.pPrev = nullptr;
// Old head is now the inserted block's next.
block->free.pNext = mFreeLists[level].head;
// Block already in HEAD position (ex. right child was inserted first).
if (mFreeLists[level].head != nullptr) {
// Old head's previous is the inserted block.
mFreeLists[level].head->free.pPrev = block;
}
mFreeLists[level].head = block;
}
void BuddyAllocator::RemoveFreeBlock(BuddyBlock* block, size_t level) {
ASSERT(block->mState == BlockState::Free);
if (mFreeLists[level].head == block) {
// Block is in HEAD position.
mFreeLists[level].head = mFreeLists[level].head->free.pNext;
} else {
// Block is after HEAD position.
BuddyBlock* pPrev = block->free.pPrev;
BuddyBlock* pNext = block->free.pNext;
ASSERT(pPrev != nullptr);
ASSERT(pPrev->mState == BlockState::Free);
pPrev->free.pNext = pNext;
if (pNext != nullptr) {
ASSERT(pNext->mState == BlockState::Free);
pNext->free.pPrev = pPrev;
}
}
}
uint64_t BuddyAllocator::Allocate(uint64_t allocationSize, uint64_t alignment) {
if (allocationSize == 0 || allocationSize > mMaxBlockSize) {
return kInvalidOffset;
}
// Compute the level
const uint32_t allocationSizeToLevel = ComputeLevelFromBlockSize(allocationSize);
ASSERT(allocationSizeToLevel < mFreeLists.size());
uint64_t currBlockLevel = GetNextFreeAlignedBlock(allocationSizeToLevel, alignment);
// Error when no free blocks exist (allocator is full)
if (currBlockLevel == kInvalidOffset) {
return kInvalidOffset;
}
// Split free blocks level-by-level.
// Terminate when the current block level is equal to the computed level of the requested
// allocation.
BuddyBlock* currBlock = mFreeLists[currBlockLevel].head;
for (; currBlockLevel < allocationSizeToLevel; currBlockLevel++) {
ASSERT(currBlock->mState == BlockState::Free);
// Remove curr block (about to be split).
RemoveFreeBlock(currBlock, currBlockLevel);
// Create two free child blocks (the buddies).
const uint64_t nextLevelSize = currBlock->mSize / 2;
BuddyBlock* leftChildBlock = new BuddyBlock(nextLevelSize, currBlock->mOffset);
BuddyBlock* rightChildBlock =
new BuddyBlock(nextLevelSize, currBlock->mOffset + nextLevelSize);
// Remember the parent to merge these back upon de-allocation.
rightChildBlock->pParent = currBlock;
leftChildBlock->pParent = currBlock;
// Make them buddies.
leftChildBlock->pBuddy = rightChildBlock;
rightChildBlock->pBuddy = leftChildBlock;
// Insert the children back into the free list into the next level.
// The free list does not require a specific order. However, an order is specified as
// it's ideal to allocate lower addresses first by having the leftmost child in HEAD.
InsertFreeBlock(rightChildBlock, currBlockLevel + 1);
InsertFreeBlock(leftChildBlock, currBlockLevel + 1);
// Curr block is now split.
currBlock->mState = BlockState::Split;
currBlock->split.pLeft = leftChildBlock;
// Descend down into the next level.
currBlock = leftChildBlock;
}
// Remove curr block from free-list (now allocated).
RemoveFreeBlock(currBlock, currBlockLevel);
currBlock->mState = BlockState::Allocated;
return currBlock->mOffset;
}
void BuddyAllocator::Deallocate(uint64_t offset) {
BuddyBlock* curr = mRoot;
// TODO(crbug.com/dawn/827): Optimize de-allocation.
// Passing allocationSize directly will avoid the following level-by-level search;
// however, it requires the size information to be stored outside the allocator.
// Search for the free block node that corresponds to the block offset.
size_t currBlockLevel = 0;
while (curr->mState == BlockState::Split) {
if (offset < curr->split.pLeft->pBuddy->mOffset) {
curr = curr->split.pLeft;
} else {
curr = curr->split.pLeft->pBuddy;
}
currBlockLevel++;
}
ASSERT(curr->mState == BlockState::Allocated);
// Ensure the block is at the correct level
ASSERT(currBlockLevel == ComputeLevelFromBlockSize(curr->mSize));
// Mark curr free so we can merge.
curr->mState = BlockState::Free;
// Merge the buddies (LevelN-to-Level0).
while (currBlockLevel > 0 && curr->pBuddy->mState == BlockState::Free) {
// Remove the buddy.
RemoveFreeBlock(curr->pBuddy, currBlockLevel);
BuddyBlock* parent = curr->pParent;
// The buddies were inserted in a specific order but
// could be deleted in any order.
DeleteBlock(curr->pBuddy);
DeleteBlock(curr);
// Parent is now free.
parent->mState = BlockState::Free;
// Ascend up to the next level (parent block).
curr = parent;
currBlockLevel--;
}
InsertFreeBlock(curr, currBlockLevel);
}
// Helper which deletes a block in the tree recursively (post-order).
void BuddyAllocator::DeleteBlock(BuddyBlock* block) {
ASSERT(block != nullptr);
if (block->mState == BlockState::Split) {
// Delete the pair in same order we inserted.
DeleteBlock(block->split.pLeft->pBuddy);
DeleteBlock(block->split.pLeft);
}
delete block;
}
} // namespace dawn::native
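The level bookkeeping above is plain logarithm arithmetic: a block of size s lives at level Log2(maxBlockSize) - Log2(s), and callers such as BuddyMemoryAllocator first round the requested size up to a power of two. A standalone sketch reproducing the numbers from the diagram in GetNextFreeAlignedBlock (max block size 32; the helpers are simplified stand-ins for Dawn's Log2/NextPowerOfTwo):

#include <cstdint>
#include <cstdio>
// Integer log2 for power-of-two inputs.
uint32_t Log2(uint64_t value) {
    uint32_t result = 0;
    while (value > 1) {
        value >>= 1;
        result++;
    }
    return result;
}
uint64_t NextPowerOfTwo(uint64_t value) {
    uint64_t power = 1;
    while (power < value) {
        power <<= 1;
    }
    return power;
}
int main() {
    const uint64_t maxBlockSize = 32;
    // An 11-byte request rounds up to a 16-byte block at level 1; an 8-byte request maps to
    // level 2, matching the diagram above.
    for (uint64_t request : {11u, 8u}) {
        const uint64_t blockSize = NextPowerOfTwo(request);
        const uint32_t level = Log2(maxBlockSize) - Log2(blockSize);
        std::printf("request %llu -> block %llu at level %u\n",
                    static_cast<unsigned long long>(request),
                    static_cast<unsigned long long>(blockSize), level);
    }
    return 0;
}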
@ -22,96 +22,96 @@
namespace dawn::native {
// Buddy allocator uses the buddy memory allocation technique to satisfy an allocation request.
// Memory is split into halves until just large enough to fit to the request. This
// requires the allocation size to be a power-of-two value. The allocator "allocates" a block by
// returning the starting offset whose size is guaranteed to be greater than or equal to the
// allocation size. To deallocate, the same offset is used to find the corresponding block.
//
// Internally, it manages a free list to track free blocks in a full binary tree.
// Every index in the free list corresponds to a level in the tree. That level also determines
// the size of the block to be used to satisfy the request. The first level (index=0) represents
// the root whose size is also called the max block size.
//
class BuddyAllocator {
public:
explicit BuddyAllocator(uint64_t maxSize);
~BuddyAllocator();
// Required methods.
uint64_t Allocate(uint64_t allocationSize, uint64_t alignment = 1);
void Deallocate(uint64_t offset);
// For testing purposes only.
uint64_t ComputeTotalNumOfFreeBlocksForTesting() const;
static constexpr uint64_t kInvalidOffset = std::numeric_limits<uint64_t>::max();
private:
uint32_t ComputeLevelFromBlockSize(uint64_t blockSize) const;
uint64_t GetNextFreeAlignedBlock(size_t allocationBlockLevel, uint64_t alignment) const;
enum class BlockState { Free, Split, Allocated };
struct BuddyBlock {
BuddyBlock(uint64_t size, uint64_t offset)
: mOffset(offset), mSize(size), mState(BlockState::Free) {
free.pPrev = nullptr;
free.pNext = nullptr;
}
uint64_t mOffset;
uint64_t mSize;
// Pointer to this block's buddy, iff parent is split.
// Used to quickly merge buddy blocks upon de-allocate.
BuddyBlock* pBuddy = nullptr;
BuddyBlock* pParent = nullptr;
// Track whether this block has been split or not.
BlockState mState;
struct FreeLinks {
BuddyBlock* pPrev;
BuddyBlock* pNext;
};
struct SplitLink {
BuddyBlock* pLeft;
};
union {
// Used upon allocation.
// Avoids searching for the next free block.
FreeLinks free;
// Used upon de-allocation.
// Had this block split upon allocation, it and its buddy are to be deleted.
SplitLink split;
};
};
void InsertFreeBlock(BuddyBlock* block, size_t level);
void RemoveFreeBlock(BuddyBlock* block, size_t level);
void DeleteBlock(BuddyBlock* block);
uint64_t ComputeNumOfFreeBlocks(BuddyBlock* block) const;
// Keeps track of the head and tail (for faster insertion/removal).
struct BlockList {
BuddyBlock* head = nullptr; // First free block in level.
// TODO(crbug.com/dawn/827): Track the tail.
};
BuddyBlock* mRoot = nullptr; // Used to deallocate non-free blocks.
uint64_t mMaxBlockSize = 0;
// List of linked-lists of free blocks where the index is a level that
// corresponds to a power-of-two sized block.
std::vector<BlockList> mFreeLists;
};
} // namespace dawn::native
#endif // SRC_DAWN_NATIVE_BUDDYALLOCATOR_H_
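BuddyBlock above overlays its free-list links and its split link in a union, with mState deciding which member is live, so a block costs the same whether it sits in a free list or has been split. A standalone sketch of that state-tagged union layout (simplified fields, not Dawn's actual block bookkeeping):

#include <cstdint>
#include <cstdio>
struct Block {
    enum class State { Free, Split, Allocated };
    uint64_t offset = 0;
    uint64_t size = 0;
    State state = State::Free;
    struct FreeLinks {
        Block* prev;
        Block* next;
    };
    struct SplitLink {
        Block* left;
    };
    // Only one of these is meaningful at a time, selected by `state`.
    union {
        FreeLinks free;
        SplitLink split;
    };
    Block() : free{nullptr, nullptr} {}
};
int main() {
    Block block;
    block.state = Block::State::Split;
    block.split.left = &block;  // the same storage is reused once the block is split
    std::printf("sizeof(Block) = %zu bytes\n", sizeof(Block));
    return 0;
}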
@ -21,102 +21,102 @@
namespace dawn::native { namespace dawn::native {
BuddyMemoryAllocator::BuddyMemoryAllocator(uint64_t maxSystemSize,
                                           uint64_t memoryBlockSize,
                                           ResourceHeapAllocator* heapAllocator)
    : mMemoryBlockSize(memoryBlockSize),
      mBuddyBlockAllocator(maxSystemSize),
      mHeapAllocator(heapAllocator) {
    ASSERT(memoryBlockSize <= maxSystemSize);
    ASSERT(IsPowerOfTwo(mMemoryBlockSize));
    ASSERT(maxSystemSize % mMemoryBlockSize == 0);

    mTrackedSubAllocations.resize(maxSystemSize / mMemoryBlockSize);
}

uint64_t BuddyMemoryAllocator::GetMemoryIndex(uint64_t offset) const {
    ASSERT(offset != BuddyAllocator::kInvalidOffset);
    return offset / mMemoryBlockSize;
}

ResultOrError<ResourceMemoryAllocation> BuddyMemoryAllocator::Allocate(uint64_t allocationSize,
                                                                       uint64_t alignment) {
    ResourceMemoryAllocation invalidAllocation = ResourceMemoryAllocation{};

    if (allocationSize == 0) {
        return std::move(invalidAllocation);
    }

    // Check the unaligned size to avoid overflowing NextPowerOfTwo.
    if (allocationSize > mMemoryBlockSize) {
        return std::move(invalidAllocation);
    }

    // Round allocation size to nearest power-of-two.
    allocationSize = NextPowerOfTwo(allocationSize);

    // Allocation cannot exceed the memory size.
    if (allocationSize > mMemoryBlockSize) {
        return std::move(invalidAllocation);
    }

    // Attempt to sub-allocate a block of the requested size.
    const uint64_t blockOffset = mBuddyBlockAllocator.Allocate(allocationSize, alignment);
    if (blockOffset == BuddyAllocator::kInvalidOffset) {
        return std::move(invalidAllocation);
    }

    const uint64_t memoryIndex = GetMemoryIndex(blockOffset);
    if (mTrackedSubAllocations[memoryIndex].refcount == 0) {
        // Transfer ownership to this allocator
        std::unique_ptr<ResourceHeapBase> memory;
        DAWN_TRY_ASSIGN(memory, mHeapAllocator->AllocateResourceHeap(mMemoryBlockSize));
        mTrackedSubAllocations[memoryIndex] = {/*refcount*/ 0, std::move(memory)};
    }

    mTrackedSubAllocations[memoryIndex].refcount++;

    AllocationInfo info;
    info.mBlockOffset = blockOffset;
    info.mMethod = AllocationMethod::kSubAllocated;

    // Allocation offset is always local to the memory.
    const uint64_t memoryOffset = blockOffset % mMemoryBlockSize;

    return ResourceMemoryAllocation{info, memoryOffset,
                                    mTrackedSubAllocations[memoryIndex].mMemoryAllocation.get()};
}

void BuddyMemoryAllocator::Deallocate(const ResourceMemoryAllocation& allocation) {
    const AllocationInfo info = allocation.GetInfo();

    ASSERT(info.mMethod == AllocationMethod::kSubAllocated);

    const uint64_t memoryIndex = GetMemoryIndex(info.mBlockOffset);

    ASSERT(mTrackedSubAllocations[memoryIndex].refcount > 0);
    mTrackedSubAllocations[memoryIndex].refcount--;

    if (mTrackedSubAllocations[memoryIndex].refcount == 0) {
        mHeapAllocator->DeallocateResourceHeap(
            std::move(mTrackedSubAllocations[memoryIndex].mMemoryAllocation));
    }

    mBuddyBlockAllocator.Deallocate(info.mBlockOffset);
}

uint64_t BuddyMemoryAllocator::GetMemoryBlockSize() const {
    return mMemoryBlockSize;
}

uint64_t BuddyMemoryAllocator::ComputeTotalNumOfHeapsForTesting() const {
    uint64_t count = 0;
    for (const TrackedSubAllocations& allocation : mTrackedSubAllocations) {
        if (allocation.refcount > 0) {
            count++;
        }
    }
    return count;
}

}  // namespace dawn::native
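For illustration only (not part of this CL or the Dawn API), a minimal standalone sketch of the arithmetic used above: the requested size is rounded up to a power of two, and the offset handed back by the buddy allocator is split into a backing-heap index and a heap-local offset. All names and sizes below are invented for the example.

// Standalone sketch; assumes a 4 MiB memory block size.
#include <cassert>
#include <cstdint>
#include <cstdio>

uint64_t NextPowerOfTwo(uint64_t n) {
    // Smallest power of two that is >= n (n must be non-zero).
    uint64_t result = 1;
    while (result < n) {
        result <<= 1;
    }
    return result;
}

int main() {
    const uint64_t kMemoryBlockSize = 4 * 1024 * 1024;  // One backing heap per 4 MiB block.

    // A 70 KiB request is rounded up to the next power of two, 128 KiB.
    uint64_t allocationSize = NextPowerOfTwo(70 * 1024);
    assert(allocationSize == 128 * 1024);

    // Pretend the buddy allocator placed the block 256 KiB into the second 4 MiB block.
    uint64_t blockOffset = kMemoryBlockSize + 2 * allocationSize;

    uint64_t memoryIndex = blockOffset / kMemoryBlockSize;   // Which backing heap to use.
    uint64_t memoryOffset = blockOffset % kMemoryBlockSize;  // Offset local to that heap.

    std::printf("heap index = %llu, heap-local offset = %llu\n",
                (unsigned long long)memoryIndex, (unsigned long long)memoryOffset);
    return 0;
}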
@@ -24,51 +24,50 @@
namespace dawn::native {

class ResourceHeapAllocator;

// BuddyMemoryAllocator uses the buddy allocator to sub-allocate blocks of device
// memory created by MemoryAllocator clients. It creates a very large buddy system
// where backing device memory blocks equal a specified level in the system.
//
// Upon sub-allocating, the offset gets mapped to device memory by computing the corresponding
// memory index and should the memory not exist, it is created. If two sub-allocations share the
// same memory index, the memory refcount is incremented to ensure de-allocating one doesn't
// release the other prematurely.
//
// The MemoryAllocator should return ResourceHeaps that are all compatible with each other.
// It should also outlive all the resources that are in the buddy allocator.
class BuddyMemoryAllocator {
  public:
    BuddyMemoryAllocator(uint64_t maxSystemSize,
                         uint64_t memoryBlockSize,
                         ResourceHeapAllocator* heapAllocator);
    ~BuddyMemoryAllocator() = default;

    ResultOrError<ResourceMemoryAllocation> Allocate(uint64_t allocationSize, uint64_t alignment);
    void Deallocate(const ResourceMemoryAllocation& allocation);

    uint64_t GetMemoryBlockSize() const;

    // For testing purposes.
    uint64_t ComputeTotalNumOfHeapsForTesting() const;

  private:
    uint64_t GetMemoryIndex(uint64_t offset) const;

    uint64_t mMemoryBlockSize = 0;

    BuddyAllocator mBuddyBlockAllocator;
    ResourceHeapAllocator* mHeapAllocator;

    struct TrackedSubAllocations {
        size_t refcount = 0;
        std::unique_ptr<ResourceHeapBase> mMemoryAllocation;
    };

    std::vector<TrackedSubAllocations> mTrackedSubAllocations;
};

}  // namespace dawn::native

#endif  // SRC_DAWN_NATIVE_BUDDYMEMORYALLOCATOR_H_
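The header comment above describes refcounted backing heaps: the first sub-allocation that lands in a memory block creates the heap, and the last deallocation releases it. A minimal standalone sketch of that lifetime pattern (invented names, not Dawn code):

#include <cstdio>
#include <memory>
#include <vector>

struct FakeHeap {};  // Stand-in for a backing device memory heap.

struct TrackedBlock {
    size_t refcount = 0;
    std::unique_ptr<FakeHeap> heap;
};

int main() {
    std::vector<TrackedBlock> blocks(4);  // e.g. maxSystemSize / memoryBlockSize == 4

    auto subAllocate = [&](size_t memoryIndex) {
        if (blocks[memoryIndex].refcount == 0) {
            blocks[memoryIndex].heap = std::make_unique<FakeHeap>();  // Create the heap lazily.
        }
        blocks[memoryIndex].refcount++;
    };
    auto deallocate = [&](size_t memoryIndex) {
        if (--blocks[memoryIndex].refcount == 0) {
            blocks[memoryIndex].heap.reset();  // Last user gone, release the heap.
        }
    };

    subAllocate(1);  // Creates the heap for block 1.
    subAllocate(1);  // Reuses it; refcount is now 2.
    deallocate(1);   // Heap stays alive, still one user.
    deallocate(1);   // Heap released.

    std::printf("block 1 heap alive: %s\n", blocks[1].heap ? "yes" : "no");
    return 0;
}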
File diff suppressed because it is too large

@@ -26,114 +26,112 @@
namespace dawn::native {

struct CopyTextureToBufferCmd;

enum class MapType : uint32_t;

MaybeError ValidateBufferDescriptor(DeviceBase* device, const BufferDescriptor* descriptor);

static constexpr wgpu::BufferUsage kReadOnlyBufferUsages =
    wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::Index |
    wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Uniform | kReadOnlyStorageBuffer |
    wgpu::BufferUsage::Indirect;

static constexpr wgpu::BufferUsage kMappableBufferUsages =
    wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite;

class BufferBase : public ApiObjectBase {
  public:
    enum class BufferState {
        Unmapped,
        Mapped,
        MappedAtCreation,
        Destroyed,
    };
    BufferBase(DeviceBase* device, const BufferDescriptor* descriptor);

    static BufferBase* MakeError(DeviceBase* device, const BufferDescriptor* descriptor);

    ObjectType GetType() const override;

    uint64_t GetSize() const;
    uint64_t GetAllocatedSize() const;

    // |GetUsageExternalOnly| returns the usage with which the buffer was created using the
    // base WebGPU API. Additional usages may be added for internal state tracking. |GetUsage|
    // returns the union of base usage and the usages added internally.
    wgpu::BufferUsage GetUsage() const;
    wgpu::BufferUsage GetUsageExternalOnly() const;

    MaybeError MapAtCreation();
    void OnMapRequestCompleted(MapRequestID mapID, WGPUBufferMapAsyncStatus status);

    MaybeError ValidateCanUseOnQueueNow() const;

    bool IsFullBufferRange(uint64_t offset, uint64_t size) const;
    bool NeedsInitialization() const;
    bool IsDataInitialized() const;
    void SetIsDataInitialized();

    void* GetMappedRange(size_t offset, size_t size, bool writable = true);
    void Unmap();

    // Dawn API
    void APIMapAsync(wgpu::MapMode mode,
                     size_t offset,
                     size_t size,
                     WGPUBufferMapCallback callback,
                     void* userdata);
    void* APIGetMappedRange(size_t offset, size_t size);
    const void* APIGetConstMappedRange(size_t offset, size_t size);
    void APIUnmap();
    void APIDestroy();

  protected:
    BufferBase(DeviceBase* device, const BufferDescriptor* descriptor, ObjectBase::ErrorTag tag);

    // Constructor used only for mocking and testing.
    BufferBase(DeviceBase* device, BufferState state);

    void DestroyImpl() override;

    ~BufferBase() override;

    MaybeError MapAtCreationInternal();

    uint64_t mAllocatedSize = 0;

  private:
    virtual MaybeError MapAtCreationImpl() = 0;
    virtual MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) = 0;
    virtual void UnmapImpl() = 0;
    virtual void* GetMappedPointerImpl() = 0;

    virtual bool IsCPUWritableAtCreation() const = 0;
    MaybeError CopyFromStagingBuffer();
    void CallMapCallback(MapRequestID mapID, WGPUBufferMapAsyncStatus status);

    MaybeError ValidateMapAsync(wgpu::MapMode mode,
                                size_t offset,
                                size_t size,
                                WGPUBufferMapAsyncStatus* status) const;
    MaybeError ValidateUnmap() const;
    bool CanGetMappedRange(bool writable, size_t offset, size_t size) const;
    void UnmapInternal(WGPUBufferMapAsyncStatus callbackStatus);

    uint64_t mSize = 0;
    wgpu::BufferUsage mUsage = wgpu::BufferUsage::None;
    BufferState mState;
    bool mIsDataInitialized = false;

    std::unique_ptr<StagingBufferBase> mStagingBuffer;

    WGPUBufferMapCallback mMapCallback = nullptr;
    void* mMapUserdata = 0;
    MapRequestID mLastMapID = MapRequestID(0);
    wgpu::MapMode mMapMode = wgpu::MapMode::None;
    size_t mMapOffset = 0;
    size_t mMapSize = 0;
};

}  // namespace dawn::native
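For illustration only, a tiny standalone sketch of the kind of check the BufferState enum above enables (a mapped range is only available while the buffer is mapped, whether through MapAsync or mappedAtCreation). The enum values mirror the declaration above; everything else is invented for the example and is not Dawn code.

#include <cstdio>

enum class BufferState { Unmapped, Mapped, MappedAtCreation, Destroyed };

bool CanGetMappedRange(BufferState state) {
    return state == BufferState::Mapped || state == BufferState::MappedAtCreation;
}

int main() {
    std::printf("%d %d\n", CanGetMappedRange(BufferState::Mapped),
                CanGetMappedRange(BufferState::Destroyed));  // prints "1 0"
    return 0;
}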
@@ -18,26 +18,26 @@
namespace dawn::native {

std::ostream& operator<<(std::ostream& os, const CacheKey& key) {
    os << std::hex;
    for (const int b : key) {
        os << std::setfill('0') << std::setw(2) << b << " ";
    }
    os << std::dec;
    return os;
}

template <>
void CacheKeySerializer<std::string>::Serialize(CacheKey* key, const std::string& t) {
    key->Record(static_cast<size_t>(t.length()));
    key->insert(key->end(), t.begin(), t.end());
}

template <>
void CacheKeySerializer<CacheKey>::Serialize(CacheKey* key, const CacheKey& t) {
    // For nested cache keys, we do not record the length, and just copy the key so that it
    // appears we just flatten the keys into a single key.
    key->insert(key->end(), t.begin(), t.end());
}

}  // namespace dawn::native
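For illustration only, a standalone sketch of the two strategies above: strings are recorded as a length prefix followed by their bytes, while nested keys are flattened by appending their bytes directly. The helper names are invented; this is not the Dawn CacheKey API.

#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

using Key = std::vector<uint8_t>;

// Length prefix means "ab" + "c" cannot collide with "a" + "bc".
void RecordString(Key* key, const std::string& s) {
    size_t length = s.length();
    const uint8_t* lengthBytes = reinterpret_cast<const uint8_t*>(&length);
    key->insert(key->end(), lengthBytes, lengthBytes + sizeof(length));
    key->insert(key->end(), s.begin(), s.end());
}

// Nested keys are flattened: the child's bytes are appended as-is, with no length prefix.
void RecordNestedKey(Key* key, const Key& child) {
    key->insert(key->end(), child.begin(), child.end());
}

int main() {
    Key child;
    RecordString(&child, "shader");

    Key parent;
    RecordString(&parent, "pipeline");
    RecordNestedKey(&parent, child);

    std::printf("parent key is %zu bytes\n", parent.size());
    return 0;
}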
@@ -27,179 +27,175 @@
namespace dawn::native {

// Forward declare classes because of co-dependency.
class CacheKey;
class CachedObject;

// Stream operator for CacheKey for debugging.
std::ostream& operator<<(std::ostream& os, const CacheKey& key);

// Overridable serializer struct that should be implemented for cache key serializable
// types/classes.
template <typename T, typename SFINAE = void>
class CacheKeySerializer {
  public:
    static void Serialize(CacheKey* key, const T& t);
};

class CacheKey : public std::vector<uint8_t> {
  public:
    using std::vector<uint8_t>::vector;

    enum class Type { ComputePipeline, RenderPipeline, Shader };

    template <typename T>
    CacheKey& Record(const T& t) {
        CacheKeySerializer<T>::Serialize(this, t);
        return *this;
    }
    template <typename T, typename... Args>
    CacheKey& Record(const T& t, const Args&... args) {
        CacheKeySerializer<T>::Serialize(this, t);
        return Record(args...);
    }

    // Records iterables by prepending the number of elements. Some common iterables have a
    // CacheKeySerializer implemented to avoid needing to split them out when recording, i.e.
    // strings and CacheKeys, but they fundamentally do the same as this function.
    template <typename IterableT>
    CacheKey& RecordIterable(const IterableT& iterable) {
        // Always record the size of generic iterables as a size_t for now.
        Record(static_cast<size_t>(iterable.size()));
        for (auto it = iterable.begin(); it != iterable.end(); ++it) {
            Record(*it);
        }
        return *this;
    }
    template <typename Index, typename Value, size_t Size>
    CacheKey& RecordIterable(const ityp::array<Index, Value, Size>& iterable) {
        Record(static_cast<Index>(iterable.size()));
        for (auto it = iterable.begin(); it != iterable.end(); ++it) {
            Record(*it);
        }
        return *this;
    }
    template <typename Ptr>
    CacheKey& RecordIterable(const Ptr* ptr, size_t n) {
        Record(n);
        for (size_t i = 0; i < n; ++i) {
            Record(ptr[i]);
        }
        return *this;
    }
};

// Specialized overload for fundamental types.
template <typename T>
class CacheKeySerializer<T, std::enable_if_t<std::is_fundamental_v<T>>> {
  public:
    static void Serialize(CacheKey* key, const T t) {
        const char* it = reinterpret_cast<const char*>(&t);
        key->insert(key->end(), it, (it + sizeof(T)));
    }
};

// Specialized overload for bitsets that are smaller than 64.
template <size_t N>
class CacheKeySerializer<std::bitset<N>, std::enable_if_t<(N <= 64)>> {
  public:
    static void Serialize(CacheKey* key, const std::bitset<N>& t) { key->Record(t.to_ullong()); }
};

// Specialized overload for larger bitsets since the built-in to_ullong has a size limit.
template <size_t N>
class CacheKeySerializer<std::bitset<N>, std::enable_if_t<(N > 64)>> {
  public:
    static void Serialize(CacheKey* key, const std::bitset<N>& t) {
        // Serializes the bitset into series of uint8_t, along with recording the size.
        static_assert(N > 0);
        key->Record(static_cast<size_t>(N));
        uint8_t value = 0;
        for (size_t i = 0; i < N; i++) {
            value <<= 1;
            // Explicitly convert to numeric since MSVC doesn't like mixing of bools.
            value |= t[i] ? 1 : 0;
            if (i % 8 == 7) {
                // Whenever we fill an 8 bit value, record it and zero it out.
                key->Record(value);
                value = 0;
            }
        }
        // Serialize the last value if we are not a multiple of 8.
        if (N % 8 != 0) {
            key->Record(value);
        }
    }
};

// Specialized overload for enums.
template <typename T>
class CacheKeySerializer<T, std::enable_if_t<std::is_enum_v<T>>> {
  public:
    static void Serialize(CacheKey* key, const T t) {
        CacheKeySerializer<std::underlying_type_t<T>>::Serialize(
            key, static_cast<std::underlying_type_t<T>>(t));
    }
};

// Specialized overload for TypedInteger.
template <typename Tag, typename Integer>
class CacheKeySerializer<::detail::TypedIntegerImpl<Tag, Integer>> {
  public:
    static void Serialize(CacheKey* key, const ::detail::TypedIntegerImpl<Tag, Integer> t) {
        CacheKeySerializer<Integer>::Serialize(key, static_cast<Integer>(t));
    }
};

// Specialized overload for pointers. Since we are serializing for a cache key, we always
// serialize via value, not by pointer. To handle nullptr scenarios, we always serialize whether
// the pointer was nullptr followed by the contents if applicable.
template <typename T>
class CacheKeySerializer<T, std::enable_if_t<std::is_pointer_v<T>>> {
  public:
    static void Serialize(CacheKey* key, const T t) {
        key->Record(t == nullptr);
        if (t != nullptr) {
            CacheKeySerializer<std::remove_cv_t<std::remove_pointer_t<T>>>::Serialize(key, *t);
        }
    }
};

// Specialized overload for fixed arrays of primitives.
template <typename T, size_t N>
class CacheKeySerializer<T[N], std::enable_if_t<std::is_fundamental_v<T>>> {
  public:
    static void Serialize(CacheKey* key, const T (&t)[N]) {
        static_assert(N > 0);
        key->Record(static_cast<size_t>(N));
        const char* it = reinterpret_cast<const char*>(t);
        key->insert(key->end(), it, it + sizeof(t));
    }
};

// Specialized overload for fixed arrays of non-primitives.
template <typename T, size_t N>
class CacheKeySerializer<T[N], std::enable_if_t<!std::is_fundamental_v<T>>> {
  public:
    static void Serialize(CacheKey* key, const T (&t)[N]) {
        static_assert(N > 0);
        key->Record(static_cast<size_t>(N));
        for (size_t i = 0; i < N; i++) {
            key->Record(t[i]);
        }
    }
};

// Specialized overload for CachedObjects.
template <typename T>
class CacheKeySerializer<T, std::enable_if_t<std::is_base_of_v<CachedObject, T>>> {
  public:
    static void Serialize(CacheKey* key, const T& t) { key->Record(t.GetCacheKey()); }
};

}  // namespace dawn::native
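For illustration only, a standalone sketch of the bit-packing loop that the large-bitset serializer above uses: shift the accumulator, or in the current bit, flush every 8 bits, and flush the remainder at the end. Names are invented for the example; this is not the Dawn serializer itself.

#include <bitset>
#include <cstdint>
#include <cstdio>
#include <vector>

// Packs an N-bit bitset into ceil(N / 8) bytes.
template <size_t N>
std::vector<uint8_t> PackBits(const std::bitset<N>& bits) {
    std::vector<uint8_t> out;
    uint8_t value = 0;
    for (size_t i = 0; i < N; i++) {
        value <<= 1;
        value |= bits[i] ? 1 : 0;
        if (i % 8 == 7) {
            out.push_back(value);  // A full byte has been accumulated.
            value = 0;
        }
    }
    if (N % 8 != 0) {
        out.push_back(value);  // Flush the remaining bits.
    }
    return out;
}

int main() {
    std::bitset<70> bits;
    bits.set(0);
    bits.set(69);
    std::printf("packed into %zu bytes\n", PackBits(bits).size());  // 9 bytes for 70 bits
    return 0;
}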
@@ -19,35 +19,35 @@
namespace dawn::native {

bool CachedObject::IsCachedReference() const {
    return mIsCachedReference;
}

void CachedObject::SetIsCachedReference() {
    mIsCachedReference = true;
}

size_t CachedObject::HashFunc::operator()(const CachedObject* obj) const {
    return obj->GetContentHash();
}

size_t CachedObject::GetContentHash() const {
    ASSERT(mIsContentHashInitialized);
    return mContentHash;
}

void CachedObject::SetContentHash(size_t contentHash) {
    ASSERT(!mIsContentHashInitialized);
    mContentHash = contentHash;
    mIsContentHashInitialized = true;
}

const CacheKey& CachedObject::GetCacheKey() const {
    return mCacheKey;
}

CacheKey* CachedObject::GetCacheKey() {
    return &mCacheKey;
}

}  // namespace dawn::native
@@ -23,43 +23,43 @@
namespace dawn::native {

// Some objects are cached so that instead of creating new duplicate objects,
// we increase the refcount of an existing object.
// When an object is successfully created, the device should call
// SetIsCachedReference() and insert the object into the cache.
class CachedObject {
  public:
    bool IsCachedReference() const;

    // Functor necessary for the unordered_set<CachedObject*>-based cache.
    struct HashFunc {
        size_t operator()(const CachedObject* obj) const;
    };

    size_t GetContentHash() const;
    void SetContentHash(size_t contentHash);

    // Returns the cache key for the object only, i.e. without device/adapter information.
    const CacheKey& GetCacheKey() const;

  protected:
    // Protected accessor for derived classes to access and modify the key.
    CacheKey* GetCacheKey();

  private:
    friend class DeviceBase;
    void SetIsCachedReference();

    bool mIsCachedReference = false;

    // Called by ObjectContentHasher upon creation to record the object.
    virtual size_t ComputeContentHash() = 0;

    size_t mContentHash = 0;
    bool mIsContentHashInitialized = false;
    CacheKey mCacheKey;
};

}  // namespace dawn::native

#endif  // SRC_DAWN_NATIVE_CACHEDOBJECT_H_
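For illustration only, a standalone sketch of the deduplication idea behind CachedObject: an unordered_set keyed by a content hash plus an equality functor, so inserting an equivalent object finds the existing one and the caller can reuse it instead of keeping the duplicate. Types and names are invented for the example; this is not Dawn code.

#include <cstdio>
#include <string>
#include <unordered_set>

struct Object {
    std::string contents;
    size_t ContentHash() const { return std::hash<std::string>{}(contents); }
};

struct HashFunc {
    size_t operator()(const Object* obj) const { return obj->ContentHash(); }
};
struct EqualityFunc {
    bool operator()(const Object* a, const Object* b) const { return a->contents == b->contents; }
};

int main() {
    std::unordered_set<Object*, HashFunc, EqualityFunc> cache;

    Object a{"render pipeline A"};
    Object duplicate{"render pipeline A"};

    cache.insert(&a);
    // The second insert finds an equivalent entry, so a caller would reference `a`
    // instead of keeping `duplicate`.
    auto [iter, inserted] = cache.insert(&duplicate);
    std::printf("inserted new entry: %s\n", inserted ? "yes" : "no");  // "no"
    return 0;
}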
@@ -18,22 +18,22 @@
namespace dawn::native {

bool CallbackTaskManager::IsEmpty() {
    std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);
    return mCallbackTaskQueue.empty();
}

std::vector<std::unique_ptr<CallbackTask>> CallbackTaskManager::AcquireCallbackTasks() {
    std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);

    std::vector<std::unique_ptr<CallbackTask>> allTasks;
    allTasks.swap(mCallbackTaskQueue);
    return allTasks;
}

void CallbackTaskManager::AddCallbackTask(std::unique_ptr<CallbackTask> callbackTask) {
    std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);
    mCallbackTaskQueue.push_back(std::move(callbackTask));
}

}  // namespace dawn::native
@@ -21,24 +21,24 @@
namespace dawn::native {

struct CallbackTask {
  public:
    virtual ~CallbackTask() = default;
    virtual void Finish() = 0;
    virtual void HandleShutDown() = 0;
    virtual void HandleDeviceLoss() = 0;
};

class CallbackTaskManager {
  public:
    void AddCallbackTask(std::unique_ptr<CallbackTask> callbackTask);
    bool IsEmpty();
    std::vector<std::unique_ptr<CallbackTask>> AcquireCallbackTasks();

  private:
    std::mutex mCallbackTaskQueueMutex;
    std::vector<std::unique_ptr<CallbackTask>> mCallbackTaskQueue;
};

}  // namespace dawn::native
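For illustration only, a standalone sketch of the swap-under-lock pattern used by AcquireCallbackTasks above: the whole queue is taken in one locked swap, and the tasks then run without the lock held, so a callback can safely enqueue more work. All names are invented; this is not Dawn code.

#include <cstdio>
#include <functional>
#include <mutex>
#include <vector>

class TaskQueue {
  public:
    void Add(std::function<void()> task) {
        std::lock_guard<std::mutex> lock(mMutex);
        mTasks.push_back(std::move(task));
    }

    // Takes the whole queue in one go so the tasks can be run without holding the lock.
    std::vector<std::function<void()>> Acquire() {
        std::lock_guard<std::mutex> lock(mMutex);
        std::vector<std::function<void()>> all;
        all.swap(mTasks);
        return all;
    }

  private:
    std::mutex mMutex;
    std::vector<std::function<void()>> mTasks;
};

int main() {
    TaskQueue queue;
    queue.Add([] { std::printf("callback ran\n"); });
    for (auto& task : queue.Acquire()) {
        task();  // Runs outside the lock.
    }
    return 0;
}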
@@ -24,205 +24,203 @@
namespace dawn::native {

// TODO(cwallez@chromium.org): figure out a way to have more type safety for the iterator

CommandIterator::CommandIterator() {
    Reset();
}

CommandIterator::~CommandIterator() {
    ASSERT(IsEmpty());
}

CommandIterator::CommandIterator(CommandIterator&& other) {
    if (!other.IsEmpty()) {
        mBlocks = std::move(other.mBlocks);
        other.Reset();
    }
    Reset();
}

CommandIterator& CommandIterator::operator=(CommandIterator&& other) {
    ASSERT(IsEmpty());
    if (!other.IsEmpty()) {
        mBlocks = std::move(other.mBlocks);
        other.Reset();
    }
    Reset();
    return *this;
}

CommandIterator::CommandIterator(CommandAllocator allocator) : mBlocks(allocator.AcquireBlocks()) {
    Reset();
}

void CommandIterator::AcquireCommandBlocks(std::vector<CommandAllocator> allocators) {
    ASSERT(IsEmpty());
    mBlocks.clear();
    for (CommandAllocator& allocator : allocators) {
        CommandBlocks blocks = allocator.AcquireBlocks();
        if (!blocks.empty()) {
            mBlocks.reserve(mBlocks.size() + blocks.size());
            for (BlockDef& block : blocks) {
                mBlocks.push_back(std::move(block));
            }
        }
    }
    Reset();
}

bool CommandIterator::NextCommandIdInNewBlock(uint32_t* commandId) {
    mCurrentBlock++;
    if (mCurrentBlock >= mBlocks.size()) {
        Reset();
        *commandId = detail::kEndOfBlock;
        return false;
    }
    mCurrentPtr = AlignPtr(mBlocks[mCurrentBlock].block, alignof(uint32_t));
    return NextCommandId(commandId);
}

void CommandIterator::Reset() {
    mCurrentBlock = 0;

    if (mBlocks.empty()) {
        // This will cause the first NextCommandId call to try to move to the next block and stop
        // the iteration immediately, without special casing the initialization.
        mCurrentPtr = reinterpret_cast<uint8_t*>(&mEndOfBlock);
        mBlocks.emplace_back();
        mBlocks[0].size = sizeof(mEndOfBlock);
        mBlocks[0].block = mCurrentPtr;
    } else {
        mCurrentPtr = AlignPtr(mBlocks[0].block, alignof(uint32_t));
    }
}

void CommandIterator::MakeEmptyAsDataWasDestroyed() {
    if (IsEmpty()) {
        return;
    }

    for (BlockDef& block : mBlocks) {
        free(block.block);
    }
    mBlocks.clear();
    Reset();
    ASSERT(IsEmpty());
}

bool CommandIterator::IsEmpty() const {
    return mBlocks[0].block == reinterpret_cast<const uint8_t*>(&mEndOfBlock);
}

// Potential TODO(crbug.com/dawn/835):
// - Host the size and pointer to next block in the block itself to avoid having an allocation
//   in the vector
// - Assume T's alignof is, say 64bits, static assert it, and make commandAlignment a constant
//   in Allocate
// - Be able to optimize allocation to one block, for command buffers expected to live long to
//   avoid cache misses
// - Better block allocation, maybe have Dawn API to say command buffer is going to have size
//   close to another

CommandAllocator::CommandAllocator() {
    ResetPointers();
}

CommandAllocator::~CommandAllocator() {
    Reset();
}

CommandAllocator::CommandAllocator(CommandAllocator&& other)
    : mBlocks(std::move(other.mBlocks)), mLastAllocationSize(other.mLastAllocationSize) {
    other.mBlocks.clear();
    if (!other.IsEmpty()) {
        mCurrentPtr = other.mCurrentPtr;
        mEndPtr = other.mEndPtr;
    } else {
        ResetPointers();
    }
    other.Reset();
}

CommandAllocator& CommandAllocator::operator=(CommandAllocator&& other) {
    Reset();
    if (!other.IsEmpty()) {
        std::swap(mBlocks, other.mBlocks);
        mLastAllocationSize = other.mLastAllocationSize;
        mCurrentPtr = other.mCurrentPtr;
        mEndPtr = other.mEndPtr;
    }
    other.Reset();
    return *this;
}

void CommandAllocator::Reset() {
    for (BlockDef& block : mBlocks) {
        free(block.block);
    }
    mBlocks.clear();
    mLastAllocationSize = kDefaultBaseAllocationSize;
    ResetPointers();
}

bool CommandAllocator::IsEmpty() const {
    return mCurrentPtr == reinterpret_cast<const uint8_t*>(&mPlaceholderEnum[0]);
}

CommandBlocks&& CommandAllocator::AcquireBlocks() {
    ASSERT(mCurrentPtr != nullptr && mEndPtr != nullptr);
    ASSERT(IsPtrAligned(mCurrentPtr, alignof(uint32_t)));
    ASSERT(mCurrentPtr + sizeof(uint32_t) <= mEndPtr);
    *reinterpret_cast<uint32_t*>(mCurrentPtr) = detail::kEndOfBlock;

    mCurrentPtr = nullptr;
    mEndPtr = nullptr;
    return std::move(mBlocks);
}

uint8_t* CommandAllocator::AllocateInNewBlock(uint32_t commandId,
                                              size_t commandSize,
                                              size_t commandAlignment) {
    // When there is not enough space, we signal the kEndOfBlock, so that the iterator knows
    // to move to the next one. kEndOfBlock on the last block means the end of the commands.
    uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr);
    *idAlloc = detail::kEndOfBlock;

    // We'll request a block that can contain at least the command ID, the command and an
    // additional ID to contain the kEndOfBlock tag.
    size_t requestedBlockSize = commandSize + kWorstCaseAdditionalSize;

    // The computation of the request could overflow.
    if (DAWN_UNLIKELY(requestedBlockSize <= commandSize)) {
        return nullptr;
    }

    if (DAWN_UNLIKELY(!GetNewBlock(requestedBlockSize))) {
        return nullptr;
    }
    return Allocate(commandId, commandSize, commandAlignment);
}

bool CommandAllocator::GetNewBlock(size_t minimumSize) {
    // Allocate blocks doubling sizes each time, to a maximum of 16k (or at least minimumSize).
    mLastAllocationSize = std::max(minimumSize, std::min(mLastAllocationSize * 2, size_t(16384)));

    uint8_t* block = static_cast<uint8_t*>(malloc(mLastAllocationSize));
    if (DAWN_UNLIKELY(block == nullptr)) {
        return false;
    }

    mBlocks.push_back({mLastAllocationSize, block});
    mCurrentPtr = AlignPtr(block, alignof(uint32_t));
    mEndPtr = block + mLastAllocationSize;
    return true;
}

void CommandAllocator::ResetPointers() {
    mCurrentPtr = reinterpret_cast<uint8_t*>(&mPlaceholderEnum[0]);
    mEndPtr = reinterpret_cast<uint8_t*>(&mPlaceholderEnum[1]);
}

}  // namespace dawn::native
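For illustration only, a tiny standalone sketch of the block-size growth policy in GetNewBlock above (double the previous size, cap at 16k, never below the requested minimum). The loop values are invented for the example.

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
    size_t lastAllocationSize = 2048;  // kDefaultBaseAllocationSize
    for (int i = 0; i < 5; i++) {
        size_t minimumSize = 64;  // A small command; the minimum rarely dominates.
        lastAllocationSize =
            std::max(minimumSize, std::min(lastAllocationSize * 2, size_t(16384)));
        std::printf("block %d: %zu bytes\n", i, lastAllocationSize);
        // Prints 4096, 8192, 16384, 16384, 16384.
    }
    return 0;
}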
@@ -26,248 +26,246 @@
namespace dawn::native { namespace dawn::native {
// Allocation for command buffers should be fast. To avoid doing an allocation per command // Allocation for command buffers should be fast. To avoid doing an allocation per command
// or to avoid copying commands when reallocing, we use a linear allocator in a growing set // or to avoid copying commands when reallocing, we use a linear allocator in a growing set
// of large memory blocks. We also use this to have the format to be (u32 commandId, command), // of large memory blocks. We also use this to have the format to be (u32 commandId, command),
// so that iteration over the commands is easy. // so that iteration over the commands is easy.
// Usage of the allocator and iterator: // Usage of the allocator and iterator:
// CommandAllocator allocator; // CommandAllocator allocator;
// DrawCommand* cmd = allocator.Allocate<DrawCommand>(CommandType::Draw); // DrawCommand* cmd = allocator.Allocate<DrawCommand>(CommandType::Draw);
// // Fill command // // Fill command
// // Repeat allocation and filling commands // // Repeat allocation and filling commands
// //
// CommandIterator commands(allocator); // CommandIterator commands(allocator);
// CommandType type; // CommandType type;
// while(commands.NextCommandId(&type)) { // while(commands.NextCommandId(&type)) {
// switch(type) { // switch(type) {
// case CommandType::Draw: // case CommandType::Draw:
// DrawCommand* draw = commands.NextCommand<DrawCommand>(); // DrawCommand* draw = commands.NextCommand<DrawCommand>();
// // Do the draw // // Do the draw
// break; // break;
// // other cases // // other cases
// } // }
// } // }
// Note that you need to extract the commands from the CommandAllocator before destroying it // Note that you need to extract the commands from the CommandAllocator before destroying it
// and must tell the CommandIterator when the allocated commands have been processed for // and must tell the CommandIterator when the allocated commands have been processed for
// deletion. // deletion.
// These are the lists of blocks, should not be used directly, only through CommandAllocator // These are the lists of blocks, should not be used directly, only through CommandAllocator
// and CommandIterator // and CommandIterator
struct BlockDef { struct BlockDef {
size_t size; size_t size;
uint8_t* block; uint8_t* block;
}; };
using CommandBlocks = std::vector<BlockDef>; using CommandBlocks = std::vector<BlockDef>;
namespace detail { namespace detail {
constexpr uint32_t kEndOfBlock = std::numeric_limits<uint32_t>::max(); constexpr uint32_t kEndOfBlock = std::numeric_limits<uint32_t>::max();
constexpr uint32_t kAdditionalData = std::numeric_limits<uint32_t>::max() - 1; constexpr uint32_t kAdditionalData = std::numeric_limits<uint32_t>::max() - 1;
} // namespace detail } // namespace detail
class CommandAllocator; class CommandAllocator;
class CommandIterator : public NonCopyable { class CommandIterator : public NonCopyable {
public: public:
CommandIterator(); CommandIterator();
~CommandIterator(); ~CommandIterator();
CommandIterator(CommandIterator&& other); CommandIterator(CommandIterator&& other);
CommandIterator& operator=(CommandIterator&& other); CommandIterator& operator=(CommandIterator&& other);
// Shorthand constructor for acquiring CommandBlocks from a single CommandAllocator. // Shorthand constructor for acquiring CommandBlocks from a single CommandAllocator.
explicit CommandIterator(CommandAllocator allocator); explicit CommandIterator(CommandAllocator allocator);
void AcquireCommandBlocks(std::vector<CommandAllocator> allocators); void AcquireCommandBlocks(std::vector<CommandAllocator> allocators);
template <typename E> template <typename E>
bool NextCommandId(E* commandId) { bool NextCommandId(E* commandId) {
return NextCommandId(reinterpret_cast<uint32_t*>(commandId)); return NextCommandId(reinterpret_cast<uint32_t*>(commandId));
}
template <typename T>
T* NextCommand() {
return static_cast<T*>(NextCommand(sizeof(T), alignof(T)));
}
template <typename T>
T* NextData(size_t count) {
return static_cast<T*>(NextData(sizeof(T) * count, alignof(T)));
}
// Sets iterator to the beginning of the commands without emptying the list. This method can
// be used if iteration was stopped early and the iterator needs to be restarted.
void Reset();
// This method must to be called after commands have been deleted. This indicates that the
// commands have been submitted and they are no longer valid.
void MakeEmptyAsDataWasDestroyed();
private:
bool IsEmpty() const;
DAWN_FORCE_INLINE bool NextCommandId(uint32_t* commandId) {
uint8_t* idPtr = AlignPtr(mCurrentPtr, alignof(uint32_t));
ASSERT(idPtr + sizeof(uint32_t) <=
mBlocks[mCurrentBlock].block + mBlocks[mCurrentBlock].size);
uint32_t id = *reinterpret_cast<uint32_t*>(idPtr);
if (id != detail::kEndOfBlock) {
mCurrentPtr = idPtr + sizeof(uint32_t);
*commandId = id;
return true;
} }
template <typename T> return NextCommandIdInNewBlock(commandId);
T* NextCommand() { }
return static_cast<T*>(NextCommand(sizeof(T), alignof(T)));
bool NextCommandIdInNewBlock(uint32_t* commandId);
DAWN_FORCE_INLINE void* NextCommand(size_t commandSize, size_t commandAlignment) {
uint8_t* commandPtr = AlignPtr(mCurrentPtr, commandAlignment);
ASSERT(commandPtr + sizeof(commandSize) <=
mBlocks[mCurrentBlock].block + mBlocks[mCurrentBlock].size);
mCurrentPtr = commandPtr + commandSize;
return commandPtr;
}
DAWN_FORCE_INLINE void* NextData(size_t dataSize, size_t dataAlignment) {
uint32_t id;
bool hasId = NextCommandId(&id);
ASSERT(hasId);
ASSERT(id == detail::kAdditionalData);
return NextCommand(dataSize, dataAlignment);
}
CommandBlocks mBlocks;
uint8_t* mCurrentPtr = nullptr;
size_t mCurrentBlock = 0;
// Used to avoid a special case for empty iterators.
uint32_t mEndOfBlock = detail::kEndOfBlock;
};
class CommandAllocator : public NonCopyable {
public:
CommandAllocator();
~CommandAllocator();
// NOTE: A moved-from CommandAllocator is reset to its initial empty state.
CommandAllocator(CommandAllocator&&);
CommandAllocator& operator=(CommandAllocator&&);
// Frees all blocks held by the allocator and restores it to its initial empty state.
void Reset();
bool IsEmpty() const;
template <typename T, typename E>
T* Allocate(E commandId) {
static_assert(sizeof(E) == sizeof(uint32_t));
static_assert(alignof(E) == alignof(uint32_t));
static_assert(alignof(T) <= kMaxSupportedAlignment);
T* result =
reinterpret_cast<T*>(Allocate(static_cast<uint32_t>(commandId), sizeof(T), alignof(T)));
if (!result) {
return nullptr;
} }
template <typename T> new (result) T;
T* NextData(size_t count) { return result;
return static_cast<T*>(NextData(sizeof(T) * count, alignof(T))); }
template <typename T>
T* AllocateData(size_t count) {
static_assert(alignof(T) <= kMaxSupportedAlignment);
T* result = reinterpret_cast<T*>(AllocateData(sizeof(T) * count, alignof(T)));
if (!result) {
return nullptr;
} }
for (size_t i = 0; i < count; i++) {
// Sets iterator to the beginning of the commands without emptying the list. This method can new (result + i) T;
// be used if iteration was stopped early and the iterator needs to be restarted.
void Reset();
// This method must to be called after commands have been deleted. This indicates that the
// commands have been submitted and they are no longer valid.
void MakeEmptyAsDataWasDestroyed();
private:
bool IsEmpty() const;
DAWN_FORCE_INLINE bool NextCommandId(uint32_t* commandId) {
uint8_t* idPtr = AlignPtr(mCurrentPtr, alignof(uint32_t));
ASSERT(idPtr + sizeof(uint32_t) <=
mBlocks[mCurrentBlock].block + mBlocks[mCurrentBlock].size);
uint32_t id = *reinterpret_cast<uint32_t*>(idPtr);
if (id != detail::kEndOfBlock) {
mCurrentPtr = idPtr + sizeof(uint32_t);
*commandId = id;
return true;
}
return NextCommandIdInNewBlock(commandId);
} }
return result;
}
bool NextCommandIdInNewBlock(uint32_t* commandId); private:
// This is used for some internal computations and can be any power of two as long as code
// using the CommandAllocator passes the static_asserts.
static constexpr size_t kMaxSupportedAlignment = 8;
DAWN_FORCE_INLINE void* NextCommand(size_t commandSize, size_t commandAlignment) { // To avoid checking for overflows at every step of the computations we compute an upper
uint8_t* commandPtr = AlignPtr(mCurrentPtr, commandAlignment); // bound of the space that will be needed in addition to the command data.
ASSERT(commandPtr + sizeof(commandSize) <= static constexpr size_t kWorstCaseAdditionalSize =
mBlocks[mCurrentBlock].block + mBlocks[mCurrentBlock].size); sizeof(uint32_t) + kMaxSupportedAlignment + alignof(uint32_t) + sizeof(uint32_t);
mCurrentPtr = commandPtr + commandSize; // The default value of mLastAllocationSize.
return commandPtr; static constexpr size_t kDefaultBaseAllocationSize = 2048;
friend CommandIterator;
CommandBlocks&& AcquireBlocks();
DAWN_FORCE_INLINE uint8_t* Allocate(uint32_t commandId,
size_t commandSize,
size_t commandAlignment) {
ASSERT(mCurrentPtr != nullptr);
ASSERT(mEndPtr != nullptr);
ASSERT(commandId != detail::kEndOfBlock);
// It should always be possible to allocate one id, for kEndOfBlock tagging,
ASSERT(IsPtrAligned(mCurrentPtr, alignof(uint32_t)));
ASSERT(mEndPtr >= mCurrentPtr);
ASSERT(static_cast<size_t>(mEndPtr - mCurrentPtr) >= sizeof(uint32_t));
// The memory after the ID will contain the following:
// - the current ID
// - padding to align the command, maximum kMaxSupportedAlignment
// - the command of size commandSize
// - padding to align the next ID, maximum alignof(uint32_t)
// - the next ID of size sizeof(uint32_t)
// This can't overflow because by construction mCurrentPtr always has space for the next
// ID.
size_t remainingSize = static_cast<size_t>(mEndPtr - mCurrentPtr);
// The good case were we have enough space for the command data and upper bound of the
// extra required space.
if ((remainingSize >= kWorstCaseAdditionalSize) &&
(remainingSize - kWorstCaseAdditionalSize >= commandSize)) {
uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr);
*idAlloc = commandId;
uint8_t* commandAlloc = AlignPtr(mCurrentPtr + sizeof(uint32_t), commandAlignment);
mCurrentPtr = AlignPtr(commandAlloc + commandSize, alignof(uint32_t));
return commandAlloc;
} }
return AllocateInNewBlock(commandId, commandSize, commandAlignment);
}
DAWN_FORCE_INLINE void* NextData(size_t dataSize, size_t dataAlignment) { uint8_t* AllocateInNewBlock(uint32_t commandId, size_t commandSize, size_t commandAlignment);
uint32_t id;
bool hasId = NextCommandId(&id);
ASSERT(hasId);
ASSERT(id == detail::kAdditionalData);
return NextCommand(dataSize, dataAlignment); DAWN_FORCE_INLINE uint8_t* AllocateData(size_t commandSize, size_t commandAlignment) {
} return Allocate(detail::kAdditionalData, commandSize, commandAlignment);
}
CommandBlocks mBlocks; bool GetNewBlock(size_t minimumSize);
uint8_t* mCurrentPtr = nullptr;
size_t mCurrentBlock = 0;
// Used to avoid a special case for empty iterators.
uint32_t mEndOfBlock = detail::kEndOfBlock;
};
class CommandAllocator : public NonCopyable { void ResetPointers();
public:
CommandAllocator();
~CommandAllocator();
// NOTE: A moved-from CommandAllocator is reset to its initial empty state. CommandBlocks mBlocks;
    CommandAllocator(CommandAllocator&&);
    CommandAllocator& operator=(CommandAllocator&&);

    // Frees all blocks held by the allocator and restores it to its initial empty state.
    void Reset();

    bool IsEmpty() const;

    template <typename T, typename E>
    T* Allocate(E commandId) {
        static_assert(sizeof(E) == sizeof(uint32_t));
        static_assert(alignof(E) == alignof(uint32_t));
        static_assert(alignof(T) <= kMaxSupportedAlignment);
        T* result = reinterpret_cast<T*>(
            Allocate(static_cast<uint32_t>(commandId), sizeof(T), alignof(T)));
        if (!result) {
            return nullptr;
        }
        new (result) T;
        return result;
    }

    template <typename T>
    T* AllocateData(size_t count) {
        static_assert(alignof(T) <= kMaxSupportedAlignment);
        T* result = reinterpret_cast<T*>(AllocateData(sizeof(T) * count, alignof(T)));
        if (!result) {
            return nullptr;
        }
        for (size_t i = 0; i < count; i++) {
            new (result + i) T;
        }
        return result;
    }

  private:
    // This is used for some internal computations and can be any power of two as long as code
    // using the CommandAllocator passes the static_asserts.
    static constexpr size_t kMaxSupportedAlignment = 8;

    // To avoid checking for overflows at every step of the computations we compute an upper
    // bound of the space that will be needed in addition to the command data.
    static constexpr size_t kWorstCaseAdditionalSize =
        sizeof(uint32_t) + kMaxSupportedAlignment + alignof(uint32_t) + sizeof(uint32_t);

    // The default value of mLastAllocationSize.
    static constexpr size_t kDefaultBaseAllocationSize = 2048;

    friend CommandIterator;
    CommandBlocks&& AcquireBlocks();

    DAWN_FORCE_INLINE uint8_t* Allocate(uint32_t commandId,
                                        size_t commandSize,
                                        size_t commandAlignment) {
        ASSERT(mCurrentPtr != nullptr);
        ASSERT(mEndPtr != nullptr);
        ASSERT(commandId != detail::kEndOfBlock);

        // It should always be possible to allocate one id, for kEndOfBlock tagging,
        ASSERT(IsPtrAligned(mCurrentPtr, alignof(uint32_t)));
        ASSERT(mEndPtr >= mCurrentPtr);
        ASSERT(static_cast<size_t>(mEndPtr - mCurrentPtr) >= sizeof(uint32_t));

        // The memory after the ID will contain the following:
        //   - the current ID
        //   - padding to align the command, maximum kMaxSupportedAlignment
        //   - the command of size commandSize
        //   - padding to align the next ID, maximum alignof(uint32_t)
        //   - the next ID of size sizeof(uint32_t)

        // This can't overflow because by construction mCurrentPtr always has space for the next
        // ID.
        size_t remainingSize = static_cast<size_t>(mEndPtr - mCurrentPtr);

        // The good case were we have enough space for the command data and upper bound of the
        // extra required space.
        if ((remainingSize >= kWorstCaseAdditionalSize) &&
            (remainingSize - kWorstCaseAdditionalSize >= commandSize)) {
            uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr);
            *idAlloc = commandId;

            uint8_t* commandAlloc = AlignPtr(mCurrentPtr + sizeof(uint32_t), commandAlignment);
            mCurrentPtr = AlignPtr(commandAlloc + commandSize, alignof(uint32_t));

            return commandAlloc;
        }
        return AllocateInNewBlock(commandId, commandSize, commandAlignment);
    }

    uint8_t* AllocateInNewBlock(uint32_t commandId, size_t commandSize, size_t commandAlignment);

    DAWN_FORCE_INLINE uint8_t* AllocateData(size_t commandSize, size_t commandAlignment) {
        return Allocate(detail::kAdditionalData, commandSize, commandAlignment);
    }

    bool GetNewBlock(size_t minimumSize);

    void ResetPointers();

    CommandBlocks mBlocks;
    size_t mLastAllocationSize = kDefaultBaseAllocationSize;

    // Data used for the block range at initialization so that the first call to Allocate sees
    // there is not enough space and calls GetNewBlock. This avoids having to special case the
    // initialization in Allocate.
    uint32_t mPlaceholderEnum[1] = {0};

    // Pointers to the current range of allocation in the block. Guaranteed to allow for at
    // least one uint32_t if not nullptr, so that the special kEndOfBlock command id can always
    // be written. Nullptr iff the blocks were moved out.
    uint8_t* mCurrentPtr = nullptr;
    uint8_t* mEndPtr = nullptr;
};

}  // namespace dawn::native
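Not part of this change, but a minimal standalone sketch of the worst-case-bound arithmetic the allocator above relies on instead of per-step overflow checks. The constants mirror the ones in the header; the 24-byte command size is a made-up example.

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
    constexpr size_t kMaxSupportedAlignment = 8;
    // 4 (current ID) + 8 (max padding before the command) + 4 (max padding before the next ID)
    // + 4 (next ID) = 20 bytes of overhead, an upper bound on everything except the command.
    constexpr size_t kWorstCaseAdditionalSize =
        sizeof(uint32_t) + kMaxSupportedAlignment + alignof(uint32_t) + sizeof(uint32_t);

    constexpr size_t commandSize = 24;  // hypothetical command struct
    // The fast path in Allocate() only needs remainingSize >= commandSize + 20; otherwise it
    // falls back to AllocateInNewBlock().
    std::printf("fast path needs at least %zu bytes free\n",
                commandSize + kWorstCaseAdditionalSize);
    return 0;
}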
@@ -25,225 +25,221 @@
namespace dawn::native {

CommandBufferBase::CommandBufferBase(CommandEncoder* encoder,
                                     const CommandBufferDescriptor* descriptor)
    : ApiObjectBase(encoder->GetDevice(), descriptor->label),
      mCommands(encoder->AcquireCommands()),
      mResourceUsages(encoder->AcquireResourceUsages()) {
    TrackInDevice();
}

CommandBufferBase::CommandBufferBase(DeviceBase* device)
    : ApiObjectBase(device, kLabelNotImplemented) {
    TrackInDevice();
}

CommandBufferBase::CommandBufferBase(DeviceBase* device, ObjectBase::ErrorTag tag)
    : ApiObjectBase(device, tag) {}

// static
CommandBufferBase* CommandBufferBase::MakeError(DeviceBase* device) {
    return new CommandBufferBase(device, ObjectBase::kError);
}

ObjectType CommandBufferBase::GetType() const {
    return ObjectType::CommandBuffer;
}

MaybeError CommandBufferBase::ValidateCanUseInSubmitNow() const {
    ASSERT(!IsError());

    DAWN_INVALID_IF(!IsAlive(), "%s cannot be submitted more than once.", this);
    return {};
}

void CommandBufferBase::DestroyImpl() {
    FreeCommands(&mCommands);
    mResourceUsages = {};
}

const CommandBufferResourceUsage& CommandBufferBase::GetResourceUsages() const {
    return mResourceUsages;
}

CommandIterator* CommandBufferBase::GetCommandIteratorForTesting() {
    return &mCommands;
}

bool IsCompleteSubresourceCopiedTo(const TextureBase* texture,
                                   const Extent3D copySize,
                                   const uint32_t mipLevel) {
    Extent3D extent = texture->GetMipLevelPhysicalSize(mipLevel);

    switch (texture->GetDimension()) {
        case wgpu::TextureDimension::e1D:
            return extent.width == copySize.width;
        case wgpu::TextureDimension::e2D:
            return extent.width == copySize.width && extent.height == copySize.height;
        case wgpu::TextureDimension::e3D:
            return extent.width == copySize.width && extent.height == copySize.height &&
                   extent.depthOrArrayLayers == copySize.depthOrArrayLayers;
    }

    UNREACHABLE();
}

SubresourceRange GetSubresourcesAffectedByCopy(const TextureCopy& copy, const Extent3D& copySize) {
    switch (copy.texture->GetDimension()) {
        case wgpu::TextureDimension::e1D:
            ASSERT(copy.origin.z == 0 && copySize.depthOrArrayLayers == 1);
            ASSERT(copy.mipLevel == 0);
            return {copy.aspect, {0, 1}, {0, 1}};
        case wgpu::TextureDimension::e2D:
            return {copy.aspect, {copy.origin.z, copySize.depthOrArrayLayers}, {copy.mipLevel, 1}};
        case wgpu::TextureDimension::e3D:
            return {copy.aspect, {0, 1}, {copy.mipLevel, 1}};
    }

    UNREACHABLE();
}

void LazyClearRenderPassAttachments(BeginRenderPassCmd* renderPass) {
    for (ColorAttachmentIndex i :
         IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
        auto& attachmentInfo = renderPass->colorAttachments[i];
        TextureViewBase* view = attachmentInfo.view.Get();
        bool hasResolveTarget = attachmentInfo.resolveTarget != nullptr;

        ASSERT(view->GetLayerCount() == 1);
        ASSERT(view->GetLevelCount() == 1);
        SubresourceRange range = view->GetSubresourceRange();

        // If the loadOp is Load, but the subresource is not initialized, use Clear instead.
        if (attachmentInfo.loadOp == wgpu::LoadOp::Load &&
            !view->GetTexture()->IsSubresourceContentInitialized(range)) {
            attachmentInfo.loadOp = wgpu::LoadOp::Clear;
            attachmentInfo.clearColor = {0.f, 0.f, 0.f, 0.f};
        }

        if (hasResolveTarget) {
            // We need to set the resolve target to initialized so that it does not get
            // cleared later in the pipeline. The texture will be resolved from the
            // source color attachment, which will be correctly initialized.
            TextureViewBase* resolveView = attachmentInfo.resolveTarget.Get();
            ASSERT(resolveView->GetLayerCount() == 1);
            ASSERT(resolveView->GetLevelCount() == 1);
            resolveView->GetTexture()->SetIsSubresourceContentInitialized(
                true, resolveView->GetSubresourceRange());
        }

        switch (attachmentInfo.storeOp) {
            case wgpu::StoreOp::Store:
                view->GetTexture()->SetIsSubresourceContentInitialized(true, range);
                break;

            case wgpu::StoreOp::Discard:
                view->GetTexture()->SetIsSubresourceContentInitialized(false, range);
                break;

            case wgpu::StoreOp::Undefined:
                UNREACHABLE();
                break;
        }
    }

    if (renderPass->attachmentState->HasDepthStencilAttachment()) {
        auto& attachmentInfo = renderPass->depthStencilAttachment;
        TextureViewBase* view = attachmentInfo.view.Get();
        ASSERT(view->GetLayerCount() == 1);
        ASSERT(view->GetLevelCount() == 1);
        SubresourceRange range = view->GetSubresourceRange();

        SubresourceRange depthRange = range;
        depthRange.aspects = range.aspects & Aspect::Depth;

        SubresourceRange stencilRange = range;
        stencilRange.aspects = range.aspects & Aspect::Stencil;

        // If the depth stencil texture has not been initialized, we want to use loadop
        // clear to init the contents to 0's
        if (!view->GetTexture()->IsSubresourceContentInitialized(depthRange) &&
            attachmentInfo.depthLoadOp == wgpu::LoadOp::Load) {
            attachmentInfo.clearDepth = 0.0f;
            attachmentInfo.depthLoadOp = wgpu::LoadOp::Clear;
        }

        if (!view->GetTexture()->IsSubresourceContentInitialized(stencilRange) &&
            attachmentInfo.stencilLoadOp == wgpu::LoadOp::Load) {
            attachmentInfo.clearStencil = 0u;
            attachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
        }

        view->GetTexture()->SetIsSubresourceContentInitialized(
            attachmentInfo.depthStoreOp == wgpu::StoreOp::Store, depthRange);

        view->GetTexture()->SetIsSubresourceContentInitialized(
            attachmentInfo.stencilStoreOp == wgpu::StoreOp::Store, stencilRange);
    }
}

bool IsFullBufferOverwrittenInTextureToBufferCopy(const CopyTextureToBufferCmd* copy) {
    ASSERT(copy != nullptr);

    if (copy->destination.offset > 0) {
        // The copy doesn't touch the start of the buffer.
        return false;
    }

    const TextureBase* texture = copy->source.texture.Get();
    const TexelBlockInfo& blockInfo = texture->GetFormat().GetAspectInfo(copy->source.aspect).block;
    const uint64_t widthInBlocks = copy->copySize.width / blockInfo.width;
    const uint64_t heightInBlocks = copy->copySize.height / blockInfo.height;
    const bool multiSlice = copy->copySize.depthOrArrayLayers > 1;
    const bool multiRow = multiSlice || heightInBlocks > 1;

    if (multiSlice && copy->destination.rowsPerImage > heightInBlocks) {
        // There are gaps between slices that aren't overwritten
        return false;
    }

    const uint64_t copyTextureDataSizePerRow = widthInBlocks * blockInfo.byteSize;
    if (multiRow && copy->destination.bytesPerRow > copyTextureDataSizePerRow) {
        // There are gaps between rows that aren't overwritten
        return false;
    }

    // After the above checks, we're sure the copy has no gaps.
    // Now, compute the total number of bytes written.
    const uint64_t writtenBytes =
        ComputeRequiredBytesInCopy(blockInfo, copy->copySize, copy->destination.bytesPerRow,
                                   copy->destination.rowsPerImage)
            .AcquireSuccess();
    if (!copy->destination.buffer->IsFullBufferRange(copy->destination.offset, writtenBytes)) {
        // The written bytes don't cover the whole buffer.
        return false;
    }

    return true;
}

std::array<float, 4> ConvertToFloatColor(dawn::native::Color color) {
    const std::array<float, 4> outputValue = {
        static_cast<float>(color.r), static_cast<float>(color.g), static_cast<float>(color.b),
        static_cast<float>(color.a)};
    return outputValue;
}

std::array<int32_t, 4> ConvertToSignedIntegerColor(dawn::native::Color color) {
    const std::array<int32_t, 4> outputValue = {
        static_cast<int32_t>(color.r), static_cast<int32_t>(color.g), static_cast<int32_t>(color.b),
        static_cast<int32_t>(color.a)};
    return outputValue;
}

std::array<uint32_t, 4> ConvertToUnsignedIntegerColor(dawn::native::Color color) {
    const std::array<uint32_t, 4> outputValue = {
        static_cast<uint32_t>(color.r), static_cast<uint32_t>(color.g),
        static_cast<uint32_t>(color.b), static_cast<uint32_t>(color.a)};
    return outputValue;
}

}  // namespace dawn::native
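Not part of this change: a small standalone sketch of the row-gap reasoning in IsFullBufferOverwrittenInTextureToBufferCopy above, on concrete (made-up) numbers for an RGBA8-style copy.

#include <cstdint>
#include <cstdio>

int main() {
    // A 256x64 copy of a format with 1 texel per block and 4 bytes per block.
    const uint64_t widthInBlocks = 256;
    const uint64_t heightInBlocks = 64;
    const uint64_t blockByteSize = 4;
    const uint64_t bytesPerRow = 1280;  // row pitch chosen by the application

    const uint64_t copyTextureDataSizePerRow = widthInBlocks * blockByteSize;  // 1024
    const bool multiRow = heightInBlocks > 1;

    // 1280 > 1024, so every row leaves a 256-byte gap in the destination buffer and the copy
    // cannot be treated as overwriting the whole buffer.
    std::printf("gap per row: %llu bytes\n",
                static_cast<unsigned long long>(bytesPerRow - copyTextureDataSizePerRow));
    return (multiRow && bytesPerRow > copyTextureDataSizePerRow) ? 0 : 1;
}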
@@ -26,50 +26,49 @@
namespace dawn::native {

struct BeginRenderPassCmd;
struct CopyTextureToBufferCmd;
struct TextureCopy;

class CommandBufferBase : public ApiObjectBase {
  public:
    CommandBufferBase(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);

    static CommandBufferBase* MakeError(DeviceBase* device);

    ObjectType GetType() const override;

    MaybeError ValidateCanUseInSubmitNow() const;

    const CommandBufferResourceUsage& GetResourceUsages() const;

    CommandIterator* GetCommandIteratorForTesting();

  protected:
    // Constructor used only for mocking and testing.
    explicit CommandBufferBase(DeviceBase* device);
    void DestroyImpl() override;

    CommandIterator mCommands;

  private:
    CommandBufferBase(DeviceBase* device, ObjectBase::ErrorTag tag);

    CommandBufferResourceUsage mResourceUsages;
};

bool IsCompleteSubresourceCopiedTo(const TextureBase* texture,
                                   const Extent3D copySize,
                                   const uint32_t mipLevel);
SubresourceRange GetSubresourcesAffectedByCopy(const TextureCopy& copy, const Extent3D& copySize);

void LazyClearRenderPassAttachments(BeginRenderPassCmd* renderPass);

bool IsFullBufferOverwrittenInTextureToBufferCopy(const CopyTextureToBufferCmd* copy);

std::array<float, 4> ConvertToFloatColor(dawn::native::Color color);
std::array<int32_t, 4> ConvertToSignedIntegerColor(dawn::native::Color color);
std::array<uint32_t, 4> ConvertToUnsignedIntegerColor(dawn::native::Color color);

}  // namespace dawn::native
@@ -30,392 +30,385 @@
namespace dawn::native {

namespace {
bool BufferSizesAtLeastAsBig(const ityp::span<uint32_t, uint64_t> unverifiedBufferSizes,
                             const std::vector<uint64_t>& pipelineMinBufferSizes) {
    ASSERT(unverifiedBufferSizes.size() == pipelineMinBufferSizes.size());

    for (uint32_t i = 0; i < unverifiedBufferSizes.size(); ++i) {
        if (unverifiedBufferSizes[i] < pipelineMinBufferSizes[i]) {
            return false;
        }
    }

    return true;
}
}  // namespace

enum ValidationAspect {
    VALIDATION_ASPECT_PIPELINE,
    VALIDATION_ASPECT_BIND_GROUPS,
    VALIDATION_ASPECT_VERTEX_BUFFERS,
    VALIDATION_ASPECT_INDEX_BUFFER,

    VALIDATION_ASPECT_COUNT
};
static_assert(VALIDATION_ASPECT_COUNT == CommandBufferStateTracker::kNumAspects);

static constexpr CommandBufferStateTracker::ValidationAspects kDispatchAspects =
    1 << VALIDATION_ASPECT_PIPELINE | 1 << VALIDATION_ASPECT_BIND_GROUPS;

static constexpr CommandBufferStateTracker::ValidationAspects kDrawAspects =
    1 << VALIDATION_ASPECT_PIPELINE | 1 << VALIDATION_ASPECT_BIND_GROUPS |
    1 << VALIDATION_ASPECT_VERTEX_BUFFERS;

static constexpr CommandBufferStateTracker::ValidationAspects kDrawIndexedAspects =
    1 << VALIDATION_ASPECT_PIPELINE | 1 << VALIDATION_ASPECT_BIND_GROUPS |
    1 << VALIDATION_ASPECT_VERTEX_BUFFERS | 1 << VALIDATION_ASPECT_INDEX_BUFFER;

static constexpr CommandBufferStateTracker::ValidationAspects kLazyAspects =
    1 << VALIDATION_ASPECT_BIND_GROUPS | 1 << VALIDATION_ASPECT_VERTEX_BUFFERS |
    1 << VALIDATION_ASPECT_INDEX_BUFFER;

MaybeError CommandBufferStateTracker::ValidateCanDispatch() {
    return ValidateOperation(kDispatchAspects);
}

MaybeError CommandBufferStateTracker::ValidateCanDraw() {
    return ValidateOperation(kDrawAspects);
}

MaybeError CommandBufferStateTracker::ValidateCanDrawIndexed() {
    return ValidateOperation(kDrawIndexedAspects);
}

MaybeError CommandBufferStateTracker::ValidateBufferInRangeForVertexBuffer(uint32_t vertexCount,
                                                                           uint32_t firstVertex) {
    RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();

    const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>& vertexBufferSlotsUsedAsVertexBuffer =
        lastRenderPipeline->GetVertexBufferSlotsUsedAsVertexBuffer();

    for (auto usedSlotVertex : IterateBitSet(vertexBufferSlotsUsedAsVertexBuffer)) {
        const VertexBufferInfo& vertexBuffer = lastRenderPipeline->GetVertexBuffer(usedSlotVertex);
        uint64_t arrayStride = vertexBuffer.arrayStride;
        uint64_t bufferSize = mVertexBufferSizes[usedSlotVertex];

        if (arrayStride == 0) {
            DAWN_INVALID_IF(vertexBuffer.usedBytesInStride > bufferSize,
                            "Bound vertex buffer size (%u) at slot %u with an arrayStride of 0 "
                            "is smaller than the required size for all attributes (%u)",
                            bufferSize, static_cast<uint8_t>(usedSlotVertex),
                            vertexBuffer.usedBytesInStride);
        } else {
            uint64_t strideCount = static_cast<uint64_t>(firstVertex) + vertexCount;
            if (strideCount != 0u) {
                uint64_t requiredSize = (strideCount - 1u) * arrayStride + vertexBuffer.lastStride;
                // firstVertex and vertexCount are in uint32_t,
                // arrayStride must not be larger than kMaxVertexBufferArrayStride, which is
                // currently 2048, and vertexBuffer.lastStride = max(attribute.offset +
                // sizeof(attribute.format)) with attribute.offset being no larger than
                // kMaxVertexBufferArrayStride, so by doing checks in uint64_t we avoid
                // overflows.
                DAWN_INVALID_IF(
                    requiredSize > bufferSize,
                    "Vertex range (first: %u, count: %u) requires a larger buffer (%u) than "
                    "the "
                    "bound buffer size (%u) of the vertex buffer at slot %u with stride %u.",
                    firstVertex, vertexCount, requiredSize, bufferSize,
                    static_cast<uint8_t>(usedSlotVertex), arrayStride);
            }
        }
    }

    return {};
}

MaybeError CommandBufferStateTracker::ValidateBufferInRangeForInstanceBuffer(
    uint32_t instanceCount,
    uint32_t firstInstance) {
    RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();

    const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>& vertexBufferSlotsUsedAsInstanceBuffer =
        lastRenderPipeline->GetVertexBufferSlotsUsedAsInstanceBuffer();

    for (auto usedSlotInstance : IterateBitSet(vertexBufferSlotsUsedAsInstanceBuffer)) {
        const VertexBufferInfo& vertexBuffer =
            lastRenderPipeline->GetVertexBuffer(usedSlotInstance);
        uint64_t arrayStride = vertexBuffer.arrayStride;
        uint64_t bufferSize = mVertexBufferSizes[usedSlotInstance];
        if (arrayStride == 0) {
            DAWN_INVALID_IF(vertexBuffer.usedBytesInStride > bufferSize,
                            "Bound vertex buffer size (%u) at slot %u with an arrayStride of 0 "
                            "is smaller than the required size for all attributes (%u)",
                            bufferSize, static_cast<uint8_t>(usedSlotInstance),
                            vertexBuffer.usedBytesInStride);
        } else {
            uint64_t strideCount = static_cast<uint64_t>(firstInstance) + instanceCount;
            if (strideCount != 0u) {
                uint64_t requiredSize = (strideCount - 1u) * arrayStride + vertexBuffer.lastStride;
                // firstInstance and instanceCount are in uint32_t,
                // arrayStride must not be larger than kMaxVertexBufferArrayStride, which is
                // currently 2048, and vertexBuffer.lastStride = max(attribute.offset +
                // sizeof(attribute.format)) with attribute.offset being no larger than
                // kMaxVertexBufferArrayStride, so by doing checks in uint64_t we avoid
                // overflows.
                DAWN_INVALID_IF(
                    requiredSize > bufferSize,
                    "Instance range (first: %u, count: %u) requires a larger buffer (%u) than "
                    "the "
                    "bound buffer size (%u) of the vertex buffer at slot %u with stride %u.",
                    firstInstance, instanceCount, requiredSize, bufferSize,
                    static_cast<uint8_t>(usedSlotInstance), arrayStride);
            }
        }
    }

    return {};
}

MaybeError CommandBufferStateTracker::ValidateIndexBufferInRange(uint32_t indexCount,
                                                                 uint32_t firstIndex) {
    // Validate the range of index buffer
    // firstIndex and indexCount are in uint32_t, while IndexFormatSize is 2 (for
    // wgpu::IndexFormat::Uint16) or 4 (for wgpu::IndexFormat::Uint32), so by doing checks in
    // uint64_t we avoid overflows.
    DAWN_INVALID_IF(
        (static_cast<uint64_t>(firstIndex) + indexCount) * IndexFormatSize(mIndexFormat) >
            mIndexBufferSize,
        "Index range (first: %u, count: %u, format: %s) does not fit in index buffer size "
        "(%u).",
        firstIndex, indexCount, mIndexFormat, mIndexBufferSize);
    return {};
}

MaybeError CommandBufferStateTracker::ValidateOperation(ValidationAspects requiredAspects) {
    // Fast return-true path if everything is good
    ValidationAspects missingAspects = requiredAspects & ~mAspects;
    if (missingAspects.none()) {
        return {};
    }

    // Generate an error immediately if a non-lazy aspect is missing as computing lazy aspects
    // requires the pipeline to be set.
    DAWN_TRY(CheckMissingAspects(missingAspects & ~kLazyAspects));

    RecomputeLazyAspects(missingAspects);

    DAWN_TRY(CheckMissingAspects(requiredAspects & ~mAspects));

    return {};
}

void CommandBufferStateTracker::RecomputeLazyAspects(ValidationAspects aspects) {
    ASSERT(mAspects[VALIDATION_ASPECT_PIPELINE]);
    ASSERT((aspects & ~kLazyAspects).none());

    if (aspects[VALIDATION_ASPECT_BIND_GROUPS]) {
        bool matches = true;

        for (BindGroupIndex i : IterateBitSet(mLastPipelineLayout->GetBindGroupLayoutsMask())) {
            if (mBindgroups[i] == nullptr ||
                mLastPipelineLayout->GetBindGroupLayout(i) != mBindgroups[i]->GetLayout() ||
                !BufferSizesAtLeastAsBig(mBindgroups[i]->GetUnverifiedBufferSizes(),
                                         (*mMinBufferSizes)[i])) {
                matches = false;
                break;
            }
        }

        if (matches) {
            mAspects.set(VALIDATION_ASPECT_BIND_GROUPS);
        }
    }

    if (aspects[VALIDATION_ASPECT_VERTEX_BUFFERS]) {
        RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();

        const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>& requiredVertexBuffers =
            lastRenderPipeline->GetVertexBufferSlotsUsed();
        if (IsSubset(requiredVertexBuffers, mVertexBufferSlotsUsed)) {
            mAspects.set(VALIDATION_ASPECT_VERTEX_BUFFERS);
        }
    }

    if (aspects[VALIDATION_ASPECT_INDEX_BUFFER] && mIndexBufferSet) {
        RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
        if (!IsStripPrimitiveTopology(lastRenderPipeline->GetPrimitiveTopology()) ||
            mIndexFormat == lastRenderPipeline->GetStripIndexFormat()) {
            mAspects.set(VALIDATION_ASPECT_INDEX_BUFFER);
        }
    }
}

MaybeError CommandBufferStateTracker::CheckMissingAspects(ValidationAspects aspects) {
    if (!aspects.any()) {
        return {};
    }

    DAWN_INVALID_IF(aspects[VALIDATION_ASPECT_PIPELINE], "No pipeline set.");

    if (DAWN_UNLIKELY(aspects[VALIDATION_ASPECT_INDEX_BUFFER])) {
        DAWN_INVALID_IF(!mIndexBufferSet, "Index buffer was not set.");

        RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
        wgpu::IndexFormat pipelineIndexFormat = lastRenderPipeline->GetStripIndexFormat();

        if (IsStripPrimitiveTopology(lastRenderPipeline->GetPrimitiveTopology())) {
            DAWN_INVALID_IF(
                pipelineIndexFormat == wgpu::IndexFormat::Undefined,
                "%s has a strip primitive topology (%s) but a strip index format of %s, which "
                "prevents it for being used for indexed draw calls.",
                lastRenderPipeline, lastRenderPipeline->GetPrimitiveTopology(),
                pipelineIndexFormat);

            DAWN_INVALID_IF(
                mIndexFormat != pipelineIndexFormat,
                "Strip index format (%s) of %s does not match index buffer format (%s).",
                pipelineIndexFormat, lastRenderPipeline, mIndexFormat);
        }

        // The chunk of code above should be similar to the one in |RecomputeLazyAspects|.
        // It returns the first invalid state found. We shouldn't be able to reach this line
        // because to have invalid aspects one of the above conditions must have failed earlier.
        // If this is reached, make sure lazy aspects and the error checks above are consistent.
        UNREACHABLE();
        return DAWN_FORMAT_VALIDATION_ERROR("Index buffer is invalid.");
    }

    // TODO(dawn:563): Indicate which slots were not set.
    DAWN_INVALID_IF(aspects[VALIDATION_ASPECT_VERTEX_BUFFERS],
                    "Vertex buffer slots required by %s were not set.", GetRenderPipeline());

    if (DAWN_UNLIKELY(aspects[VALIDATION_ASPECT_BIND_GROUPS])) {
        for (BindGroupIndex i : IterateBitSet(mLastPipelineLayout->GetBindGroupLayoutsMask())) {
            ASSERT(HasPipeline());

            DAWN_INVALID_IF(mBindgroups[i] == nullptr, "No bind group set at index %u.",
                            static_cast<uint32_t>(i));

            BindGroupLayoutBase* requiredBGL = mLastPipelineLayout->GetBindGroupLayout(i);
            BindGroupLayoutBase* currentBGL = mBindgroups[i]->GetLayout();

            DAWN_INVALID_IF(
                requiredBGL->GetPipelineCompatibilityToken() != PipelineCompatibilityToken(0) &&
                    currentBGL->GetPipelineCompatibilityToken() !=
                        requiredBGL->GetPipelineCompatibilityToken(),
                "The current pipeline (%s) was created with a default layout, and is not "
                "compatible with the %s at index %u which uses a %s that was not created by "
                "the pipeline. Either use the bind group layout returned by calling "
                "getBindGroupLayout(%u) on the pipeline when creating the bind group, or "
                "provide an explicit pipeline layout when creating the pipeline.",
                mLastPipeline, mBindgroups[i], static_cast<uint32_t>(i), currentBGL,
                static_cast<uint32_t>(i));

            DAWN_INVALID_IF(
                requiredBGL->GetPipelineCompatibilityToken() == PipelineCompatibilityToken(0) &&
                    currentBGL->GetPipelineCompatibilityToken() != PipelineCompatibilityToken(0),
                "%s at index %u uses a %s which was created as part of the default layout for "
                "a different pipeline than the current one (%s), and as a result is not "
                "compatible. Use an explicit bind group layout when creating bind groups and "
                "an explicit pipeline layout when creating pipelines to share bind groups "
                "between pipelines.",
                mBindgroups[i], static_cast<uint32_t>(i), currentBGL, mLastPipeline);

            DAWN_INVALID_IF(
                mLastPipelineLayout->GetBindGroupLayout(i) != mBindgroups[i]->GetLayout(),
                "Bind group layout %s of pipeline layout %s does not match layout %s of bind "
                "group %s at index %u.",
                requiredBGL, mLastPipelineLayout, currentBGL, mBindgroups[i],
                static_cast<uint32_t>(i));

            // TODO(dawn:563): Report the binding sizes and which ones are failing.
            DAWN_INVALID_IF(!BufferSizesAtLeastAsBig(mBindgroups[i]->GetUnverifiedBufferSizes(),
                                                     (*mMinBufferSizes)[i]),
                            "Binding sizes are too small for bind group %s at index %u",
                            mBindgroups[i], static_cast<uint32_t>(i));
        }

        // The chunk of code above should be similar to the one in |RecomputeLazyAspects|.
        // It returns the first invalid state found. We shouldn't be able to reach this line
        // because to have invalid aspects one of the above conditions must have failed earlier.
        // If this is reached, make sure lazy aspects and the error checks above are consistent.
        UNREACHABLE();
        return DAWN_FORMAT_VALIDATION_ERROR("Bind groups are invalid.");
    }

    UNREACHABLE();
}

void CommandBufferStateTracker::SetComputePipeline(ComputePipelineBase* pipeline) {
    SetPipelineCommon(pipeline);
}

void CommandBufferStateTracker::SetRenderPipeline(RenderPipelineBase* pipeline) {
    SetPipelineCommon(pipeline);
}

void CommandBufferStateTracker::SetBindGroup(BindGroupIndex index,
                                             BindGroupBase* bindgroup,
                                             uint32_t dynamicOffsetCount,
                                             const uint32_t* dynamicOffsets) {
    mBindgroups[index] = bindgroup;
    mDynamicOffsets[index].assign(dynamicOffsets, dynamicOffsets + dynamicOffsetCount);
    mAspects.reset(VALIDATION_ASPECT_BIND_GROUPS);
}

void CommandBufferStateTracker::SetIndexBuffer(wgpu::IndexFormat format, uint64_t size) {
    mIndexBufferSet = true;
    mIndexFormat = format;
    mIndexBufferSize = size;
}

void CommandBufferStateTracker::SetVertexBuffer(VertexBufferSlot slot, uint64_t size) {
    mVertexBufferSlotsUsed.set(slot);
    mVertexBufferSizes[slot] = size;
}

void CommandBufferStateTracker::SetPipelineCommon(PipelineBase* pipeline) {
    mLastPipeline = pipeline;
    mLastPipelineLayout = pipeline != nullptr ? pipeline->GetLayout() : nullptr;
    mMinBufferSizes = pipeline != nullptr ? &pipeline->GetMinBufferSizes() : nullptr;

    mAspects.set(VALIDATION_ASPECT_PIPELINE);

    // Reset lazy aspects so they get recomputed on the next operation.
    mAspects &= ~kLazyAspects;
}

BindGroupBase* CommandBufferStateTracker::GetBindGroup(BindGroupIndex index) const {
    return mBindgroups[index];
}

const std::vector<uint32_t>& CommandBufferStateTracker::GetDynamicOffsets(
    BindGroupIndex index) const {
    return mDynamicOffsets[index];
}

bool CommandBufferStateTracker::HasPipeline() const {
    return mLastPipeline != nullptr;
}

RenderPipelineBase* CommandBufferStateTracker::GetRenderPipeline() const {
    ASSERT(HasPipeline() && mLastPipeline->GetType() == ObjectType::RenderPipeline);
    return static_cast<RenderPipelineBase*>(mLastPipeline);
}

ComputePipelineBase* CommandBufferStateTracker::GetComputePipeline() const {
    ASSERT(HasPipeline() && mLastPipeline->GetType() == ObjectType::ComputePipeline);
    return static_cast<ComputePipelineBase*>(mLastPipeline);
}

PipelineLayoutBase* CommandBufferStateTracker::GetPipelineLayout() const {
    return mLastPipelineLayout;
}

wgpu::IndexFormat CommandBufferStateTracker::GetIndexFormat() const {
    return mIndexFormat;
}

uint64_t CommandBufferStateTracker::GetIndexBufferSize() const {
    return mIndexBufferSize;
}

}  // namespace dawn::native
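Not part of this change: the requiredSize formula used by the vertex- and instance-range checks above, evaluated on made-up numbers as a standalone sketch.

#include <cstdint>
#include <cstdio>

int main() {
    const uint32_t firstVertex = 0;
    const uint32_t vertexCount = 100;
    const uint64_t arrayStride = 16;  // bytes between consecutive vertices
    const uint64_t lastStride = 12;   // offset + size of the last attribute within a stride

    const uint64_t strideCount = static_cast<uint64_t>(firstVertex) + vertexCount;
    const uint64_t requiredSize = (strideCount - 1u) * arrayStride + lastStride;  // 99*16+12 = 1596

    // A bound vertex buffer of at least 1596 bytes passes; anything smaller triggers the
    // "requires a larger buffer" validation error above.
    std::printf("requiredSize = %llu bytes\n", static_cast<unsigned long long>(requiredSize));
    return 0;
}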
@@ -26,62 +26,62 @@
namespace dawn::native {

class CommandBufferStateTracker {
  public:
    // Non-state-modifying validation functions
    MaybeError ValidateCanDispatch();
    MaybeError ValidateCanDraw();
    MaybeError ValidateCanDrawIndexed();
    MaybeError ValidateBufferInRangeForVertexBuffer(uint32_t vertexCount, uint32_t firstVertex);
    MaybeError ValidateBufferInRangeForInstanceBuffer(uint32_t instanceCount,
                                                      uint32_t firstInstance);
    MaybeError ValidateIndexBufferInRange(uint32_t indexCount, uint32_t firstIndex);

    // State-modifying methods
    void SetComputePipeline(ComputePipelineBase* pipeline);
    void SetRenderPipeline(RenderPipelineBase* pipeline);
    void SetBindGroup(BindGroupIndex index,
                      BindGroupBase* bindgroup,
                      uint32_t dynamicOffsetCount,
                      const uint32_t* dynamicOffsets);
    void SetIndexBuffer(wgpu::IndexFormat format, uint64_t size);
    void SetVertexBuffer(VertexBufferSlot slot, uint64_t size);

    static constexpr size_t kNumAspects = 4;
    using ValidationAspects = std::bitset<kNumAspects>;

    BindGroupBase* GetBindGroup(BindGroupIndex index) const;
    const std::vector<uint32_t>& GetDynamicOffsets(BindGroupIndex index) const;
    bool HasPipeline() const;
    RenderPipelineBase* GetRenderPipeline() const;
    ComputePipelineBase* GetComputePipeline() const;
    PipelineLayoutBase* GetPipelineLayout() const;
    wgpu::IndexFormat GetIndexFormat() const;
    uint64_t GetIndexBufferSize() const;

  private:
    MaybeError ValidateOperation(ValidationAspects requiredAspects);
    void RecomputeLazyAspects(ValidationAspects aspects);
    MaybeError CheckMissingAspects(ValidationAspects aspects);

    void SetPipelineCommon(PipelineBase* pipeline);

    ValidationAspects mAspects;

    ityp::array<BindGroupIndex, BindGroupBase*, kMaxBindGroups> mBindgroups = {};
    ityp::array<BindGroupIndex, std::vector<uint32_t>, kMaxBindGroups> mDynamicOffsets = {};
    ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mVertexBufferSlotsUsed;
    bool mIndexBufferSet = false;
    wgpu::IndexFormat mIndexFormat;
    uint64_t mIndexBufferSize = 0;

    ityp::array<VertexBufferSlot, uint64_t, kMaxVertexBuffers> mVertexBufferSizes = {};

    PipelineLayoutBase* mLastPipelineLayout = nullptr;
    PipelineBase* mLastPipeline = nullptr;
    const RequiredBufferSizes* mMinBufferSizes = nullptr;
};

}  // namespace dawn::native
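Not part of this change: a toy, standalone sketch of the aspect-bitset pattern the tracker uses. The aspect names mirror the enum in the .cpp file; everything else is made up for illustration.

#include <bitset>
#include <cstdio>
#include <string>

int main() {
    enum { kPipeline, kBindGroups, kVertexBuffers, kIndexBuffer, kCount };
    using Aspects = std::bitset<kCount>;

    // Aspects required by an indexed draw, analogous to kDrawIndexedAspects above.
    const Aspects kDrawIndexed =
        (1 << kPipeline) | (1 << kBindGroups) | (1 << kVertexBuffers) | (1 << kIndexBuffer);

    Aspects satisfied;
    satisfied.set(kPipeline);       // e.g. SetRenderPipeline()
    satisfied.set(kVertexBuffers);  // e.g. SetVertexBuffer()

    // Missing aspects are the required ones not yet satisfied; the lazy ones get recomputed,
    // and anything still missing afterwards becomes a validation error.
    Aspects missing = kDrawIndexed & ~satisfied;
    std::printf("missing aspects: %s\n", missing.to_string().c_str());
    return 0;
}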
File diff suppressed because it is too large
@ -27,96 +27,96 @@
namespace dawn::native { namespace dawn::native {
enum class UsageValidationMode; enum class UsageValidationMode;
MaybeError ValidateCommandEncoderDescriptor(const DeviceBase* device, MaybeError ValidateCommandEncoderDescriptor(const DeviceBase* device,
const CommandEncoderDescriptor* descriptor); const CommandEncoderDescriptor* descriptor);
class CommandEncoder final : public ApiObjectBase { class CommandEncoder final : public ApiObjectBase {
public: public:
static Ref<CommandEncoder> Create(DeviceBase* device, static Ref<CommandEncoder> Create(DeviceBase* device,
const CommandEncoderDescriptor* descriptor); const CommandEncoderDescriptor* descriptor);
static CommandEncoder* MakeError(DeviceBase* device); static CommandEncoder* MakeError(DeviceBase* device);
ObjectType GetType() const override; ObjectType GetType() const override;
CommandIterator AcquireCommands(); CommandIterator AcquireCommands();
    CommandBufferResourceUsage AcquireResourceUsages();

    void TrackUsedQuerySet(QuerySetBase* querySet);
    void TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex);

    // Dawn API
    ComputePassEncoder* APIBeginComputePass(const ComputePassDescriptor* descriptor);
    RenderPassEncoder* APIBeginRenderPass(const RenderPassDescriptor* descriptor);

    void APICopyBufferToBuffer(BufferBase* source,
                               uint64_t sourceOffset,
                               BufferBase* destination,
                               uint64_t destinationOffset,
                               uint64_t size);
    void APICopyBufferToTexture(const ImageCopyBuffer* source,
                                const ImageCopyTexture* destination,
                                const Extent3D* copySize);
    void APICopyTextureToBuffer(const ImageCopyTexture* source,
                                const ImageCopyBuffer* destination,
                                const Extent3D* copySize);
    void APICopyTextureToTexture(const ImageCopyTexture* source,
                                 const ImageCopyTexture* destination,
                                 const Extent3D* copySize);
    void APICopyTextureToTextureInternal(const ImageCopyTexture* source,
                                         const ImageCopyTexture* destination,
                                         const Extent3D* copySize);
    void APIClearBuffer(BufferBase* destination, uint64_t destinationOffset, uint64_t size);

    void APIInjectValidationError(const char* message);
    void APIInsertDebugMarker(const char* groupLabel);
    void APIPopDebugGroup();
    void APIPushDebugGroup(const char* groupLabel);

    void APIResolveQuerySet(QuerySetBase* querySet,
                            uint32_t firstQuery,
                            uint32_t queryCount,
                            BufferBase* destination,
                            uint64_t destinationOffset);
    void APIWriteBuffer(BufferBase* buffer,
                        uint64_t bufferOffset,
                        const uint8_t* data,
                        uint64_t size);
    void APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex);

    CommandBufferBase* APIFinish(const CommandBufferDescriptor* descriptor = nullptr);

    Ref<ComputePassEncoder> BeginComputePass(const ComputePassDescriptor* descriptor = nullptr);
    Ref<RenderPassEncoder> BeginRenderPass(const RenderPassDescriptor* descriptor);
    ResultOrError<Ref<CommandBufferBase>> Finish(
        const CommandBufferDescriptor* descriptor = nullptr);

  private:
    CommandEncoder(DeviceBase* device, const CommandEncoderDescriptor* descriptor);
    CommandEncoder(DeviceBase* device, ObjectBase::ErrorTag tag);

    void DestroyImpl() override;

    // Helper to be able to implement both APICopyTextureToTexture and
    // APICopyTextureToTextureInternal. The only difference between both
    // copies is that the Internal one will also check internal usage.
    template <bool Internal>
    void APICopyTextureToTextureHelper(const ImageCopyTexture* source,
                                       const ImageCopyTexture* destination,
                                       const Extent3D* copySize);

    MaybeError ValidateFinish() const;

    EncodingContext mEncodingContext;
    std::set<BufferBase*> mTopLevelBuffers;
    std::set<TextureBase*> mTopLevelTextures;
    std::set<QuerySetBase*> mUsedQuerySets;

    uint64_t mDebugGroupStackSize = 0;

    UsageValidationMode mUsageValidationMode;
};

}  // namespace dawn::native
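For orientation, a minimal sketch of how the recording interface above is typically driven through the public webgpu_cpp bindings; the device, queue and the two buffers are assumed to exist already, and the mapping comments are illustrative rather than exhaustive:

    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
    encoder.CopyBufferToBuffer(src, 0, dst, 0, 256);  // reaches APICopyBufferToBuffer
    encoder.ClearBuffer(dst, 0, 256);                 // reaches APIClearBuffer
    wgpu::CommandBuffer commands = encoder.Finish();  // reaches APIFinish / Finish()
    queue.Submit(1, &commands);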

@ -32,461 +32,453 @@
namespace dawn::native {

// Performs validation of the "synchronization scope" rules of WebGPU.
MaybeError ValidateSyncScopeResourceUsage(const SyncScopeResourceUsage& scope) {
    // Buffers can only be used as single-write or multiple read.
    for (size_t i = 0; i < scope.bufferUsages.size(); ++i) {
        const wgpu::BufferUsage usage = scope.bufferUsages[i];
        bool readOnly = IsSubset(usage, kReadOnlyBufferUsages);
        bool singleUse = wgpu::HasZeroOrOneBits(usage);
        DAWN_INVALID_IF(!readOnly && !singleUse,
                        "%s usage (%s) includes writable usage and another usage in the same "
                        "synchronization scope.",
                        scope.buffers[i], usage);
    }

    // Check that every single subresource is used as either a single-write usage or a
    // combination of readonly usages.
    for (size_t i = 0; i < scope.textureUsages.size(); ++i) {
        const TextureSubresourceUsage& textureUsage = scope.textureUsages[i];
        MaybeError error = {};
        textureUsage.Iterate([&](const SubresourceRange&, const wgpu::TextureUsage& usage) {
            bool readOnly = IsSubset(usage, kReadOnlyTextureUsages);
            bool singleUse = wgpu::HasZeroOrOneBits(usage);
            if (!readOnly && !singleUse && !error.IsError()) {
                error = DAWN_FORMAT_VALIDATION_ERROR(
                    "%s usage (%s) includes writable usage and another usage in the same "
                    "synchronization scope.",
                    scope.textures[i], usage);
            }
        });
        DAWN_TRY(std::move(error));
    }
    return {};
}

MaybeError ValidateTimestampQuery(const DeviceBase* device,
                                  const QuerySetBase* querySet,
                                  uint32_t queryIndex) {
    DAWN_TRY(device->ValidateObject(querySet));

    DAWN_INVALID_IF(querySet->GetQueryType() != wgpu::QueryType::Timestamp,
                    "The type of %s is not %s.", querySet, wgpu::QueryType::Timestamp);

    DAWN_INVALID_IF(queryIndex >= querySet->GetQueryCount(),
                    "Query index (%u) exceeds the number of queries (%u) in %s.", queryIndex,
                    querySet->GetQueryCount(), querySet);

    return {};
}
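As an illustration of the rule enforced above, a self-contained sketch of the "all read-only, or a single usage" predicate using plain bit operations; the masks and helper names here are stand-ins rather than the real wgpu::BufferUsage values:

    #include <cstdint>

    // Hypothetical stand-in masks for the real usage bits.
    constexpr uint32_t kCopySrc = 0x1, kCopyDst = 0x2, kStorage = 0x80;
    constexpr uint32_t kReadOnlyUsages = kCopySrc;  // stand-in for kReadOnlyBufferUsages

    bool IsSubset(uint32_t subset, uint32_t set) { return (subset & ~set) == 0; }
    bool HasZeroOrOneBits(uint32_t v) { return (v & (v - 1)) == 0; }

    // A usage combination is valid in a sync scope if it is entirely read-only,
    // or if it is a single (possibly writable) usage.
    bool IsValidSyncScopeUsage(uint32_t usage) {
        return IsSubset(usage, kReadOnlyUsages) || HasZeroOrOneBits(usage);
    }
    // IsValidSyncScopeUsage(kCopySrc)            -> true  (read-only)
    // IsValidSyncScopeUsage(kStorage)            -> true  (single use)
    // IsValidSyncScopeUsage(kStorage | kCopyDst) -> false (writable + another usage)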
MaybeError ValidateWriteBuffer(const DeviceBase* device,
                               const BufferBase* buffer,
                               uint64_t bufferOffset,
                               uint64_t size) {
    DAWN_TRY(device->ValidateObject(buffer));

    DAWN_INVALID_IF(bufferOffset % 4 != 0, "BufferOffset (%u) is not a multiple of 4.",
                    bufferOffset);

    DAWN_INVALID_IF(size % 4 != 0, "Size (%u) is not a multiple of 4.", size);

    uint64_t bufferSize = buffer->GetSize();
    DAWN_INVALID_IF(bufferOffset > bufferSize || size > (bufferSize - bufferOffset),
                    "Write range (bufferOffset: %u, size: %u) does not fit in %s size (%u).",
                    bufferOffset, size, buffer, bufferSize);

    DAWN_TRY(ValidateCanUseAs(buffer, wgpu::BufferUsage::CopyDst));

    return {};
}

bool IsRangeOverlapped(uint32_t startA, uint32_t startB, uint32_t length) {
    uint32_t maxStart = std::max(startA, startB);
    uint32_t minStart = std::min(startA, startB);
    return static_cast<uint64_t>(minStart) + static_cast<uint64_t>(length) >
           static_cast<uint64_t>(maxStart);
}

ResultOrError<uint64_t> ComputeRequiredBytesInCopy(const TexelBlockInfo& blockInfo,
                                                   const Extent3D& copySize,
                                                   uint32_t bytesPerRow,
                                                   uint32_t rowsPerImage) {
    ASSERT(copySize.width % blockInfo.width == 0);
    ASSERT(copySize.height % blockInfo.height == 0);
    uint32_t widthInBlocks = copySize.width / blockInfo.width;
    uint32_t heightInBlocks = copySize.height / blockInfo.height;
    uint64_t bytesInLastRow = Safe32x32(widthInBlocks, blockInfo.byteSize);

    if (copySize.depthOrArrayLayers == 0) {
        return 0;
    }

    // Check for potential overflows for the rest of the computations. We have the following
    // inequalities:
    //
    //   bytesInLastRow <= bytesPerRow
    //   heightInBlocks <= rowsPerImage
    //
    // So:
    //
    //   bytesInLastImage = bytesPerRow * (heightInBlocks - 1) + bytesInLastRow
    //                    <= bytesPerRow * heightInBlocks
    //                    <= bytesPerRow * rowsPerImage
    //                    <= bytesPerImage
    //
    // This means that if the computation of depth * bytesPerImage doesn't overflow, none of the
    // computations for requiredBytesInCopy will. (and it's not a very pessimizing check)
    ASSERT(copySize.depthOrArrayLayers <= 1 || (bytesPerRow != wgpu::kCopyStrideUndefined &&
                                                rowsPerImage != wgpu::kCopyStrideUndefined));
    uint64_t bytesPerImage = Safe32x32(bytesPerRow, rowsPerImage);
    DAWN_INVALID_IF(
        bytesPerImage > std::numeric_limits<uint64_t>::max() / copySize.depthOrArrayLayers,
        "The number of bytes per image (%u) exceeds the maximum (%u) when copying %u images.",
        bytesPerImage, std::numeric_limits<uint64_t>::max() / copySize.depthOrArrayLayers,
        copySize.depthOrArrayLayers);

    uint64_t requiredBytesInCopy = bytesPerImage * (copySize.depthOrArrayLayers - 1);
    if (heightInBlocks > 0) {
        ASSERT(heightInBlocks <= 1 || bytesPerRow != wgpu::kCopyStrideUndefined);
        uint64_t bytesInLastImage = Safe32x32(bytesPerRow, heightInBlocks - 1) + bytesInLastRow;
        requiredBytesInCopy += bytesInLastImage;
    }
    return requiredBytesInCopy;
}

MaybeError ValidateCopySizeFitsInBuffer(const Ref<BufferBase>& buffer,
                                        uint64_t offset,
                                        uint64_t size) {
    uint64_t bufferSize = buffer->GetSize();
    bool fitsInBuffer = offset <= bufferSize && (size <= (bufferSize - offset));
    DAWN_INVALID_IF(!fitsInBuffer,
                    "Copy range (offset: %u, size: %u) does not fit in %s size (%u).", offset, size,
                    buffer.Get(), bufferSize);

    return {};
}
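A worked example of the computation above, with illustrative numbers: copying 3 array layers of a 256x64 RGBA8 texture (4-byte, 1x1 texel blocks) with bytesPerRow = 1024 and rowsPerImage = 64:

    uint64_t bytesInLastRow = 256u * 4u;         // 1024
    uint64_t bytesPerImage = 1024u * 64u;        // 65536
    uint64_t required = bytesPerImage * (3 - 1)  // full images: 131072
                        + 1024u * (64 - 1)       // full rows of the last image: 64512
                        + bytesInLastRow;        // last row: 1024
    // required == 196608, i.e. 3 * 65536, as expected for a tightly packed layout.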
// Replace wgpu::kCopyStrideUndefined with real values, so backends don't have to think about
// it.
void ApplyDefaultTextureDataLayoutOptions(TextureDataLayout* layout,
                                          const TexelBlockInfo& blockInfo,
                                          const Extent3D& copyExtent) {
    ASSERT(layout != nullptr);
    ASSERT(copyExtent.height % blockInfo.height == 0);
    uint32_t heightInBlocks = copyExtent.height / blockInfo.height;

    if (layout->bytesPerRow == wgpu::kCopyStrideUndefined) {
        ASSERT(copyExtent.width % blockInfo.width == 0);
        uint32_t widthInBlocks = copyExtent.width / blockInfo.width;
        uint32_t bytesInLastRow = widthInBlocks * blockInfo.byteSize;

        ASSERT(heightInBlocks <= 1 && copyExtent.depthOrArrayLayers <= 1);
        layout->bytesPerRow = Align(bytesInLastRow, kTextureBytesPerRowAlignment);
    }
    if (layout->rowsPerImage == wgpu::kCopyStrideUndefined) {
        ASSERT(copyExtent.depthOrArrayLayers <= 1);
        layout->rowsPerImage = heightInBlocks;
    }
}

MaybeError ValidateLinearTextureData(const TextureDataLayout& layout,
                                     uint64_t byteSize,
                                     const TexelBlockInfo& blockInfo,
                                     const Extent3D& copyExtent) {
    ASSERT(copyExtent.height % blockInfo.height == 0);
    uint32_t heightInBlocks = copyExtent.height / blockInfo.height;

    // TODO(dawn:563): Right now kCopyStrideUndefined will be formatted as a large value in the
    // validation message. Investigate ways to make it print as a more readable symbol.
    DAWN_INVALID_IF(
        copyExtent.depthOrArrayLayers > 1 && (layout.bytesPerRow == wgpu::kCopyStrideUndefined ||
                                              layout.rowsPerImage == wgpu::kCopyStrideUndefined),
        "Copy depth (%u) is > 1, but bytesPerRow (%u) or rowsPerImage (%u) are not specified.",
        copyExtent.depthOrArrayLayers, layout.bytesPerRow, layout.rowsPerImage);

    DAWN_INVALID_IF(heightInBlocks > 1 && layout.bytesPerRow == wgpu::kCopyStrideUndefined,
                    "HeightInBlocks (%u) is > 1, but bytesPerRow is not specified.",
                    heightInBlocks);

    // Validation for other members in layout:
    ASSERT(copyExtent.width % blockInfo.width == 0);
    uint32_t widthInBlocks = copyExtent.width / blockInfo.width;
    ASSERT(Safe32x32(widthInBlocks, blockInfo.byteSize) <= std::numeric_limits<uint32_t>::max());
    uint32_t bytesInLastRow = widthInBlocks * blockInfo.byteSize;

    // These != wgpu::kCopyStrideUndefined checks are technically redundant with the > checks,
    // but they should get optimized out.
    DAWN_INVALID_IF(
        layout.bytesPerRow != wgpu::kCopyStrideUndefined && bytesInLastRow > layout.bytesPerRow,
        "The byte size of each row (%u) is > bytesPerRow (%u).", bytesInLastRow,
        layout.bytesPerRow);

    DAWN_INVALID_IF(
        layout.rowsPerImage != wgpu::kCopyStrideUndefined && heightInBlocks > layout.rowsPerImage,
        "The height of each image in blocks (%u) is > rowsPerImage (%u).", heightInBlocks,
        layout.rowsPerImage);

    // We compute required bytes in copy after validating texel block alignments
    // because the divisibility conditions are necessary for the algorithm to be valid,
    // also the bytesPerRow bound is necessary to avoid overflows.
    uint64_t requiredBytesInCopy;
    DAWN_TRY_ASSIGN(
        requiredBytesInCopy,
        ComputeRequiredBytesInCopy(blockInfo, copyExtent, layout.bytesPerRow, layout.rowsPerImage));

    bool fitsInData =
        layout.offset <= byteSize && (requiredBytesInCopy <= (byteSize - layout.offset));
    DAWN_INVALID_IF(
        !fitsInData,
        "Required size for texture data layout (%u) exceeds the linear data size (%u) with "
        "offset (%u).",
        requiredBytesInCopy, byteSize, layout.offset);

    return {};
}
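For a concrete picture of the defaulting above, consider a 100x1x1 RGBA8 copy that leaves both strides as wgpu::kCopyStrideUndefined (values illustrative; kTextureBytesPerRowAlignment is 256 in WebGPU):

    // bytesInLastRow       = 100 * 4 = 400
    // layout->bytesPerRow  = Align(400, 256) = 512
    // layout->rowsPerImage = heightInBlocks = 1
    // ValidateLinearTextureData then only has to reason about a fully specified layout.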
MaybeError ValidateImageCopyBuffer(DeviceBase const* device,
                                   const ImageCopyBuffer& imageCopyBuffer) {
    DAWN_TRY(device->ValidateObject(imageCopyBuffer.buffer));
    if (imageCopyBuffer.layout.bytesPerRow != wgpu::kCopyStrideUndefined) {
        DAWN_INVALID_IF(imageCopyBuffer.layout.bytesPerRow % kTextureBytesPerRowAlignment != 0,
                        "bytesPerRow (%u) is not a multiple of %u.",
                        imageCopyBuffer.layout.bytesPerRow, kTextureBytesPerRowAlignment);
    }

    return {};
}

MaybeError ValidateImageCopyTexture(DeviceBase const* device,
                                    const ImageCopyTexture& textureCopy,
                                    const Extent3D& copySize) {
    const TextureBase* texture = textureCopy.texture;
    DAWN_TRY(device->ValidateObject(texture));

    DAWN_INVALID_IF(textureCopy.mipLevel >= texture->GetNumMipLevels(),
                    "MipLevel (%u) is greater than the number of mip levels (%u) in %s.",
                    textureCopy.mipLevel, texture->GetNumMipLevels(), texture);

    DAWN_TRY(ValidateTextureAspect(textureCopy.aspect));
    DAWN_INVALID_IF(SelectFormatAspects(texture->GetFormat(), textureCopy.aspect) == Aspect::None,
                    "%s format (%s) does not have the selected aspect (%s).", texture,
                    texture->GetFormat().format, textureCopy.aspect);

    if (texture->GetSampleCount() > 1 || texture->GetFormat().HasDepthOrStencil()) {
        Extent3D subresourceSize = texture->GetMipLevelPhysicalSize(textureCopy.mipLevel);
        ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
        DAWN_INVALID_IF(
            textureCopy.origin.x != 0 || textureCopy.origin.y != 0 ||
                subresourceSize.width != copySize.width ||
                subresourceSize.height != copySize.height,
            "Copy origin (%s) and size (%s) does not cover the entire subresource (origin: "
            "[x: 0, y: 0], size: %s) of %s. The entire subresource must be copied when the "
            "format (%s) is a depth/stencil format or the sample count (%u) is > 1.",
            &textureCopy.origin, &copySize, &subresourceSize, texture, texture->GetFormat().format,
            texture->GetSampleCount());
    }

    return {};
}

MaybeError ValidateTextureCopyRange(DeviceBase const* device,
                                    const ImageCopyTexture& textureCopy,
                                    const Extent3D& copySize) {
    const TextureBase* texture = textureCopy.texture;

    // Validation for the copy being in-bounds:
    Extent3D mipSize = texture->GetMipLevelPhysicalSize(textureCopy.mipLevel);
    // For 1D/2D textures, include the array layer as depth so it can be checked with other
    // dimensions.
    if (texture->GetDimension() != wgpu::TextureDimension::e3D) {
        mipSize.depthOrArrayLayers = texture->GetArrayLayers();
    }
    // All texture dimensions are in uint32_t so by doing checks in uint64_t we avoid
    // overflows.
    DAWN_INVALID_IF(
        static_cast<uint64_t>(textureCopy.origin.x) + static_cast<uint64_t>(copySize.width) >
                static_cast<uint64_t>(mipSize.width) ||
            static_cast<uint64_t>(textureCopy.origin.y) + static_cast<uint64_t>(copySize.height) >
                static_cast<uint64_t>(mipSize.height) ||
            static_cast<uint64_t>(textureCopy.origin.z) +
                    static_cast<uint64_t>(copySize.depthOrArrayLayers) >
                static_cast<uint64_t>(mipSize.depthOrArrayLayers),
        "Texture copy range (origin: %s, copySize: %s) touches outside of %s mip level %u "
        "size (%s).",
        &textureCopy.origin, &copySize, texture, textureCopy.mipLevel, &mipSize);

    // Validation for the texel block alignments:
    const Format& format = textureCopy.texture->GetFormat();
    if (format.isCompressed) {
        const TexelBlockInfo& blockInfo = format.GetAspectInfo(textureCopy.aspect).block;
        DAWN_INVALID_IF(
            textureCopy.origin.x % blockInfo.width != 0,
            "Texture copy origin.x (%u) is not a multiple of compressed texture format block "
            "width (%u).",
            textureCopy.origin.x, blockInfo.width);
        DAWN_INVALID_IF(
            textureCopy.origin.y % blockInfo.height != 0,
            "Texture copy origin.y (%u) is not a multiple of compressed texture format block "
            "height (%u).",
            textureCopy.origin.y, blockInfo.height);
        DAWN_INVALID_IF(
            copySize.width % blockInfo.width != 0,
            "copySize.width (%u) is not a multiple of compressed texture format block width "
            "(%u).",
            copySize.width, blockInfo.width);
        DAWN_INVALID_IF(copySize.height % blockInfo.height != 0,
                        "copySize.height (%u) is not a multiple of compressed texture format block "
                        "height (%u).",
                        copySize.height, blockInfo.height);
    }

    return {};
}
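A small sketch of why the bounds checks above are done in uint64_t rather than uint32_t (numbers chosen to force 32-bit wraparound):

    uint32_t originX = 4'294'967'000u;  // close to UINT32_MAX
    uint32_t width = 300u;
    uint32_t mipWidth = 4096u;
    bool broken = (originX + width) > mipWidth;                       // false: the 32-bit sum wraps to 4
    bool correct = (uint64_t(originX) + uint64_t(width)) > mipWidth;  // true: the copy is out of bounds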
// Always returns a single aspect (color, stencil, depth, or ith plane for multi-planar
// formats).
ResultOrError<Aspect> SingleAspectUsedByImageCopyTexture(const ImageCopyTexture& view) {
    const Format& format = view.texture->GetFormat();
    switch (view.aspect) {
        case wgpu::TextureAspect::All: {
            DAWN_INVALID_IF(
                !HasOneBit(format.aspects),
                "More than a single aspect (%s) is selected for multi-planar format (%s) in "
                "%s <-> linear data copy.",
                view.aspect, format.format, view.texture);

            Aspect single = format.aspects;
            return single;
        }
        case wgpu::TextureAspect::DepthOnly:
            ASSERT(format.aspects & Aspect::Depth);
            return Aspect::Depth;
        case wgpu::TextureAspect::StencilOnly:
            ASSERT(format.aspects & Aspect::Stencil);
            return Aspect::Stencil;
        case wgpu::TextureAspect::Plane0Only:
        case wgpu::TextureAspect::Plane1Only:
            break;
    }
    UNREACHABLE();
}

MaybeError ValidateLinearToDepthStencilCopyRestrictions(const ImageCopyTexture& dst) {
    Aspect aspectUsed;
    DAWN_TRY_ASSIGN(aspectUsed, SingleAspectUsedByImageCopyTexture(dst));

    const Format& format = dst.texture->GetFormat();
    switch (format.format) {
        case wgpu::TextureFormat::Depth16Unorm:
            return {};
        default:
            DAWN_INVALID_IF(aspectUsed == Aspect::Depth,
                            "Cannot copy into the depth aspect of %s with format %s.", dst.texture,
                            format.format);
            break;
    }

    return {};
}

MaybeError ValidateTextureToTextureCopyCommonRestrictions(const ImageCopyTexture& src,
                                                          const ImageCopyTexture& dst,
                                                          const Extent3D& copySize) {
    const uint32_t srcSamples = src.texture->GetSampleCount();
    const uint32_t dstSamples = dst.texture->GetSampleCount();

    DAWN_INVALID_IF(
        srcSamples != dstSamples,
        "Source %s sample count (%u) and destination %s sample count (%u) does not match.",
        src.texture, srcSamples, dst.texture, dstSamples);

    // Metal cannot select a single aspect for texture-to-texture copies.
    const Format& format = src.texture->GetFormat();
    DAWN_INVALID_IF(
        SelectFormatAspects(format, src.aspect) != format.aspects,
        "Source %s aspect (%s) doesn't select all the aspects of the source format (%s).",
        src.texture, src.aspect, format.format);

    DAWN_INVALID_IF(
        SelectFormatAspects(format, dst.aspect) != format.aspects,
        "Destination %s aspect (%s) doesn't select all the aspects of the destination format "
        "(%s).",
        dst.texture, dst.aspect, format.format);

    if (src.texture == dst.texture) {
        switch (src.texture->GetDimension()) {
            case wgpu::TextureDimension::e1D:
                ASSERT(src.mipLevel == 0 && src.origin.z == 0 && dst.origin.z == 0);
                return DAWN_FORMAT_VALIDATION_ERROR("Copy is from %s to itself.", src.texture);

            case wgpu::TextureDimension::e2D:
                DAWN_INVALID_IF(
                    src.mipLevel == dst.mipLevel &&
                        IsRangeOverlapped(src.origin.z, dst.origin.z, copySize.depthOrArrayLayers),
                    "Copy source and destination are overlapping layer ranges "
                    "([%u, %u) and [%u, %u)) of %s mip level %u",
                    src.origin.z, src.origin.z + copySize.depthOrArrayLayers, dst.origin.z,
                    dst.origin.z + copySize.depthOrArrayLayers, src.texture, src.mipLevel);
                break;

            case wgpu::TextureDimension::e3D:
                DAWN_INVALID_IF(src.mipLevel == dst.mipLevel,
                                "Copy is from %s mip level %u to itself.", src.texture,
                                src.mipLevel);
                break;
        }
    }

    return {};
}

MaybeError ValidateTextureToTextureCopyRestrictions(const ImageCopyTexture& src,
                                                    const ImageCopyTexture& dst,
                                                    const Extent3D& copySize) {
    // Metal requires that texture-to-texture copies happen between texture formats that are
    // equal to each other or differ only in srgb-ness.
    DAWN_INVALID_IF(!src.texture->GetFormat().CopyCompatibleWith(dst.texture->GetFormat()),
                    "Source %s format (%s) and destination %s format (%s) are not copy compatible.",
                    src.texture, src.texture->GetFormat().format, dst.texture,
                    dst.texture->GetFormat().format);

    return ValidateTextureToTextureCopyCommonRestrictions(src, dst, copySize);
}
MaybeError ValidateCanUseAs(const TextureBase* texture,
                            wgpu::TextureUsage usage,
                            UsageValidationMode mode) {
    ASSERT(wgpu::HasZeroOrOneBits(usage));
    switch (mode) {
        case UsageValidationMode::Default:
            DAWN_INVALID_IF(!(texture->GetUsage() & usage), "%s usage (%s) doesn't include %s.",
                            texture, texture->GetUsage(), usage);
            break;
        case UsageValidationMode::Internal:
            DAWN_INVALID_IF(!(texture->GetInternalUsage() & usage),
                            "%s internal usage (%s) doesn't include %s.", texture,
                            texture->GetInternalUsage(), usage);
            break;
    }
    return {};
}

MaybeError ValidateCanUseAs(const BufferBase* buffer, wgpu::BufferUsage usage) {
    ASSERT(wgpu::HasZeroOrOneBits(usage));
    DAWN_INVALID_IF(!(buffer->GetUsageExternalOnly() & usage), "%s usage (%s) doesn't include %s.",
                    buffer, buffer->GetUsageExternalOnly(), usage);
    return {};
}

}  // namespace dawn::native

@ -23,74 +23,74 @@
namespace dawn::native {

class QuerySetBase;
struct SyncScopeResourceUsage;
struct TexelBlockInfo;

MaybeError ValidateSyncScopeResourceUsage(const SyncScopeResourceUsage& usage);

MaybeError ValidateTimestampQuery(const DeviceBase* device,
                                  const QuerySetBase* querySet,
                                  uint32_t queryIndex);

MaybeError ValidateWriteBuffer(const DeviceBase* device,
                               const BufferBase* buffer,
                               uint64_t bufferOffset,
                               uint64_t size);

template <typename A, typename B>
DAWN_FORCE_INLINE uint64_t Safe32x32(A a, B b) {
    static_assert(std::is_same<A, uint32_t>::value, "'a' must be uint32_t");
    static_assert(std::is_same<B, uint32_t>::value, "'b' must be uint32_t");
    return uint64_t(a) * uint64_t(b);
}

ResultOrError<uint64_t> ComputeRequiredBytesInCopy(const TexelBlockInfo& blockInfo,
                                                   const Extent3D& copySize,
                                                   uint32_t bytesPerRow,
                                                   uint32_t rowsPerImage);

void ApplyDefaultTextureDataLayoutOptions(TextureDataLayout* layout,
                                          const TexelBlockInfo& blockInfo,
                                          const Extent3D& copyExtent);
MaybeError ValidateLinearTextureData(const TextureDataLayout& layout,
                                     uint64_t byteSize,
                                     const TexelBlockInfo& blockInfo,
                                     const Extent3D& copyExtent);
MaybeError ValidateTextureCopyRange(DeviceBase const* device,
                                    const ImageCopyTexture& imageCopyTexture,
                                    const Extent3D& copySize);
ResultOrError<Aspect> SingleAspectUsedByImageCopyTexture(const ImageCopyTexture& view);
MaybeError ValidateLinearToDepthStencilCopyRestrictions(const ImageCopyTexture& dst);

MaybeError ValidateImageCopyBuffer(DeviceBase const* device,
                                   const ImageCopyBuffer& imageCopyBuffer);
MaybeError ValidateImageCopyTexture(DeviceBase const* device,
                                    const ImageCopyTexture& imageCopyTexture,
                                    const Extent3D& copySize);

MaybeError ValidateCopySizeFitsInBuffer(const Ref<BufferBase>& buffer,
                                        uint64_t offset,
                                        uint64_t size);

bool IsRangeOverlapped(uint32_t startA, uint32_t startB, uint32_t length);

MaybeError ValidateTextureToTextureCopyCommonRestrictions(const ImageCopyTexture& src,
                                                          const ImageCopyTexture& dst,
                                                          const Extent3D& copySize);
MaybeError ValidateTextureToTextureCopyRestrictions(const ImageCopyTexture& src,
                                                    const ImageCopyTexture& dst,
                                                    const Extent3D& copySize);

enum class UsageValidationMode {
    Default,
    Internal,
};

MaybeError ValidateCanUseAs(const TextureBase* texture,
                            wgpu::TextureUsage usage,
                            UsageValidationMode mode);
MaybeError ValidateCanUseAs(const BufferBase* buffer, wgpu::BufferUsage usage);

}  // namespace dawn::native

@ -25,341 +25,340 @@
namespace dawn::native {
void FreeCommands(CommandIterator* commands) {
commands->Reset();
Command type;
while (commands->NextCommandId(&type)) {
switch (type) {
case Command::BeginComputePass: {
BeginComputePassCmd* begin = commands->NextCommand<BeginComputePassCmd>();
begin->~BeginComputePassCmd();
break;
}
case Command::BeginOcclusionQuery: {
BeginOcclusionQueryCmd* begin = commands->NextCommand<BeginOcclusionQueryCmd>();
begin->~BeginOcclusionQueryCmd();
break;
}
case Command::BeginRenderPass: {
BeginRenderPassCmd* begin = commands->NextCommand<BeginRenderPassCmd>();
begin->~BeginRenderPassCmd();
break;
}
case Command::CopyBufferToBuffer: {
CopyBufferToBufferCmd* copy = commands->NextCommand<CopyBufferToBufferCmd>();
copy->~CopyBufferToBufferCmd();
break;
}
case Command::CopyBufferToTexture: {
CopyBufferToTextureCmd* copy = commands->NextCommand<CopyBufferToTextureCmd>();
copy->~CopyBufferToTextureCmd();
break;
}
case Command::CopyTextureToBuffer: {
CopyTextureToBufferCmd* copy = commands->NextCommand<CopyTextureToBufferCmd>();
copy->~CopyTextureToBufferCmd();
break;
}
case Command::CopyTextureToTexture: {
CopyTextureToTextureCmd* copy =
commands->NextCommand<CopyTextureToTextureCmd>();
copy->~CopyTextureToTextureCmd();
break;
}
case Command::Dispatch: {
DispatchCmd* dispatch = commands->NextCommand<DispatchCmd>();
dispatch->~DispatchCmd();
break;
}
case Command::DispatchIndirect: {
DispatchIndirectCmd* dispatch = commands->NextCommand<DispatchIndirectCmd>();
dispatch->~DispatchIndirectCmd();
break;
}
case Command::Draw: {
DrawCmd* draw = commands->NextCommand<DrawCmd>();
draw->~DrawCmd();
break;
}
case Command::DrawIndexed: {
DrawIndexedCmd* draw = commands->NextCommand<DrawIndexedCmd>();
draw->~DrawIndexedCmd();
break;
}
case Command::DrawIndirect: {
DrawIndirectCmd* draw = commands->NextCommand<DrawIndirectCmd>();
draw->~DrawIndirectCmd();
break;
}
case Command::DrawIndexedIndirect: {
DrawIndexedIndirectCmd* draw = commands->NextCommand<DrawIndexedIndirectCmd>();
draw->~DrawIndexedIndirectCmd();
break;
}
case Command::EndComputePass: {
EndComputePassCmd* cmd = commands->NextCommand<EndComputePassCmd>();
cmd->~EndComputePassCmd();
break;
}
case Command::EndOcclusionQuery: {
EndOcclusionQueryCmd* cmd = commands->NextCommand<EndOcclusionQueryCmd>();
cmd->~EndOcclusionQueryCmd();
break;
}
case Command::EndRenderPass: {
EndRenderPassCmd* cmd = commands->NextCommand<EndRenderPassCmd>();
cmd->~EndRenderPassCmd();
break;
}
case Command::ExecuteBundles: {
ExecuteBundlesCmd* cmd = commands->NextCommand<ExecuteBundlesCmd>();
auto bundles = commands->NextData<Ref<RenderBundleBase>>(cmd->count);
for (size_t i = 0; i < cmd->count; ++i) {
(&bundles[i])->~Ref<RenderBundleBase>();
}
cmd->~ExecuteBundlesCmd();
break;
}
case Command::ClearBuffer: {
ClearBufferCmd* cmd = commands->NextCommand<ClearBufferCmd>();
cmd->~ClearBufferCmd();
break;
}
case Command::InsertDebugMarker: {
InsertDebugMarkerCmd* cmd = commands->NextCommand<InsertDebugMarkerCmd>();
commands->NextData<char>(cmd->length + 1);
cmd->~InsertDebugMarkerCmd();
break;
}
case Command::PopDebugGroup: {
PopDebugGroupCmd* cmd = commands->NextCommand<PopDebugGroupCmd>();
cmd->~PopDebugGroupCmd();
break;
}
case Command::PushDebugGroup: {
PushDebugGroupCmd* cmd = commands->NextCommand<PushDebugGroupCmd>();
commands->NextData<char>(cmd->length + 1);
cmd->~PushDebugGroupCmd();
break;
}
case Command::ResolveQuerySet: {
ResolveQuerySetCmd* cmd = commands->NextCommand<ResolveQuerySetCmd>();
cmd->~ResolveQuerySetCmd();
break;
}
case Command::SetComputePipeline: {
SetComputePipelineCmd* cmd = commands->NextCommand<SetComputePipelineCmd>();
cmd->~SetComputePipelineCmd();
break;
}
case Command::SetRenderPipeline: {
SetRenderPipelineCmd* cmd = commands->NextCommand<SetRenderPipelineCmd>();
cmd->~SetRenderPipelineCmd();
break;
}
case Command::SetStencilReference: {
SetStencilReferenceCmd* cmd = commands->NextCommand<SetStencilReferenceCmd>();
cmd->~SetStencilReferenceCmd();
break;
}
case Command::SetViewport: {
SetViewportCmd* cmd = commands->NextCommand<SetViewportCmd>();
cmd->~SetViewportCmd();
break;
}
case Command::SetScissorRect: {
SetScissorRectCmd* cmd = commands->NextCommand<SetScissorRectCmd>();
cmd->~SetScissorRectCmd();
break;
}
case Command::SetBlendConstant: {
SetBlendConstantCmd* cmd = commands->NextCommand<SetBlendConstantCmd>();
cmd->~SetBlendConstantCmd();
break;
}
case Command::SetBindGroup: {
SetBindGroupCmd* cmd = commands->NextCommand<SetBindGroupCmd>();
if (cmd->dynamicOffsetCount > 0) {
commands->NextData<uint32_t>(cmd->dynamicOffsetCount);
}
cmd->~SetBindGroupCmd();
break;
}
case Command::SetIndexBuffer: {
SetIndexBufferCmd* cmd = commands->NextCommand<SetIndexBufferCmd>();
cmd->~SetIndexBufferCmd();
break;
}
case Command::SetVertexBuffer: {
SetVertexBufferCmd* cmd = commands->NextCommand<SetVertexBufferCmd>();
cmd->~SetVertexBufferCmd();
break;
}
case Command::WriteBuffer: {
WriteBufferCmd* write = commands->NextCommand<WriteBufferCmd>();
commands->NextData<uint8_t>(write->size);
write->~WriteBufferCmd();
break;
}
case Command::WriteTimestamp: {
WriteTimestampCmd* cmd = commands->NextCommand<WriteTimestampCmd>();
cmd->~WriteTimestampCmd();
break;
}
}
}
commands->MakeEmptyAsDataWasDestroyed();
}
void SkipCommand(CommandIterator* commands, Command type) {
switch (type) {
case Command::BeginComputePass:
commands->NextCommand<BeginComputePassCmd>();
break;
case Command::BeginOcclusionQuery:
commands->NextCommand<BeginOcclusionQueryCmd>();
break;
case Command::BeginRenderPass:
commands->NextCommand<BeginRenderPassCmd>();
break;
case Command::CopyBufferToBuffer:
commands->NextCommand<CopyBufferToBufferCmd>();
break;
case Command::CopyBufferToTexture:
commands->NextCommand<CopyBufferToTextureCmd>();
break;
case Command::CopyTextureToBuffer:
commands->NextCommand<CopyTextureToBufferCmd>();
break;
case Command::CopyTextureToTexture:
commands->NextCommand<CopyTextureToTextureCmd>();
break;
case Command::Dispatch:
commands->NextCommand<DispatchCmd>();
break;
case Command::DispatchIndirect:
commands->NextCommand<DispatchIndirectCmd>();
break;
case Command::Draw:
commands->NextCommand<DrawCmd>();
break;
case Command::DrawIndexed:
commands->NextCommand<DrawIndexedCmd>();
break;
case Command::DrawIndirect:
commands->NextCommand<DrawIndirectCmd>();
break;
case Command::DrawIndexedIndirect:
commands->NextCommand<DrawIndexedIndirectCmd>();
break;
case Command::EndComputePass:
commands->NextCommand<EndComputePassCmd>();
break;
case Command::EndOcclusionQuery:
commands->NextCommand<EndOcclusionQueryCmd>();
break;
case Command::EndRenderPass:
commands->NextCommand<EndRenderPassCmd>();
break;
case Command::ExecuteBundles: {
auto* cmd = commands->NextCommand<ExecuteBundlesCmd>();
commands->NextData<Ref<RenderBundleBase>>(cmd->count);
break;
}
case Command::ClearBuffer:
commands->NextCommand<ClearBufferCmd>();
break;
case Command::InsertDebugMarker: {
InsertDebugMarkerCmd* cmd = commands->NextCommand<InsertDebugMarkerCmd>();
commands->NextData<char>(cmd->length + 1);
break;
}
case Command::PopDebugGroup:
commands->NextCommand<PopDebugGroupCmd>();
break;
case Command::PushDebugGroup: {
PushDebugGroupCmd* cmd = commands->NextCommand<PushDebugGroupCmd>();
commands->NextData<char>(cmd->length + 1);
break;
}
case Command::ResolveQuerySet: {
commands->NextCommand<ResolveQuerySetCmd>();
break;
}
case Command::SetComputePipeline:
commands->NextCommand<SetComputePipelineCmd>();
break;
case Command::SetRenderPipeline:
commands->NextCommand<SetRenderPipelineCmd>();
break;
case Command::SetStencilReference:
commands->NextCommand<SetStencilReferenceCmd>();
break;
case Command::SetViewport:
commands->NextCommand<SetViewportCmd>();
break;
case Command::SetScissorRect:
commands->NextCommand<SetScissorRectCmd>();
break;
case Command::SetBlendConstant:
commands->NextCommand<SetBlendConstantCmd>();
break;
case Command::SetBindGroup: {
SetBindGroupCmd* cmd = commands->NextCommand<SetBindGroupCmd>();
if (cmd->dynamicOffsetCount > 0) {
commands->NextData<uint32_t>(cmd->dynamicOffsetCount);
}
break;
}
case Command::SetIndexBuffer:
commands->NextCommand<SetIndexBufferCmd>();
break;
case Command::SetVertexBuffer: {
commands->NextCommand<SetVertexBufferCmd>();
break;
}
case Command::WriteBuffer:
commands->NextCommand<WriteBufferCmd>();
break;
case Command::WriteTimestamp: {
commands->NextCommand<WriteTimestampCmd>();
break;
}
}
}
}  // namespace dawn::native
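Both FreeCommands and SkipCommand must consume the same trailing data blocks (dynamic offsets, debug-marker strings, render-bundle refs, written bytes) that were appended after certain command structs; otherwise the iterator would decode that extra data as the next command tag. A simplified, self-contained sketch of that layout, using hypothetical Stream and Tag types rather than Dawn's CommandIterator (which also handles alignment and non-POD commands):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    struct Stream {
        std::vector<uint8_t> bytes;
        size_t cursor = 0;

        template <typename T>
        void Push(const T& value) {
            const auto* p = reinterpret_cast<const uint8_t*>(&value);
            bytes.insert(bytes.end(), p, p + sizeof(T));
        }
        template <typename T>
        T Next() {
            T value;
            std::memcpy(&value, bytes.data() + cursor, sizeof(T));
            cursor += sizeof(T);
            return value;
        }
        void Skip(size_t n) { cursor += n; }
    };

    enum class Tag : uint32_t { SetBindGroup, Draw };

    struct SetBindGroup {
        uint32_t dynamicOffsetCount;  // uint32_t[dynamicOffsetCount] follows the struct
    };

    int main() {
        Stream s;
        s.Push(Tag::SetBindGroup);
        s.Push(SetBindGroup{2});
        s.Push(uint32_t{16});
        s.Push(uint32_t{256});
        s.Push(Tag::Draw);

        // Skipping must consume the trailing offsets, just like SkipCommand does via NextData.
        Tag t = s.Next<Tag>();
        if (t == Tag::SetBindGroup) {
            SetBindGroup cmd = s.Next<SetBindGroup>();
            s.Skip(cmd.dynamicOffsetCount * sizeof(uint32_t));
        }
        return s.Next<Tag>() == Tag::Draw ? 0 : 1;  // stays in sync only if the skip happened
    }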
@ -29,271 +29,271 @@
namespace dawn::native {

// Definition of the commands that are present in the CommandIterator given by the
// CommandBufferBuilder. They are not defined in CommandBuffer.h to break some header
// dependencies: Ref<Object> needs Object to be defined.
enum class Command {
    BeginComputePass,
    BeginOcclusionQuery,
    BeginRenderPass,
    ClearBuffer,
    CopyBufferToBuffer,
    CopyBufferToTexture,
    CopyTextureToBuffer,
    CopyTextureToTexture,
    Dispatch,
    DispatchIndirect,
    Draw,
    DrawIndexed,
    DrawIndirect,
    DrawIndexedIndirect,
    EndComputePass,
    EndOcclusionQuery,
    EndRenderPass,
    ExecuteBundles,
    InsertDebugMarker,
    PopDebugGroup,
    PushDebugGroup,
    ResolveQuerySet,
    SetComputePipeline,
    SetRenderPipeline,
    SetStencilReference,
    SetViewport,
    SetScissorRect,
    SetBlendConstant,
    SetBindGroup,
    SetIndexBuffer,
    SetVertexBuffer,
    WriteBuffer,
    WriteTimestamp,
};

struct TimestampWrite {
    Ref<QuerySetBase> querySet;
    uint32_t queryIndex;
};

struct BeginComputePassCmd {
    std::vector<TimestampWrite> timestampWrites;
};

struct BeginOcclusionQueryCmd {
    Ref<QuerySetBase> querySet;
    uint32_t queryIndex;
};

struct RenderPassColorAttachmentInfo {
    Ref<TextureViewBase> view;
    Ref<TextureViewBase> resolveTarget;
    wgpu::LoadOp loadOp;
    wgpu::StoreOp storeOp;
    dawn::native::Color clearColor;
};

struct RenderPassDepthStencilAttachmentInfo {
    Ref<TextureViewBase> view;
    wgpu::LoadOp depthLoadOp;
    wgpu::StoreOp depthStoreOp;
    wgpu::LoadOp stencilLoadOp;
    wgpu::StoreOp stencilStoreOp;
    float clearDepth;
    uint32_t clearStencil;
    bool depthReadOnly;
    bool stencilReadOnly;
};

struct BeginRenderPassCmd {
    Ref<AttachmentState> attachmentState;
    ityp::array<ColorAttachmentIndex, RenderPassColorAttachmentInfo, kMaxColorAttachments>
        colorAttachments;
    RenderPassDepthStencilAttachmentInfo depthStencilAttachment;

    // Cache the width and height of all attachments for convenience
    uint32_t width;
    uint32_t height;

    Ref<QuerySetBase> occlusionQuerySet;
    std::vector<TimestampWrite> timestampWrites;
};

struct BufferCopy {
    Ref<BufferBase> buffer;
    uint64_t offset;
    uint32_t bytesPerRow;
    uint32_t rowsPerImage;
};

struct TextureCopy {
    Ref<TextureBase> texture;
    uint32_t mipLevel;
    Origin3D origin;  // Texels / array layer
    Aspect aspect;
};

struct CopyBufferToBufferCmd {
    Ref<BufferBase> source;
    uint64_t sourceOffset;
    Ref<BufferBase> destination;
    uint64_t destinationOffset;
    uint64_t size;
};

struct CopyBufferToTextureCmd {
    BufferCopy source;
    TextureCopy destination;
    Extent3D copySize;  // Texels
};

struct CopyTextureToBufferCmd {
    TextureCopy source;
    BufferCopy destination;
    Extent3D copySize;  // Texels
};

struct CopyTextureToTextureCmd {
    TextureCopy source;
    TextureCopy destination;
    Extent3D copySize;  // Texels
};

struct DispatchCmd {
    uint32_t x;
    uint32_t y;
    uint32_t z;
};

struct DispatchIndirectCmd {
    Ref<BufferBase> indirectBuffer;
    uint64_t indirectOffset;
};

struct DrawCmd {
    uint32_t vertexCount;
    uint32_t instanceCount;
    uint32_t firstVertex;
    uint32_t firstInstance;
};

struct DrawIndexedCmd {
    uint32_t indexCount;
    uint32_t instanceCount;
    uint32_t firstIndex;
    int32_t baseVertex;
    uint32_t firstInstance;
};

struct DrawIndirectCmd {
    Ref<BufferBase> indirectBuffer;
    uint64_t indirectOffset;
};

struct DrawIndexedIndirectCmd : DrawIndirectCmd {};

struct EndComputePassCmd {
    std::vector<TimestampWrite> timestampWrites;
};

struct EndOcclusionQueryCmd {
    Ref<QuerySetBase> querySet;
    uint32_t queryIndex;
};

struct EndRenderPassCmd {
    std::vector<TimestampWrite> timestampWrites;
};

struct ExecuteBundlesCmd {
    uint32_t count;
};

struct ClearBufferCmd {
    Ref<BufferBase> buffer;
    uint64_t offset;
    uint64_t size;
};

struct InsertDebugMarkerCmd {
    uint32_t length;
};

struct PopDebugGroupCmd {};

struct PushDebugGroupCmd {
    uint32_t length;
};

struct ResolveQuerySetCmd {
    Ref<QuerySetBase> querySet;
    uint32_t firstQuery;
    uint32_t queryCount;
    Ref<BufferBase> destination;
    uint64_t destinationOffset;
};

struct SetComputePipelineCmd {
    Ref<ComputePipelineBase> pipeline;
};

struct SetRenderPipelineCmd {
    Ref<RenderPipelineBase> pipeline;
};

struct SetStencilReferenceCmd {
    uint32_t reference;
};

struct SetViewportCmd {
    float x, y, width, height, minDepth, maxDepth;
};

struct SetScissorRectCmd {
    uint32_t x, y, width, height;
};

struct SetBlendConstantCmd {
    Color color;
};

struct SetBindGroupCmd {
    BindGroupIndex index;
    Ref<BindGroupBase> group;
    uint32_t dynamicOffsetCount;
};

struct SetIndexBufferCmd {
    Ref<BufferBase> buffer;
    wgpu::IndexFormat format;
    uint64_t offset;
    uint64_t size;
};

struct SetVertexBufferCmd {
    VertexBufferSlot slot;
    Ref<BufferBase> buffer;
    uint64_t offset;
    uint64_t size;
};

struct WriteBufferCmd {
    Ref<BufferBase> buffer;
    uint64_t offset;
    uint64_t size;
};

struct WriteTimestampCmd {
    Ref<QuerySetBase> querySet;
    uint32_t queryIndex;
};
// This needs to be called before the CommandIterator is freed so that the Ref<> objects
// present in the commands have a chance to run their destructors and remove internal
// references.
class CommandIterator;
void FreeCommands(CommandIterator* commands);

// Helper function to allow skipping over a command when it is unimplemented, while still
// consuming the correct amount of data from the command iterator.
void SkipCommand(CommandIterator* commands, Command type);

}  // namespace dawn::native
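The comment above pins down a lifetime rule: FreeCommands must run while the encoded commands still exist so every Ref<> member can drop its reference. A minimal sketch of an owner honoring that rule; the CommandAllocator.h include path and the EncodedCommands type are assumptions for illustration, not Dawn code:

    #include "dawn/native/CommandAllocator.h"  // assumed location of CommandIterator
    #include "dawn/native/Commands.h"          // path assumed

    namespace dawn::native {

    // Hypothetical owner of encoded commands.
    class EncodedCommands {
      public:
        ~EncodedCommands() {
            // Run the Ref<> destructors stored inside the encoded commands *before* the
            // CommandIterator (and the memory backing the commands) goes away.
            FreeCommands(&mCommands);
        }

      private:
        CommandIterator mCommands;
    };

    }  // namespace dawn::native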
@ -21,181 +21,181 @@
namespace dawn::native {

namespace {

WGPUCompilationMessageType tintSeverityToMessageType(tint::diag::Severity severity) {
    switch (severity) {
        case tint::diag::Severity::Note:
            return WGPUCompilationMessageType_Info;
        case tint::diag::Severity::Warning:
            return WGPUCompilationMessageType_Warning;
        default:
            return WGPUCompilationMessageType_Error;
    }
}

}  // anonymous namespace

OwnedCompilationMessages::OwnedCompilationMessages() {
    mCompilationInfo.nextInChain = 0;
    mCompilationInfo.messageCount = 0;
    mCompilationInfo.messages = nullptr;
}

void OwnedCompilationMessages::AddMessageForTesting(std::string message,
                                                    wgpu::CompilationMessageType type,
                                                    uint64_t lineNum,
                                                    uint64_t linePos,
                                                    uint64_t offset,
                                                    uint64_t length) {
    // Cannot add messages after GetCompilationInfo has been called.
    ASSERT(mCompilationInfo.messages == nullptr);

    mMessageStrings.push_back(message);
    mMessages.push_back({nullptr, nullptr, static_cast<WGPUCompilationMessageType>(type), lineNum,
                         linePos, offset, length});
}

void OwnedCompilationMessages::AddMessage(const tint::diag::Diagnostic& diagnostic) {
    // Cannot add messages after GetCompilationInfo has been called.
    ASSERT(mCompilationInfo.messages == nullptr);

    // Tint line and column values are 1-based.
    uint64_t lineNum = diagnostic.source.range.begin.line;
    uint64_t linePos = diagnostic.source.range.begin.column;
    // The offset is 0-based.
    uint64_t offset = 0;
    uint64_t length = 0;

    if (lineNum && linePos && diagnostic.source.file) {
        const auto& lines = diagnostic.source.file->content.lines;
        size_t i = 0;
        // To find the offset of the message position, loop through each of the first lineNum-1
        // lines and add its length (+1 to account for the line break) to the offset.
        for (; i < lineNum - 1; ++i) {
            offset += lines[i].length() + 1;
        }

        // If the end line is on a different line from the beginning line, add the length of the
        // lines in between to the ending offset.
        uint64_t endLineNum = diagnostic.source.range.end.line;
        uint64_t endLinePos = diagnostic.source.range.end.column;

        // If the range has a valid start but the end is not specified, clamp it to the start.
        if (endLineNum == 0 || endLinePos == 0) {
            endLineNum = lineNum;
            endLinePos = linePos;
        }

        // Negative ranges aren't allowed
        ASSERT(endLineNum >= lineNum);

        uint64_t endOffset = offset;
        for (; i < endLineNum - 1; ++i) {
            endOffset += lines[i].length() + 1;
        }

        // Add the line positions to the offset and endOffset to get their final positions
        // within the code string.
        offset += linePos - 1;
        endOffset += endLinePos - 1;

        // Negative ranges aren't allowed
        ASSERT(endOffset >= offset);

        // The length of the message is the difference between the starting offset and the
        // ending offset.
        length = endOffset - offset;
    }

    if (diagnostic.code) {
        mMessageStrings.push_back(std::string(diagnostic.code) + ": " + diagnostic.message);
    } else {
        mMessageStrings.push_back(diagnostic.message);
    }

    mMessages.push_back({nullptr, nullptr, tintSeverityToMessageType(diagnostic.severity), lineNum,
                         linePos, offset, length});
}

void OwnedCompilationMessages::AddMessages(const tint::diag::List& diagnostics) {
    // Cannot add messages after GetCompilationInfo has been called.
    ASSERT(mCompilationInfo.messages == nullptr);

    for (const auto& diag : diagnostics) {
        AddMessage(diag);
    }

    AddFormattedTintMessages(diagnostics);
}

void OwnedCompilationMessages::ClearMessages() {
    // Cannot clear messages after GetCompilationInfo has been called.
    ASSERT(mCompilationInfo.messages == nullptr);

    mMessageStrings.clear();
    mMessages.clear();
}

const WGPUCompilationInfo* OwnedCompilationMessages::GetCompilationInfo() {
    mCompilationInfo.messageCount = mMessages.size();
    mCompilationInfo.messages = mMessages.data();

    // Ensure every message points at the correct message string. Cannot do this earlier, since
    // vector reallocations may move the pointers around.
    for (size_t i = 0; i < mCompilationInfo.messageCount; ++i) {
        WGPUCompilationMessage& message = mMessages[i];
        std::string& messageString = mMessageStrings[i];
        message.message = messageString.c_str();
    }

    return &mCompilationInfo;
}

const std::vector<std::string>& OwnedCompilationMessages::GetFormattedTintMessages() {
    return mFormattedTintMessages;
}

void OwnedCompilationMessages::AddFormattedTintMessages(const tint::diag::List& diagnostics) {
    tint::diag::List messageList;
    size_t warningCount = 0;
    size_t errorCount = 0;
    for (auto& diag : diagnostics) {
        switch (diag.severity) {
            case (tint::diag::Severity::Fatal):
            case (tint::diag::Severity::Error):
            case (tint::diag::Severity::InternalCompilerError): {
                errorCount++;
                messageList.add(tint::diag::Diagnostic(diag));
                break;
            }
            case (tint::diag::Severity::Warning): {
                warningCount++;
                messageList.add(tint::diag::Diagnostic(diag));
                break;
            }
            default:
                break;
        }
    }
    if (errorCount == 0 && warningCount == 0) {
        return;
    }
    tint::diag::Formatter::Style style;
    style.print_newline_at_end = false;
    std::ostringstream t;
    if (errorCount > 0) {
        t << errorCount << " error(s) ";
        if (warningCount > 0) {
            t << "and ";
        }
    }
    if (warningCount > 0) {
        t << warningCount << " warning(s) ";
    }
    t << "generated while compiling the shader:" << std::endl
      << tint::diag::Formatter{style}.format(messageList);
    mFormattedTintMessages.push_back(t.str());
}

}  // namespace dawn::native
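AddMessage turns Tint's 1-based (line, column) range into the 0-based offset/length pair carried by WGPUCompilationMessage by summing the lengths of the preceding lines plus one character per line break. A standalone sketch of just that arithmetic; RangeToOffset is a hypothetical helper, not part of Dawn:

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <vector>

    struct OffsetLength {
        uint64_t offset;
        uint64_t length;
    };

    OffsetLength RangeToOffset(const std::vector<std::string>& lines,
                               uint64_t lineNum, uint64_t linePos,       // 1-based start
                               uint64_t endLineNum, uint64_t endLinePos  // 1-based end
    ) {
        uint64_t offset = 0;
        size_t i = 0;
        for (; i < lineNum - 1; ++i) {
            offset += lines[i].length() + 1;  // +1 for the line break
        }
        uint64_t endOffset = offset;
        for (; i < endLineNum - 1; ++i) {
            endOffset += lines[i].length() + 1;
        }
        offset += linePos - 1;  // columns are 1-based
        endOffset += endLinePos - 1;
        return {offset, endOffset - offset};
    }

    int main() {
        std::vector<std::string> lines = {"fn main() {", "  let x = 1;", "}"};
        // Range covering "x = 1" on line 2, columns 7..12.
        OffsetLength r = RangeToOffset(lines, 2, 7, 2, 12);
        std::cout << r.offset << " " << r.length << "\n";  // prints: 18 5
        return 0;
    }

A related design choice visible above: GetCompilationInfo() assigns message.message = c_str() only after all strings have been collected, because earlier push_back calls could reallocate mMessageStrings and invalidate any pointers taken sooner.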
@ -23,39 +23,39 @@
#include "dawn/common/NonCopyable.h" #include "dawn/common/NonCopyable.h"
namespace tint::diag { namespace tint::diag {
class Diagnostic; class Diagnostic;
class List; class List;
} // namespace tint::diag } // namespace tint::diag
namespace dawn::native { namespace dawn::native {
class OwnedCompilationMessages : public NonCopyable { class OwnedCompilationMessages : public NonCopyable {
public: public:
OwnedCompilationMessages(); OwnedCompilationMessages();
~OwnedCompilationMessages() = default; ~OwnedCompilationMessages() = default;
void AddMessageForTesting( void AddMessageForTesting(
std::string message, std::string message,
wgpu::CompilationMessageType type = wgpu::CompilationMessageType::Info, wgpu::CompilationMessageType type = wgpu::CompilationMessageType::Info,
uint64_t lineNum = 0, uint64_t lineNum = 0,
uint64_t linePos = 0, uint64_t linePos = 0,
uint64_t offset = 0, uint64_t offset = 0,
uint64_t length = 0); uint64_t length = 0);
void AddMessages(const tint::diag::List& diagnostics); void AddMessages(const tint::diag::List& diagnostics);
void ClearMessages(); void ClearMessages();
const WGPUCompilationInfo* GetCompilationInfo(); const WGPUCompilationInfo* GetCompilationInfo();
const std::vector<std::string>& GetFormattedTintMessages(); const std::vector<std::string>& GetFormattedTintMessages();
private: private:
void AddMessage(const tint::diag::Diagnostic& diagnostic); void AddMessage(const tint::diag::Diagnostic& diagnostic);
void AddFormattedTintMessages(const tint::diag::List& diagnostics); void AddFormattedTintMessages(const tint::diag::List& diagnostics);
WGPUCompilationInfo mCompilationInfo; WGPUCompilationInfo mCompilationInfo;
std::vector<std::string> mMessageStrings; std::vector<std::string> mMessageStrings;
std::vector<WGPUCompilationMessage> mMessages; std::vector<WGPUCompilationMessage> mMessages;
std::vector<std::string> mFormattedTintMessages; std::vector<std::string> mFormattedTintMessages;
}; };
} // namespace dawn::native } // namespace dawn::native
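A hedged usage sketch of this interface: add a message for testing, then read it back through the WGPU struct. The include path and the printf-based dump are assumptions for illustration; only calls declared above are used:

    #include "dawn/native/CompilationMessages.h"  // path assumed

    #include <cstdio>

    void DumpMessages() {
        dawn::native::OwnedCompilationMessages messages;
        messages.AddMessageForTesting("example warning", wgpu::CompilationMessageType::Warning,
                                      /*lineNum=*/3, /*linePos=*/5);
        // GetCompilationInfo() fixes up the message pointers, so no more messages may be added
        // after this call (the ASSERTs in the .cpp enforce that).
        const WGPUCompilationInfo* info = messages.GetCompilationInfo();
        for (size_t i = 0; i < info->messageCount; ++i) {
            std::printf("%llu:%llu %s\n",
                        static_cast<unsigned long long>(info->messages[i].lineNum),
                        static_cast<unsigned long long>(info->messages[i].linePos),
                        info->messages[i].message);
        }
    }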
Some files were not shown because too many files have changed in this diff.