Consistent formatting for Dawn/Tint

This CL updates the clang-format files to use a single shared format for
Dawn and Tint. The major changes: indentation is 4 spaces, lines are 100
columns, and namespaces are not indented.

Bug: dawn:1339
Change-Id: I4208742c95643998d9fd14e77a9cc558071ded39
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/87603
Commit-Queue: Dan Sinclair <dsinclair@chromium.org>
Reviewed-by: Corentin Wallez <cwallez@chromium.org>
Kokoro: Kokoro <noreply+kokoro@google.com>

parent 73b1d1dafa
commit 41e4d9a34c
@@ -1,8 +1,5 @@
# http://clang.llvm.org/docs/ClangFormatStyleOptions.html
BasedOnStyle: Chromium
Standard: Cpp11

AllowShortFunctionsOnASingleLine: false

ColumnLimit: 100
@@ -11,10 +8,3 @@ IndentWidth: 4
 ObjCBlockIndentWidth: 4
 AccessModifierOffset: -2

-CompactNamespaces: true
-
-# This should result in only one indentation level with compacted namespaces
-NamespaceIndentation: All
-
-# Use this option once clang-format 6 is out.
-IndentPPDirectives: AfterHash
@@ -121,7 +121,7 @@ def _NonInclusiveFileFilter(file):
        "third_party/khronos/KHR/khrplatform.h",  # Third party file
        "tools/roll-all",  # Branch name
        "tools/src/container/key.go",  # External URL
        "tools/src/go.sum",  # External URL
        "go.sum",  # External URL
    ]
    return file.LocalPath() not in filter_list
@@ -1 +0,0 @@
-filter=-runtime/indentation_namespace
@@ -31,126 +31,117 @@

namespace dawn {

template <typename T>
struct IsDawnBitmask {
    static constexpr bool enable = false;
};

template <typename T, typename Enable = void>
struct LowerBitmask {
    static constexpr bool enable = false;
};

template <typename T>
struct LowerBitmask<T, typename std::enable_if<IsDawnBitmask<T>::enable>::type> {
    static constexpr bool enable = true;
    using type = T;
    constexpr static T Lower(T t) { return t; }
};

template <typename T>
struct BoolConvertible {
    using Integral = typename std::underlying_type<T>::type;

    // NOLINTNEXTLINE(runtime/explicit)
    constexpr BoolConvertible(Integral value) : value(value) {}
    constexpr operator bool() const { return value != 0; }
    constexpr operator T() const { return static_cast<T>(value); }

    Integral value;
};

template <typename T>
struct LowerBitmask<BoolConvertible<T>> {
    static constexpr bool enable = true;
    using type = T;
    static constexpr type Lower(BoolConvertible<T> t) { return t; }
};

template <
    typename T1,
    typename T2,
    typename = typename std::enable_if<LowerBitmask<T1>::enable && LowerBitmask<T2>::enable>::type>
constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator|(T1 left, T2 right) {
    using T = typename LowerBitmask<T1>::type;
    using Integral = typename std::underlying_type<T>::type;
    return static_cast<Integral>(LowerBitmask<T1>::Lower(left)) |
           static_cast<Integral>(LowerBitmask<T2>::Lower(right));
}

template <
    typename T1,
    typename T2,
    typename = typename std::enable_if<LowerBitmask<T1>::enable && LowerBitmask<T2>::enable>::type>
constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator&(T1 left, T2 right) {
    using T = typename LowerBitmask<T1>::type;
    using Integral = typename std::underlying_type<T>::type;
    return static_cast<Integral>(LowerBitmask<T1>::Lower(left)) &
           static_cast<Integral>(LowerBitmask<T2>::Lower(right));
}

template <
    typename T1,
    typename T2,
    typename = typename std::enable_if<LowerBitmask<T1>::enable && LowerBitmask<T2>::enable>::type>
constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator^(T1 left, T2 right) {
    using T = typename LowerBitmask<T1>::type;
    using Integral = typename std::underlying_type<T>::type;
    return static_cast<Integral>(LowerBitmask<T1>::Lower(left)) ^
           static_cast<Integral>(LowerBitmask<T2>::Lower(right));
}

template <typename T1>
constexpr BoolConvertible<typename LowerBitmask<T1>::type> operator~(T1 t) {
    using T = typename LowerBitmask<T1>::type;
    using Integral = typename std::underlying_type<T>::type;
    return ~static_cast<Integral>(LowerBitmask<T1>::Lower(t));
}

template <
    typename T,
    typename T2,
    typename = typename std::enable_if<IsDawnBitmask<T>::enable && LowerBitmask<T2>::enable>::type>
constexpr T& operator&=(T& l, T2 right) {
    T r = LowerBitmask<T2>::Lower(right);
    l = l & r;
    return l;
}

template <
    typename T,
    typename T2,
    typename = typename std::enable_if<IsDawnBitmask<T>::enable && LowerBitmask<T2>::enable>::type>
constexpr T& operator|=(T& l, T2 right) {
    T r = LowerBitmask<T2>::Lower(right);
    l = l | r;
    return l;
}

template <
    typename T,
    typename T2,
    typename = typename std::enable_if<IsDawnBitmask<T>::enable && LowerBitmask<T2>::enable>::type>
constexpr T& operator^=(T& l, T2 right) {
    T r = LowerBitmask<T2>::Lower(right);
    l = l ^ r;
    return l;
}

template <typename T>
constexpr bool HasZeroOrOneBits(T value) {
    using Integral = typename std::underlying_type<T>::type;
    return (static_cast<Integral>(value) & (static_cast<Integral>(value) - 1)) == 0;
}

}  // namespace dawn
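For context, an enum opts into the operators above by specializing IsDawnBitmask. A minimal sketch (MyFlags is a hypothetical example enum, not part of this CL):

#include "dawn/EnumClassBitmasks.h"

// Hypothetical enum class; any enum with power-of-two values works.
enum class MyFlags : uint32_t {
    None = 0x0,
    ReadOnly = 0x1,
    Mapped = 0x2,
};

namespace dawn {
// This specialization enables operator|, operator&, operator^, operator~, and
// the compound-assignment forms for MyFlags.
template <>
struct IsDawnBitmask<MyFlags> {
    static constexpr bool enable = true;
};
}  // namespace dawn

void Example() {
    MyFlags flags = MyFlags::ReadOnly | MyFlags::Mapped;
    if (flags & MyFlags::Mapped) {
        // BoolConvertible lets the result of operator& be tested directly.
    }
    static_assert(dawn::HasZeroOrOneBits(MyFlags::ReadOnly), "single-bit flag");
}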
@@ -65,7 +65,7 @@ struct DawnWSIContextD3D12 {
#endif

#if defined(DAWN_ENABLE_BACKEND_METAL) && defined(__OBJC__)
#import <Metal/Metal.h>

struct DawnWSIContextMetal {
    id<MTLDevice> device = nil;
@@ -30,35 +30,35 @@ struct ID3D12Resource;

namespace dawn::native::d3d12 {

class D3D11on12ResourceCache;

DAWN_NATIVE_EXPORT Microsoft::WRL::ComPtr<ID3D12Device> GetD3D12Device(WGPUDevice device);
DAWN_NATIVE_EXPORT DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
                                                                         HWND window);
DAWN_NATIVE_EXPORT WGPUTextureFormat
GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain);

enum MemorySegment {
    Local,
    NonLocal,
};

DAWN_NATIVE_EXPORT uint64_t SetExternalMemoryReservation(WGPUDevice device,
                                                         uint64_t requestedReservationSize,
                                                         MemorySegment memorySegment);

struct DAWN_NATIVE_EXPORT ExternalImageDescriptorDXGISharedHandle : ExternalImageDescriptor {
  public:
    ExternalImageDescriptorDXGISharedHandle();

    // Note: SharedHandle must be a handle to a texture object.
    HANDLE sharedHandle;
};

// Keyed mutex acquire/release uses a fixed key of 0 to match Chromium behavior.
constexpr UINT64 kDXGIKeyedMutexAcquireReleaseKey = 0;

struct DAWN_NATIVE_EXPORT ExternalImageAccessDescriptorDXGIKeyedMutex
    : ExternalImageAccessDescriptor {
  public:
    // TODO(chromium:1241533): Remove deprecated keyed mutex params after removing associated
@@ -66,9 +66,9 @@ namespace dawn::native::d3d12 {
    uint64_t acquireMutexKey;
    uint64_t releaseMutexKey;
    bool isSwapChainTexture = false;
};

class DAWN_NATIVE_EXPORT ExternalImageDXGI {
  public:
    ~ExternalImageDXGI();
@@ -97,14 +97,14 @@ namespace dawn::native::d3d12 {
    uint32_t mSampleCount;

    std::unique_ptr<D3D11on12ResourceCache> mD3D11on12ResourceCache;
};

struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
    AdapterDiscoveryOptions();
    explicit AdapterDiscoveryOptions(Microsoft::WRL::ComPtr<IDXGIAdapter> adapter);

    Microsoft::WRL::ComPtr<IDXGIAdapter> dxgiAdapter;
};

}  // namespace dawn::native::d3d12
@@ -23,50 +23,50 @@
#include "dawn/webgpu.h"

namespace dawn::platform {
class Platform;
}  // namespace dawn::platform

namespace wgpu {
struct AdapterProperties;
struct DeviceDescriptor;
}  // namespace wgpu

namespace dawn::native {

class InstanceBase;
class AdapterBase;

// An optional parameter of Adapter::CreateDevice() to send additional information when creating
// a Device. For example, we can use it to enable a workaround, optimization or feature.
struct DAWN_NATIVE_EXPORT DawnDeviceDescriptor {
    std::vector<const char*> requiredFeatures;
    std::vector<const char*> forceEnabledToggles;
    std::vector<const char*> forceDisabledToggles;

    const WGPURequiredLimits* requiredLimits = nullptr;
};

// A struct to record the information of a toggle. A toggle is a code path in Dawn device that
// can be manually configured to run or not outside Dawn, including workarounds, special
// features and optimizations.
struct ToggleInfo {
    const char* name;
    const char* description;
    const char* url;
};

// A struct to record the information of a feature. A feature is a GPU feature that is not
// required to be supported by all Dawn backends and can only be used when it is enabled on the
// creation of device.
using FeatureInfo = ToggleInfo;

// An adapter is an object that represents one possibility of creating devices in the system.
// Most of the time it will represent a combination of a physical GPU and an API. Note that the
// same GPU can be represented by multiple adapters but on different APIs.
//
// The underlying Dawn adapter is owned by the Dawn instance so this class is not RAII but just
// a reference to an underlying adapter.
class DAWN_NATIVE_EXPORT Adapter {
  public:
    Adapter();
    // NOLINTNEXTLINE(runtime/explicit)
@@ -117,25 +117,25 @@ namespace dawn::native {

  private:
    AdapterBase* mImpl = nullptr;
};

// Base class for options passed to Instance::DiscoverAdapters.
struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptionsBase {
  public:
    const WGPUBackendType backendType;

  protected:
    explicit AdapterDiscoveryOptionsBase(WGPUBackendType type);
};

enum BackendValidationLevel { Full, Partial, Disabled };

// Represents a connection to dawn_native and is used for dependency injection, discovering
// system adapters and injecting custom adapters (like a Swiftshader Vulkan adapter).
//
// This is an RAII class for Dawn instances and also controls the lifetime of all adapters
// for this instance.
class DAWN_NATIVE_EXPORT Instance {
  public:
    explicit Instance(const WGPUInstanceDescriptor* desc = nullptr);
    ~Instance();
@@ -172,22 +172,22 @@ namespace dawn::native {

  private:
    InstanceBase* mImpl = nullptr;
};

// Backend-agnostic API for dawn_native
DAWN_NATIVE_EXPORT const DawnProcTable& GetProcs();

// Query the names of all the toggles that are enabled in device
DAWN_NATIVE_EXPORT std::vector<const char*> GetTogglesUsed(WGPUDevice device);

// Backdoor to get the number of lazy clears for testing
DAWN_NATIVE_EXPORT size_t GetLazyClearCountForTesting(WGPUDevice device);

// Backdoor to get the number of deprecation warnings for testing
DAWN_NATIVE_EXPORT size_t GetDeprecationWarningCountForTesting(WGPUDevice device);

// Query if texture has been initialized
DAWN_NATIVE_EXPORT bool IsTextureSubresourceInitialized(
    WGPUTexture texture,
    uint32_t baseMipLevel,
    uint32_t levelCount,
@@ -195,29 +195,29 @@ namespace dawn::native {
    uint32_t layerCount,
    WGPUTextureAspect aspect = WGPUTextureAspect_All);

// Backdoor to get the order of the ProcMap for testing
DAWN_NATIVE_EXPORT std::vector<const char*> GetProcMapNamesForTesting();

DAWN_NATIVE_EXPORT bool DeviceTick(WGPUDevice device);

// ErrorInjector functions used for testing only. Defined in dawn_native/ErrorInjector.cpp
DAWN_NATIVE_EXPORT void EnableErrorInjector();
DAWN_NATIVE_EXPORT void DisableErrorInjector();
DAWN_NATIVE_EXPORT void ClearErrorInjector();
DAWN_NATIVE_EXPORT uint64_t AcquireErrorInjectorCallCount();
DAWN_NATIVE_EXPORT void InjectErrorAt(uint64_t index);

// The different types of external images
enum ExternalImageType {
    OpaqueFD,
    DmaBuf,
    IOSurface,
    DXGISharedHandle,
    EGLImage,
};

// Common properties of external images
struct DAWN_NATIVE_EXPORT ExternalImageDescriptor {
  public:
    const WGPUTextureDescriptor* cTextureDescriptor;  // Must match image creation params
    bool isInitialized;  // Whether the texture is initialized on import
@@ -228,15 +228,15 @@ namespace dawn::native {

  private:
    ExternalImageType mType;
};

struct DAWN_NATIVE_EXPORT ExternalImageAccessDescriptor {
  public:
    bool isInitialized;  // Whether the texture is initialized on import
    WGPUTextureUsageFlags usage;
};

struct DAWN_NATIVE_EXPORT ExternalImageExportInfo {
  public:
    bool isInitialized;  // Whether the texture is initialized after export
    ExternalImageType GetType() const;
@@ -246,13 +246,13 @@ namespace dawn::native {

  private:
    ExternalImageType mType;
};

DAWN_NATIVE_EXPORT const char* GetObjectLabelForTesting(void* objectHandle);

DAWN_NATIVE_EXPORT uint64_t GetAllocatedSizeForTesting(WGPUBuffer buffer);

DAWN_NATIVE_EXPORT bool BindGroupLayoutBindingsEqualForTesting(WGPUBindGroupLayout a,
                                                               WGPUBindGroupLayout b);

}  // namespace dawn::native
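As a usage sketch for this header: GetProcs() above supplies the proc table, while DiscoverDefaultAdapters(), GetAdapters(), and GetProperties() come from elided parts of the same Instance/Adapter classes (hedged; they are not shown in the hunks here).

#include "dawn/dawn_proc.h"
#include "dawn/native/DawnNative.h"
#include "dawn/webgpu_cpp.h"

void SetupDawn() {
    // Route the webgpu.h C entry points to dawn_native's implementation.
    dawnProcSetProcs(&dawn::native::GetProcs());

    // RAII wrapper; the instance owns every adapter it discovers.
    dawn::native::Instance instance;
    instance.DiscoverDefaultAdapters();

    for (dawn::native::Adapter adapter : instance.GetAdapters()) {
        wgpu::AdapterProperties properties;
        adapter.GetProperties(&properties);
        // Inspect |properties| and call adapter.CreateDevice() on the one you want.
    }
}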
@@ -29,16 +29,16 @@ struct __IOSurface;
typedef __IOSurface* IOSurfaceRef;

#ifdef __OBJC__
#import <Metal/Metal.h>
#endif  // __OBJC__

namespace dawn::native::metal {

struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
    AdapterDiscoveryOptions();
};

struct DAWN_NATIVE_EXPORT ExternalImageDescriptorIOSurface : ExternalImageDescriptor {
  public:
    ExternalImageDescriptorIOSurface();
@@ -46,24 +46,24 @@ namespace dawn::native::metal {

    // This has been deprecated.
    uint32_t plane;
};

DAWN_NATIVE_EXPORT WGPUTexture WrapIOSurface(WGPUDevice device,
                                             const ExternalImageDescriptorIOSurface* descriptor);

// When making Metal interop with other APIs, we need to be careful that QueueSubmit doesn't
// mean that the operations will be visible to other APIs/Metal devices right away. macOS
// does have a global queue of graphics operations, but the command buffers are inserted there
// when they are "scheduled". Submitting other operations before the command buffer is
// scheduled could lead to races in who gets scheduled first and incorrect rendering.
DAWN_NATIVE_EXPORT void WaitForCommandsToBeScheduled(WGPUDevice device);

}  // namespace dawn::native::metal

#ifdef __OBJC__
namespace dawn::native::metal {

DAWN_NATIVE_EXPORT id<MTLDevice> GetMetalDevice(WGPUDevice device);

}  // namespace dawn::native::metal
#endif  // __OBJC__
@@ -19,7 +19,7 @@
#include "dawn/native/DawnNative.h"

namespace dawn::native::null {
DAWN_NATIVE_EXPORT DawnSwapChainImplementation CreateNativeSwapChainImpl();
}  // namespace dawn::native::null

#endif  // INCLUDE_DAWN_NATIVE_NULLBACKEND_H_
@@ -22,33 +22,34 @@ typedef void* EGLImage;

namespace dawn::native::opengl {

struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
    AdapterDiscoveryOptions();

    void* (*getProc)(const char*);
};

struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptionsES : public AdapterDiscoveryOptionsBase {
    AdapterDiscoveryOptionsES();

    void* (*getProc)(const char*);
};

using PresentCallback = void (*)(void*);
DAWN_NATIVE_EXPORT DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
                                                                         PresentCallback present,
                                                                         void* presentUserdata);
DAWN_NATIVE_EXPORT WGPUTextureFormat
GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain);

struct DAWN_NATIVE_EXPORT ExternalImageDescriptorEGLImage : ExternalImageDescriptor {
  public:
    ExternalImageDescriptorEGLImage();

    ::EGLImage image;
};

DAWN_NATIVE_EXPORT WGPUTexture
WrapExternalEGLImage(WGPUDevice device, const ExternalImageDescriptorEGLImage* descriptor);

}  // namespace dawn::native::opengl
@@ -24,22 +24,22 @@

namespace dawn::native::vulkan {

DAWN_NATIVE_EXPORT VkInstance GetInstance(WGPUDevice device);

DAWN_NATIVE_EXPORT PFN_vkVoidFunction GetInstanceProcAddr(WGPUDevice device, const char* pName);

DAWN_NATIVE_EXPORT DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
                                                                         ::VkSurfaceKHR surface);
DAWN_NATIVE_EXPORT WGPUTextureFormat
GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain);

struct DAWN_NATIVE_EXPORT AdapterDiscoveryOptions : public AdapterDiscoveryOptionsBase {
    AdapterDiscoveryOptions();

    bool forceSwiftShader = false;
};

struct DAWN_NATIVE_EXPORT ExternalImageDescriptorVk : ExternalImageDescriptor {
  public:
    // The following members may be ignored if |ExternalImageDescriptor::isInitialized| is false
    // since the import does not need to preserve texture contents.
@@ -55,9 +55,9 @@ namespace dawn::native::vulkan {

  protected:
    using ExternalImageDescriptor::ExternalImageDescriptor;
};

struct ExternalImageExportInfoVk : ExternalImageExportInfo {
  public:
    // See comments in |ExternalImageDescriptorVk|
    // Contains the old/new layouts used in the queue release operation.
@@ -66,72 +66,72 @@ namespace dawn::native::vulkan {

  protected:
    using ExternalImageExportInfo::ExternalImageExportInfo;
};

// Can't use DAWN_PLATFORM_LINUX since header included in both Dawn and Chrome
#ifdef __linux__

// Common properties of external images represented by FDs. On successful import the file
// descriptor's ownership is transferred to the Dawn implementation and they shouldn't be
// used outside of Dawn again. TODO(enga): Also transfer ownership in the error case so the
// caller can assume the FD is always consumed.
struct DAWN_NATIVE_EXPORT ExternalImageDescriptorFD : ExternalImageDescriptorVk {
  public:
    int memoryFD;              // A file descriptor from an export of the memory of the image
    std::vector<int> waitFDs;  // File descriptors of semaphores which will be waited on

  protected:
    using ExternalImageDescriptorVk::ExternalImageDescriptorVk;
};

// Descriptor for opaque file descriptor image import
struct DAWN_NATIVE_EXPORT ExternalImageDescriptorOpaqueFD : ExternalImageDescriptorFD {
    ExternalImageDescriptorOpaqueFD();

    VkDeviceSize allocationSize;  // Must match VkMemoryAllocateInfo from image creation
    uint32_t memoryTypeIndex;     // Must match VkMemoryAllocateInfo from image creation
};

// Descriptor for dma-buf file descriptor image import
struct DAWN_NATIVE_EXPORT ExternalImageDescriptorDmaBuf : ExternalImageDescriptorFD {
    ExternalImageDescriptorDmaBuf();

    uint32_t stride;       // Stride of the buffer in bytes
    uint64_t drmModifier;  // DRM modifier of the buffer
};

// Info struct that is written to in |ExportVulkanImage|.
struct DAWN_NATIVE_EXPORT ExternalImageExportInfoFD : ExternalImageExportInfoVk {
  public:
    // Contains the exported semaphore handles.
    std::vector<int> semaphoreHandles;

  protected:
    using ExternalImageExportInfoVk::ExternalImageExportInfoVk;
};

struct DAWN_NATIVE_EXPORT ExternalImageExportInfoOpaqueFD : ExternalImageExportInfoFD {
    ExternalImageExportInfoOpaqueFD();
};

struct DAWN_NATIVE_EXPORT ExternalImageExportInfoDmaBuf : ExternalImageExportInfoFD {
    ExternalImageExportInfoDmaBuf();
};

#endif  // __linux__

// Imports external memory into a Vulkan image. Internally, this uses external memory /
// semaphore extensions to import the image and wait on the provided synchronization
// primitives before the texture can be used.
// On failure, returns a nullptr.
DAWN_NATIVE_EXPORT WGPUTexture WrapVulkanImage(WGPUDevice device,
                                               const ExternalImageDescriptorVk* descriptor);

// Exports external memory from a Vulkan image. This must be called on wrapped textures
// before they are destroyed. It writes the semaphore to wait on and the old/new image
// layouts to |info|. Pass VK_IMAGE_LAYOUT_UNDEFINED as |desiredLayout| if you don't want to
// perform a layout transition.
DAWN_NATIVE_EXPORT bool ExportVulkanImage(WGPUTexture texture,
                                          VkImageLayout desiredLayout,
                                          ExternalImageExportInfoVk* info);
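A minimal sketch of the export half of this API, assuming Linux (the FD types are only defined under __linux__) and a |texture| previously created with WrapVulkanImage:

#include "dawn/native/VulkanBackend.h"

void ReleaseToExternalQueue(WGPUTexture texture) {
    dawn::native::vulkan::ExternalImageExportInfoOpaqueFD exportInfo;
    if (dawn::native::vulkan::ExportVulkanImage(texture, VK_IMAGE_LAYOUT_UNDEFINED,
                                                &exportInfo)) {
        // exportInfo.semaphoreHandles now holds semaphore FDs the external user must
        // wait on; VK_IMAGE_LAYOUT_UNDEFINED requests no layout transition.
    }
}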
@@ -16,21 +16,21 @@
#define INCLUDE_DAWN_NATIVE_DAWN_NATIVE_EXPORT_H_

#if defined(DAWN_NATIVE_SHARED_LIBRARY)
#if defined(_WIN32)
#if defined(DAWN_NATIVE_IMPLEMENTATION)
#define DAWN_NATIVE_EXPORT __declspec(dllexport)
#else
#define DAWN_NATIVE_EXPORT __declspec(dllimport)
#endif
#else  // defined(_WIN32)
#if defined(DAWN_NATIVE_IMPLEMENTATION)
#define DAWN_NATIVE_EXPORT __attribute__((visibility("default")))
#else
#define DAWN_NATIVE_EXPORT
#endif
#endif  // defined(_WIN32)
#else  // defined(DAWN_NATIVE_SHARED_LIBRARY)
#define DAWN_NATIVE_EXPORT
#endif  // defined(DAWN_NATIVE_SHARED_LIBRARY)

#endif  // INCLUDE_DAWN_NATIVE_DAWN_NATIVE_EXPORT_H_
@@ -24,14 +24,14 @@

namespace dawn::platform {

enum class TraceCategory {
    General,     // General trace events
    Validation,  // Dawn validation
    Recording,   // Native command recording
    GPUWork,     // Actual GPU work
};

class DAWN_PLATFORM_EXPORT CachingInterface {
  public:
    CachingInterface();
    virtual ~CachingInterface();
@@ -58,27 +58,27 @@ namespace dawn::platform {
  private:
    CachingInterface(const CachingInterface&) = delete;
    CachingInterface& operator=(const CachingInterface&) = delete;
};

class DAWN_PLATFORM_EXPORT WaitableEvent {
  public:
    WaitableEvent() = default;
    virtual ~WaitableEvent() = default;
    virtual void Wait() = 0;        // Wait for completion
    virtual bool IsComplete() = 0;  // Non-blocking check if the event is complete
};

using PostWorkerTaskCallback = void (*)(void* userdata);

class DAWN_PLATFORM_EXPORT WorkerTaskPool {
  public:
    WorkerTaskPool() = default;
    virtual ~WorkerTaskPool() = default;
    virtual std::unique_ptr<WaitableEvent> PostWorkerTask(PostWorkerTaskCallback,
                                                          void* userdata) = 0;
};

class DAWN_PLATFORM_EXPORT Platform {
  public:
    Platform();
    virtual ~Platform();
@@ -101,14 +101,13 @@ namespace dawn::platform {
    // The |fingerprint| is provided by Dawn to inform the client to discard the Dawn caches
    // when the fingerprint changes. The returned CachingInterface is expected to outlive the
    // device which uses it to persistently cache objects.
    virtual CachingInterface* GetCachingInterface(const void* fingerprint, size_t fingerprintSize);
    virtual std::unique_ptr<WorkerTaskPool> CreateWorkerTaskPool();

  private:
    Platform(const Platform&) = delete;
    Platform& operator=(const Platform&) = delete;
};

}  // namespace dawn::platform
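For reference, a deliberately trivial embedder implementation of the interfaces above (a sketch, not part of the CL): it runs tasks synchronously, whereas a real embedder would dispatch to a thread pool.

#include <memory>
#include "dawn/platform/DawnPlatform.h"

// An event that is complete the moment it is created, since the task ran inline.
class SyncEvent : public dawn::platform::WaitableEvent {
  public:
    void Wait() override {}
    bool IsComplete() override { return true; }
};

class SyncTaskPool : public dawn::platform::WorkerTaskPool {
  public:
    std::unique_ptr<dawn::platform::WaitableEvent> PostWorkerTask(
        dawn::platform::PostWorkerTaskCallback callback, void* userdata) override {
        callback(userdata);  // Run inline; the returned event is already complete.
        return std::make_unique<SyncEvent>();
    }
};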
@@ -16,21 +16,21 @@
#define INCLUDE_DAWN_PLATFORM_DAWN_PLATFORM_EXPORT_H_

#if defined(DAWN_PLATFORM_SHARED_LIBRARY)
#if defined(_WIN32)
#if defined(DAWN_PLATFORM_IMPLEMENTATION)
#define DAWN_PLATFORM_EXPORT __declspec(dllexport)
#else
#define DAWN_PLATFORM_EXPORT __declspec(dllimport)
#endif
#else  // defined(_WIN32)
#if defined(DAWN_PLATFORM_IMPLEMENTATION)
#define DAWN_PLATFORM_EXPORT __attribute__((visibility("default")))
#else
#define DAWN_PLATFORM_EXPORT
#endif
#endif  // defined(_WIN32)
#else  // defined(DAWN_PLATFORM_SHARED_LIBRARY)
#define DAWN_PLATFORM_EXPORT
#endif  // defined(DAWN_PLATFORM_SHARED_LIBRARY)

#endif  // INCLUDE_DAWN_PLATFORM_DAWN_PLATFORM_EXPORT_H_
@@ -23,7 +23,7 @@

namespace dawn::wire {

class DAWN_WIRE_EXPORT CommandSerializer {
  public:
    CommandSerializer();
    virtual ~CommandSerializer();
@@ -38,9 +38,9 @@ namespace dawn::wire {
    virtual bool Flush() = 0;
    virtual size_t GetMaximumAllocationSize() const = 0;
    virtual void OnSerializeError();
};

class DAWN_WIRE_EXPORT CommandHandler {
  public:
    CommandHandler();
    virtual ~CommandHandler();
@@ -48,26 +48,25 @@ namespace dawn::wire {
    CommandHandler& operator=(const CommandHandler& rhs) = delete;

    virtual const volatile char* HandleCommands(const volatile char* commands, size_t size) = 0;
};

DAWN_WIRE_EXPORT size_t
SerializedWGPUDevicePropertiesSize(const WGPUDeviceProperties* deviceProperties);

DAWN_WIRE_EXPORT void SerializeWGPUDeviceProperties(const WGPUDeviceProperties* deviceProperties,
                                                    char* serializeBuffer);

DAWN_WIRE_EXPORT bool DeserializeWGPUDeviceProperties(WGPUDeviceProperties* deviceProperties,
                                                      const volatile char* deserializeBuffer,
                                                      size_t deserializeBufferSize);

DAWN_WIRE_EXPORT size_t
SerializedWGPUSupportedLimitsSize(const WGPUSupportedLimits* supportedLimits);

DAWN_WIRE_EXPORT void SerializeWGPUSupportedLimits(const WGPUSupportedLimits* supportedLimits,
                                                   char* serializeBuffer);

DAWN_WIRE_EXPORT bool DeserializeWGPUSupportedLimits(WGPUSupportedLimits* supportedLimits,
                                                     const volatile char* deserializeBuffer,
                                                     size_t deserializeBufferSize);
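The serialize/deserialize pairs above are used to ship structs across a process boundary. A minimal round-trip sketch using only the functions declared in this hunk:

#include <vector>
#include "dawn/wire/Wire.h"

void RoundTrip(const WGPUDeviceProperties* props) {
    // Size the buffer, then write the serialized form into it.
    std::vector<char> buffer(dawn::wire::SerializedWGPUDevicePropertiesSize(props));
    dawn::wire::SerializeWGPUDeviceProperties(props, buffer.data());

    // On the receiving side, decode back into a struct.
    WGPUDeviceProperties deserialized;
    bool ok = dawn::wire::DeserializeWGPUDeviceProperties(&deserialized, buffer.data(),
                                                          buffer.size());
    // |ok| is false if the buffer was truncated or malformed.
}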
@@ -23,47 +23,47 @@

namespace dawn::wire {

namespace client {
class Client;
class MemoryTransferService;

DAWN_WIRE_EXPORT const DawnProcTable& GetProcs();
}  // namespace client

struct ReservedTexture {
    WGPUTexture texture;
    uint32_t id;
    uint32_t generation;
    uint32_t deviceId;
    uint32_t deviceGeneration;
};

struct ReservedSwapChain {
    WGPUSwapChain swapchain;
    uint32_t id;
    uint32_t generation;
    uint32_t deviceId;
    uint32_t deviceGeneration;
};

struct ReservedDevice {
    WGPUDevice device;
    uint32_t id;
    uint32_t generation;
};

struct ReservedInstance {
    WGPUInstance instance;
    uint32_t id;
    uint32_t generation;
};

struct DAWN_WIRE_EXPORT WireClientDescriptor {
    CommandSerializer* serializer;
    client::MemoryTransferService* memoryTransferService = nullptr;
};

class DAWN_WIRE_EXPORT WireClient : public CommandHandler {
  public:
    explicit WireClient(const WireClientDescriptor& descriptor);
    ~WireClient() override;
@@ -86,10 +86,10 @@ namespace dawn::wire {

  private:
    std::unique_ptr<client::Client> mImpl;
};

namespace client {
class DAWN_WIRE_EXPORT MemoryTransferService {
  public:
    MemoryTransferService();
    virtual ~MemoryTransferService();
@@ -160,9 +160,7 @@ namespace dawn::wire {
        // the subrange (offset, offset + size) of the allocation at buffer unmap
        // This subrange is always the whole mapped region for now
        // There could be nothing to be serialized (if using shared memory)
        virtual void SerializeDataUpdate(void* serializePointer, size_t offset, size_t size) = 0;

      private:
        WriteHandle(const WriteHandle&) = delete;
@@ -172,11 +170,11 @@ namespace dawn::wire {
  private:
    MemoryTransferService(const MemoryTransferService&) = delete;
    MemoryTransferService& operator=(const MemoryTransferService&) = delete;
};

// Backdoor to get the order of the ProcMap for testing
DAWN_WIRE_EXPORT std::vector<const char*> GetProcMapNamesForTesting();
}  // namespace client
}  // namespace dawn::wire

#endif  // INCLUDE_DAWN_WIRE_WIRECLIENT_H_
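Client-side setup in one sketch, using the types declared above (hedged: the embedder supplies its own CommandSerializer subclass that ships encoded bytes over IPC, and dawnProcSetProcs comes from dawn/dawn_proc.h):

#include "dawn/dawn_proc.h"
#include "dawn/wire/WireClient.h"

void SetupWireClient(dawn::wire::CommandSerializer* serializer) {
    dawn::wire::WireClientDescriptor descriptor = {};
    descriptor.serializer = serializer;
    dawn::wire::WireClient client(descriptor);

    // Route the C API through the wire; calls are encoded and handed to |serializer|.
    dawnProcSetProcs(&dawn::wire::client::GetProcs());

    // Bytes arriving from the server side are replayed via the inherited
    // client.HandleCommands(data, size).
}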
@@ -23,18 +23,18 @@ struct DawnProcTable;

namespace dawn::wire {

namespace server {
class Server;
class MemoryTransferService;
}  // namespace server

struct DAWN_WIRE_EXPORT WireServerDescriptor {
    const DawnProcTable* procs;
    CommandSerializer* serializer;
    server::MemoryTransferService* memoryTransferService = nullptr;
};

class DAWN_WIRE_EXPORT WireServer : public CommandHandler {
  public:
    explicit WireServer(const WireServerDescriptor& descriptor);
    ~WireServer() override;
@@ -67,10 +67,10 @@ namespace dawn::wire {

  private:
    std::unique_ptr<server::Server> mImpl;
};

namespace server {
class DAWN_WIRE_EXPORT MemoryTransferService {
  public:
    MemoryTransferService();
    virtual ~MemoryTransferService();
@@ -141,8 +141,8 @@ namespace dawn::wire {
  private:
    MemoryTransferService(const MemoryTransferService&) = delete;
    MemoryTransferService& operator=(const MemoryTransferService&) = delete;
};
}  // namespace server

}  // namespace dawn::wire
@@ -16,21 +16,21 @@
#define INCLUDE_DAWN_WIRE_DAWN_WIRE_EXPORT_H_

#if defined(DAWN_WIRE_SHARED_LIBRARY)
#if defined(_WIN32)
#if defined(DAWN_WIRE_IMPLEMENTATION)
#define DAWN_WIRE_EXPORT __declspec(dllexport)
#else
#define DAWN_WIRE_EXPORT __declspec(dllimport)
#endif
#else  // defined(_WIN32)
#if defined(DAWN_WIRE_IMPLEMENTATION)
#define DAWN_WIRE_EXPORT __attribute__((visibility("default")))
#else
#define DAWN_WIRE_EXPORT
#endif
#endif  // defined(_WIN32)
#else  // defined(DAWN_WIRE_SHARED_LIBRARY)
#define DAWN_WIRE_EXPORT
#endif  // defined(DAWN_WIRE_SHARED_LIBRARY)

#endif  // INCLUDE_DAWN_WIRE_DAWN_WIRE_EXPORT_H_
@@ -1,2 +0,0 @@
-# http://clang.llvm.org/docs/ClangFormatStyleOptions.html
-BasedOnStyle: Chromium
@@ -1 +0,0 @@
-filter=-runtime/indentation_namespace
@@ -32,32 +32,32 @@
// MSVC triggers a warning in /W4 for do {} while(0). SDL worked around this by using (0,0) and
// points out that it looks like an owl face.
#if defined(DAWN_COMPILER_MSVC)
#define DAWN_ASSERT_LOOP_CONDITION (0, 0)
#else
#define DAWN_ASSERT_LOOP_CONDITION (0)
#endif

// DAWN_ASSERT_CALLSITE_HELPER generates the actual assert code. In Debug it does what you would
// expect of an assert and in release it tries to give hints to make the compiler generate better
// code.
#if defined(DAWN_ENABLE_ASSERTS)
#define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition)  \
    do {                                                          \
        if (!(condition)) {                                       \
            HandleAssertionFailure(file, func, line, #condition); \
        }                                                         \
    } while (DAWN_ASSERT_LOOP_CONDITION)
#else
#if defined(DAWN_COMPILER_MSVC)
#define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) __assume(condition)
#elif defined(DAWN_COMPILER_CLANG) && defined(__builtin_assume)
#define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) __builtin_assume(condition)
#else
#define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) \
    do {                                                         \
        DAWN_UNUSED(sizeof(condition));                          \
    } while (DAWN_ASSERT_LOOP_CONDITION)
#endif
#endif

#define DAWN_ASSERT(condition) DAWN_ASSERT_CALLSITE_HELPER(__FILE__, __func__, __LINE__, condition)
@@ -68,8 +68,8 @@
    } while (DAWN_ASSERT_LOOP_CONDITION)

#if !defined(DAWN_SKIP_ASSERT_SHORTHANDS)
#define ASSERT DAWN_ASSERT
#define UNREACHABLE DAWN_UNREACHABLE
#endif

void HandleAssertionFailure(const char* file,
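A typical call site for the macros above (a sketch; kMaxAttachments is a hypothetical constant invented for the example):

#include "dawn/common/Assert.h"

constexpr uint32_t kMaxAttachments = 8;  // hypothetical constant for the example

void SetAttachment(uint32_t index) {
    // With DAWN_ENABLE_ASSERTS the condition is checked and a failure reports
    // file/function/line; otherwise the macro degrades to a compiler hint
    // (__assume / __builtin_assume) or a no-op, per the #if ladder above.
    DAWN_ASSERT(index < kMaxAttachments);
}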
@@ -62,24 +62,18 @@ class BitSetIterator final {
        uint32_t mOffset;
    };

    Iterator begin() const { return Iterator(mBits); }
    Iterator end() const { return Iterator(std::bitset<N>(0)); }

  private:
    const std::bitset<N> mBits;
};

template <size_t N, typename T>
BitSetIterator<N, T>::BitSetIterator(const std::bitset<N>& bitset) : mBits(bitset) {}

template <size_t N, typename T>
BitSetIterator<N, T>::BitSetIterator(const BitSetIterator& other) : mBits(other.mBits) {}

template <size_t N, typename T>
BitSetIterator<N, T>& BitSetIterator<N, T>::operator=(const BitSetIterator& other) {
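Usage sketch for the iterator above (hedged: IterateBitSet is the convenience wrapper this header provides around BitSetIterator; it is not shown in this hunk):

#include <bitset>
#include "dawn/common/BitSetIterator.h"

void VisitDirtySlots(const std::bitset<32>& dirty) {
    // Visits only the set bits, yielding each index in increasing order.
    for (uint32_t slot : IterateBitSet(dirty)) {
        // Process |slot| ...
    }
}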
@@ -29,50 +29,50 @@

// Clang and GCC, check for __clang__ too to catch clang-cl masquerading as MSVC
#if defined(__GNUC__) || defined(__clang__)
#if defined(__clang__)
#define DAWN_COMPILER_CLANG
#else
#define DAWN_COMPILER_GCC
#endif

#if defined(__i386__) || defined(__x86_64__)
#define DAWN_BREAKPOINT() __asm__ __volatile__("int $3\n\t")
#else
// TODO(cwallez@chromium.org): Implement breakpoint on all supported architectures
#define DAWN_BREAKPOINT()
#endif

#define DAWN_BUILTIN_UNREACHABLE() __builtin_unreachable()
#define DAWN_LIKELY(x) __builtin_expect(!!(x), 1)
#define DAWN_UNLIKELY(x) __builtin_expect(!!(x), 0)

#if !defined(__has_cpp_attribute)
#define __has_cpp_attribute(name) 0
#endif

#define DAWN_DECLARE_UNUSED __attribute__((unused))
#if defined(NDEBUG)
#define DAWN_FORCE_INLINE inline __attribute__((always_inline))
#endif
#define DAWN_NOINLINE __attribute__((noinline))

// MSVC
#elif defined(_MSC_VER)
#define DAWN_COMPILER_MSVC

extern void __cdecl __debugbreak(void);
#define DAWN_BREAKPOINT() __debugbreak()

#define DAWN_BUILTIN_UNREACHABLE() __assume(false)

#define DAWN_DECLARE_UNUSED
#if defined(NDEBUG)
#define DAWN_FORCE_INLINE __forceinline
#endif
#define DAWN_NOINLINE __declspec(noinline)

#else
#error "Unsupported compiler"
#endif

// It seems that (void) EXPR works on all compilers to silence the unused variable warning.
@@ -82,16 +82,16 @@ extern void __cdecl __debugbreak(void);

// Add noop replacements for macros for features that aren't supported by the compiler.
#if !defined(DAWN_LIKELY)
#define DAWN_LIKELY(X) X
#endif
#if !defined(DAWN_UNLIKELY)
#define DAWN_UNLIKELY(X) X
#endif
#if !defined(DAWN_FORCE_INLINE)
#define DAWN_FORCE_INLINE inline
#endif
#if !defined(DAWN_NOINLINE)
#define DAWN_NOINLINE
#endif

#endif  // SRC_DAWN_COMMON_COMPILER_H_
@@ -22,12 +22,8 @@
template <typename T>
struct CoreFoundationRefTraits {
    static constexpr T kNullValue = nullptr;
    static void Reference(T value) { CFRetain(value); }
    static void Release(T value) { CFRelease(value); }
};

template <typename T>
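For context, these traits plug into Dawn's RefBase-style smart pointer for CoreFoundation handles (hedged: CFRef and AcquireCFRef are defined elsewhere in this header, not in the hunk above):

#include <CoreFoundation/CoreFoundation.h>
#include "dawn/common/CoreFoundationRef.h"

void Example() {
    // AcquireCFRef adopts the +1 reference returned by the Create call; the
    // traits' Release (CFRelease) runs automatically when |name| leaves scope.
    CFRef<CFStringRef> name =
        AcquireCFRef(CFStringCreateWithCString(nullptr, "dawn", kCFStringEncodingUTF8));
}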
@@ -19,14 +19,14 @@
#include "dawn/common/Platform.h"

#if DAWN_PLATFORM_WINDOWS
#include "dawn/common/windows_with_undefs.h"
#if DAWN_PLATFORM_WINUWP
#include "dawn/common/WindowsUtils.h"
#endif
#elif DAWN_PLATFORM_POSIX
#include <dlfcn.h>
#else
#error "Unsupported platform for DynamicLib"
#endif

DynamicLib::~DynamicLib() {

@@ -48,11 +48,11 @@ bool DynamicLib::Valid() const {

bool DynamicLib::Open(const std::string& filename, std::string* error) {
#if DAWN_PLATFORM_WINDOWS
#if DAWN_PLATFORM_WINUWP
    mHandle = LoadPackagedLibrary(UTF8ToWStr(filename.c_str()).c_str(), 0);
#else
    mHandle = LoadLibraryA(filename.c_str());
#endif
    if (mHandle == nullptr && error != nullptr) {
        *error = "Windows Error: " + std::to_string(GetLastError());
    }

@@ -63,7 +63,7 @@ bool DynamicLib::Open(const std::string& filename, std::string* error) {
        *error = dlerror();
    }
#else
#error "Unsupported platform for DynamicLib"
#endif

    return mHandle != nullptr;

@@ -79,7 +79,7 @@ void DynamicLib::Close() {
#elif DAWN_PLATFORM_POSIX
    dlclose(mHandle);
#else
#error "Unsupported platform for DynamicLib"
#endif

    mHandle = nullptr;

@@ -101,7 +101,7 @@ void* DynamicLib::GetProc(const std::string& procName, std::string* error) const
        *error = dlerror();
    }
#else
#error "Unsupported platform for DynamicLib"
#endif

    return proc;
@@ -20,66 +20,66 @@
#include "dawn/common/Assert.h"

namespace gpu_info {
-    namespace {
-        // Intel
-        // Referenced from the following Mesa source code:
-        // https://github.com/mesa3d/mesa/blob/master/include/pci_ids/i965_pci_ids.h
-        // gen9
-        const std::array<uint32_t, 25> Skylake = {
-            {0x1902, 0x1906, 0x190A, 0x190B, 0x190E, 0x1912, 0x1913, 0x1915, 0x1916,
-             0x1917, 0x191A, 0x191B, 0x191D, 0x191E, 0x1921, 0x1923, 0x1926, 0x1927,
-             0x192A, 0x192B, 0x192D, 0x1932, 0x193A, 0x193B, 0x193D}};
-        // gen9p5
-        const std::array<uint32_t, 20> Kabylake = {
-            {0x5916, 0x5913, 0x5906, 0x5926, 0x5921, 0x5915, 0x590E, 0x591E, 0x5912, 0x5917,
-             0x5902, 0x591B, 0x593B, 0x590B, 0x591A, 0x590A, 0x591D, 0x5908, 0x5923, 0x5927}};
-        const std::array<uint32_t, 17> Coffeelake = {
-            {0x87CA, 0x3E90, 0x3E93, 0x3E99, 0x3E9C, 0x3E91, 0x3E92, 0x3E96, 0x3E98, 0x3E9A, 0x3E9B,
-             0x3E94, 0x3EA9, 0x3EA5, 0x3EA6, 0x3EA7, 0x3EA8}};
-        const std::array<uint32_t, 5> Whiskylake = {{0x3EA1, 0x3EA4, 0x3EA0, 0x3EA3, 0x3EA2}};
-        const std::array<uint32_t, 21> Cometlake = {
+namespace {
+// Intel
+// Referenced from the following Mesa source code:
+// https://github.com/mesa3d/mesa/blob/master/include/pci_ids/i965_pci_ids.h
+// gen9
+const std::array<uint32_t, 25> Skylake = {{0x1902, 0x1906, 0x190A, 0x190B, 0x190E, 0x1912, 0x1913,
+                                           0x1915, 0x1916, 0x1917, 0x191A, 0x191B, 0x191D, 0x191E,
+                                           0x1921, 0x1923, 0x1926, 0x1927, 0x192A, 0x192B, 0x192D,
+                                           0x1932, 0x193A, 0x193B, 0x193D}};
+// gen9p5
+const std::array<uint32_t, 20> Kabylake = {{0x5916, 0x5913, 0x5906, 0x5926, 0x5921, 0x5915, 0x590E,
+                                            0x591E, 0x5912, 0x5917, 0x5902, 0x591B, 0x593B, 0x590B,
+                                            0x591A, 0x590A, 0x591D, 0x5908, 0x5923, 0x5927}};
+const std::array<uint32_t, 17> Coffeelake = {{0x87CA, 0x3E90, 0x3E93, 0x3E99, 0x3E9C, 0x3E91,
+                                              0x3E92, 0x3E96, 0x3E98, 0x3E9A, 0x3E9B, 0x3E94,
+                                              0x3EA9, 0x3EA5, 0x3EA6, 0x3EA7, 0x3EA8}};
+const std::array<uint32_t, 5> Whiskylake = {{0x3EA1, 0x3EA4, 0x3EA0, 0x3EA3, 0x3EA2}};
+const std::array<uint32_t, 21> Cometlake = {
    {0x9B21, 0x9BA0, 0x9BA2, 0x9BA4, 0x9BA5, 0x9BA8, 0x9BAA, 0x9BAB, 0x9BAC, 0x9B41, 0x9BC0,
     0x9BC2, 0x9BC4, 0x9BC5, 0x9BC6, 0x9BC8, 0x9BCA, 0x9BCB, 0x9BCC, 0x9BE6, 0x9BF6}};

-        // According to Intel graphics driver version schema, build number is generated from the
-        // last two fields.
-        // See https://www.intel.com/content/www/us/en/support/articles/000005654/graphics.html for
-        // more details.
-        uint32_t GetIntelD3DDriverBuildNumber(const D3DDriverVersion& driverVersion) {
+// According to Intel graphics driver version schema, build number is generated from the
+// last two fields.
+// See https://www.intel.com/content/www/us/en/support/articles/000005654/graphics.html for
+// more details.
+uint32_t GetIntelD3DDriverBuildNumber(const D3DDriverVersion& driverVersion) {
    return driverVersion[2] * 10000 + driverVersion[3];
-        }
+}

-    }  // anonymous namespace
+}  // anonymous namespace

-    bool IsAMD(PCIVendorID vendorId) {
+bool IsAMD(PCIVendorID vendorId) {
    return vendorId == kVendorID_AMD;
-    }
-    bool IsARM(PCIVendorID vendorId) {
+}
+bool IsARM(PCIVendorID vendorId) {
    return vendorId == kVendorID_ARM;
-    }
-    bool IsImgTec(PCIVendorID vendorId) {
+}
+bool IsImgTec(PCIVendorID vendorId) {
    return vendorId == kVendorID_ImgTec;
-    }
-    bool IsIntel(PCIVendorID vendorId) {
+}
+bool IsIntel(PCIVendorID vendorId) {
    return vendorId == kVendorID_Intel;
-    }
-    bool IsMesa(PCIVendorID vendorId) {
+}
+bool IsMesa(PCIVendorID vendorId) {
    return vendorId == kVendorID_Mesa;
-    }
-    bool IsNvidia(PCIVendorID vendorId) {
+}
+bool IsNvidia(PCIVendorID vendorId) {
    return vendorId == kVendorID_Nvidia;
-    }
-    bool IsQualcomm(PCIVendorID vendorId) {
+}
+bool IsQualcomm(PCIVendorID vendorId) {
    return vendorId == kVendorID_Qualcomm;
-    }
-    bool IsSwiftshader(PCIVendorID vendorId, PCIDeviceID deviceId) {
+}
+bool IsSwiftshader(PCIVendorID vendorId, PCIDeviceID deviceId) {
    return vendorId == kVendorID_Google && deviceId == kDeviceID_Swiftshader;
-    }
-    bool IsWARP(PCIVendorID vendorId, PCIDeviceID deviceId) {
+}
+bool IsWARP(PCIVendorID vendorId, PCIDeviceID deviceId) {
    return vendorId == kVendorID_Microsoft && deviceId == kDeviceID_WARP;
-    }
+}

-    int CompareD3DDriverVersion(PCIVendorID vendorId,
+int CompareD3DDriverVersion(PCIVendorID vendorId,
                            const D3DDriverVersion& version1,
                            const D3DDriverVersion& version2) {
    if (IsIntel(vendorId)) {

@@ -91,18 +91,18 @@ namespace gpu_info {
        // TODO(crbug.com/dawn/823): support other GPU vendors
        UNREACHABLE();
        return 0;
-    }
+}

-    // Intel GPUs
-    bool IsSkylake(PCIDeviceID deviceId) {
+// Intel GPUs
+bool IsSkylake(PCIDeviceID deviceId) {
    return std::find(Skylake.cbegin(), Skylake.cend(), deviceId) != Skylake.cend();
-    }
-    bool IsKabylake(PCIDeviceID deviceId) {
+}
+bool IsKabylake(PCIDeviceID deviceId) {
    return std::find(Kabylake.cbegin(), Kabylake.cend(), deviceId) != Kabylake.cend();
-    }
-    bool IsCoffeelake(PCIDeviceID deviceId) {
+}
+bool IsCoffeelake(PCIDeviceID deviceId) {
    return (std::find(Coffeelake.cbegin(), Coffeelake.cend(), deviceId) != Coffeelake.cend()) ||
           (std::find(Whiskylake.cbegin(), Whiskylake.cend(), deviceId) != Whiskylake.cend()) ||
           (std::find(Cometlake.cbegin(), Cometlake.cend(), deviceId) != Cometlake.cend());
-    }
+}
}  // namespace gpu_info

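A note on the build-number arithmetic above: the last two fields of the four-part Intel driver version are folded into a single integer before comparison. A worked sketch (the version values below are made up for illustration):

#include <array>
#include <cstdint>

using D3DDriverVersion = std::array<uint16_t, 4>;

uint32_t GetIntelD3DDriverBuildNumber(const D3DDriverVersion& v) {
    // For a version a.b.c.d, the build number is c * 10000 + d.
    return v[2] * 10000 + v[3];
}

int main() {
    D3DDriverVersion older = {26, 20, 100, 9466};  // build 100 * 10000 + 9466 = 1009466
    D3DDriverVersion newer = {30, 0, 101, 1340};   // build 101 * 10000 + 1340 = 1011340
    return GetIntelD3DDriverBuildNumber(newer) > GetIntelD3DDriverBuildNumber(older) ? 0 : 1;
}
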
@@ -23,44 +23,44 @@ using PCIDeviceID = uint32_t;

namespace gpu_info {

-    static constexpr PCIVendorID kVendorID_AMD = 0x1002;
-    static constexpr PCIVendorID kVendorID_ARM = 0x13B5;
-    static constexpr PCIVendorID kVendorID_ImgTec = 0x1010;
-    static constexpr PCIVendorID kVendorID_Intel = 0x8086;
-    static constexpr PCIVendorID kVendorID_Mesa = 0x10005;
-    static constexpr PCIVendorID kVendorID_Nvidia = 0x10DE;
-    static constexpr PCIVendorID kVendorID_Qualcomm = 0x5143;
-    static constexpr PCIVendorID kVendorID_Google = 0x1AE0;
-    static constexpr PCIVendorID kVendorID_Microsoft = 0x1414;
+static constexpr PCIVendorID kVendorID_AMD = 0x1002;
+static constexpr PCIVendorID kVendorID_ARM = 0x13B5;
+static constexpr PCIVendorID kVendorID_ImgTec = 0x1010;
+static constexpr PCIVendorID kVendorID_Intel = 0x8086;
+static constexpr PCIVendorID kVendorID_Mesa = 0x10005;
+static constexpr PCIVendorID kVendorID_Nvidia = 0x10DE;
+static constexpr PCIVendorID kVendorID_Qualcomm = 0x5143;
+static constexpr PCIVendorID kVendorID_Google = 0x1AE0;
+static constexpr PCIVendorID kVendorID_Microsoft = 0x1414;

-    static constexpr PCIDeviceID kDeviceID_Swiftshader = 0xC0DE;
-    static constexpr PCIDeviceID kDeviceID_WARP = 0x8c;
+static constexpr PCIDeviceID kDeviceID_Swiftshader = 0xC0DE;
+static constexpr PCIDeviceID kDeviceID_WARP = 0x8c;

-    bool IsAMD(PCIVendorID vendorId);
-    bool IsARM(PCIVendorID vendorId);
-    bool IsImgTec(PCIVendorID vendorId);
-    bool IsIntel(PCIVendorID vendorId);
-    bool IsMesa(PCIVendorID vendorId);
-    bool IsNvidia(PCIVendorID vendorId);
-    bool IsQualcomm(PCIVendorID vendorId);
-    bool IsSwiftshader(PCIVendorID vendorId, PCIDeviceID deviceId);
-    bool IsWARP(PCIVendorID vendorId, PCIDeviceID deviceId);
+bool IsAMD(PCIVendorID vendorId);
+bool IsARM(PCIVendorID vendorId);
+bool IsImgTec(PCIVendorID vendorId);
+bool IsIntel(PCIVendorID vendorId);
+bool IsMesa(PCIVendorID vendorId);
+bool IsNvidia(PCIVendorID vendorId);
+bool IsQualcomm(PCIVendorID vendorId);
+bool IsSwiftshader(PCIVendorID vendorId, PCIDeviceID deviceId);
+bool IsWARP(PCIVendorID vendorId, PCIDeviceID deviceId);

-    using D3DDriverVersion = std::array<uint16_t, 4>;
+using D3DDriverVersion = std::array<uint16_t, 4>;

-    // Do comparison between two driver versions. Currently we only support the comparison between
-    // Intel D3D driver versions.
-    // - Return -1 if build number of version1 is smaller
-    // - Return 1 if build number of version1 is bigger
-    // - Return 0 if version1 and version2 represent same driver version
-    int CompareD3DDriverVersion(PCIVendorID vendorId,
+// Do comparison between two driver versions. Currently we only support the comparison between
+// Intel D3D driver versions.
+// - Return -1 if build number of version1 is smaller
+// - Return 1 if build number of version1 is bigger
+// - Return 0 if version1 and version2 represent same driver version
+int CompareD3DDriverVersion(PCIVendorID vendorId,
                            const D3DDriverVersion& version1,
                            const D3DDriverVersion& version2);

-    // Intel architectures
-    bool IsSkylake(PCIDeviceID deviceId);
-    bool IsKabylake(PCIDeviceID deviceId);
-    bool IsCoffeelake(PCIDeviceID deviceId);
+// Intel architectures
+bool IsSkylake(PCIDeviceID deviceId);
+bool IsKabylake(PCIDeviceID deviceId);
+bool IsCoffeelake(PCIDeviceID deviceId);

}  // namespace gpu_info

#endif  // SRC_DAWN_COMMON_GPUINFO_H_

@@ -50,7 +50,7 @@ void HashCombine(size_t* hash, const T& value) {
#elif defined(DAWN_PLATFORM_32_BIT)
    const size_t offset = 0x9e3779b9;
#else
-#    error "Unsupported platform"
+#error "Unsupported platform"
#endif
    *hash ^= Hash(value) + offset + (*hash << 6) + (*hash >> 2);
}

@@ -89,13 +89,13 @@ size_t Hash(const std::bitset<N>& value) {
#endif

namespace std {
-    template <typename Index, size_t N>
-    struct hash<ityp::bitset<Index, N>> {
+template <typename Index, size_t N>
+struct hash<ityp::bitset<Index, N>> {
  public:
    size_t operator()(const ityp::bitset<Index, N>& value) const {
        return Hash(static_cast<const std::bitset<N>&>(value));
    }
-    };
+};
}  // namespace std

#endif  // SRC_DAWN_COMMON_HASHUTILS_H_

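The HashCombine seen above follows the familiar boost-style recipe: each field is mixed in with the golden-ratio constant and two shifted copies of the running hash. A minimal sketch of a caller, with a hypothetical key type:

#include <cstddef>
#include <cstdint>
#include "dawn/common/HashUtils.h"

struct TextureKey {  // hypothetical key type, for illustration only
    uint32_t width;
    uint32_t height;
    uint32_t format;
};

size_t HashKey(const TextureKey& key) {
    size_t hash = 0;
    // Each call folds one field in: *hash ^= Hash(v) + offset + (*hash << 6) + (*hash >> 2).
    HashCombine(&hash, key.width);
    HashCombine(&hash, key.height);
    HashCombine(&hash, key.format);
    return hash;
}
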
@@ -22,12 +22,8 @@
template <typename T>
struct IOKitRefTraits {
    static constexpr T kNullValue = IO_OBJECT_NULL;
-    static void Reference(T value) {
-        IOObjectRetain(value);
-    }
-    static void Release(T value) {
-        IOObjectRelease(value);
-    }
+    static void Reference(T value) { IOObjectRetain(value); }
+    static void Release(T value) { IOObjectRelease(value); }
};

template <typename T>

@@ -99,10 +99,8 @@ class LinkedList;
template <typename T>
class LinkNode {
  public:
-    LinkNode() : previous_(nullptr), next_(nullptr) {
-    }
-    LinkNode(LinkNode<T>* previous, LinkNode<T>* next) : previous_(previous), next_(next) {
-    }
+    LinkNode() : previous_(nullptr), next_(nullptr) {}
+    LinkNode(LinkNode<T>* previous, LinkNode<T>* next) : previous_(previous), next_(next) {}

    LinkNode(LinkNode<T>&& rhs) {
        next_ = rhs.next_;

@@ -154,22 +152,14 @@ class LinkNode {
        return true;
    }

-    LinkNode<T>* previous() const {
-        return previous_;
-    }
+    LinkNode<T>* previous() const { return previous_; }

-    LinkNode<T>* next() const {
-        return next_;
-    }
+    LinkNode<T>* next() const { return next_; }

    // Cast from the node-type to the value type.
-    const T* value() const {
-        return static_cast<const T*>(this);
-    }
+    const T* value() const { return static_cast<const T*>(this); }

-    T* value() {
-        return static_cast<T*>(this);
-    }
+    T* value() { return static_cast<T*>(this); }

  private:
    friend class LinkedList<T>;

@@ -183,8 +173,7 @@ class LinkedList {
    // The "root" node is self-referential, and forms the basis of a circular
    // list (root_.next() will point back to the start of the list,
    // and root_->previous() wraps around to the end of the list).
-    LinkedList() : root_(&root_, &root_) {
-    }
+    LinkedList() : root_(&root_, &root_) {}

    ~LinkedList() {
        // If any LinkNodes still exist in the LinkedList, there will be outstanding references to

@@ -194,9 +183,7 @@ class LinkedList {
    }

    // Appends |e| to the end of the linked list.
-    void Append(LinkNode<T>* e) {
-        e->InsertBefore(&root_);
-    }
+    void Append(LinkNode<T>* e) { e->InsertBefore(&root_); }

    // Moves all elements (in order) of the list and appends them into |l| leaving the list empty.
    void MoveInto(LinkedList<T>* l) {

@@ -212,21 +199,13 @@ class LinkedList {
        root_.previous_ = &root_;
    }

-    LinkNode<T>* head() const {
-        return root_.next();
-    }
+    LinkNode<T>* head() const { return root_.next(); }

-    LinkNode<T>* tail() const {
-        return root_.previous();
-    }
+    LinkNode<T>* tail() const { return root_.previous(); }

-    const LinkNode<T>* end() const {
-        return &root_;
-    }
+    const LinkNode<T>* end() const { return &root_; }

-    bool empty() const {
-        return head() == end();
-    }
+    bool empty() const { return head() == end(); }

  private:
    LinkNode<T> root_;

@@ -235,8 +214,7 @@ class LinkedList {
template <typename T>
class LinkedListIterator {
  public:
-    explicit LinkedListIterator(LinkNode<T>* node) : current_(node), next_(node->next()) {
-    }
+    explicit LinkedListIterator(LinkNode<T>* node) : current_(node), next_(node->next()) {}

    // We keep an early reference to the next node in the list so that even if the current element
    // is modified or removed from the list, we have a valid next node.

@@ -246,13 +224,9 @@ class LinkedListIterator {
        return *this;
    }

-    bool operator!=(const LinkedListIterator<T>& other) const {
-        return current_ != other.current_;
-    }
+    bool operator!=(const LinkedListIterator<T>& other) const { return current_ != other.current_; }

-    LinkNode<T>* operator*() const {
-        return current_;
-    }
+    LinkNode<T>* operator*() const { return current_; }

  private:
    LinkNode<T>* current_;

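Because the root node is self-referential, traversal terminates when the cursor reaches end() (the root itself) rather than a null pointer. A minimal sketch of intrusive use, with a hypothetical Node type; per the ~LinkedList comment above, nodes must be unlinked before the list is destroyed:

struct Node : public LinkNode<Node> {  // hypothetical intrusive element
    explicit Node(int id) : id(id) {}
    int id;
};

void Walk(LinkedList<Node>& list, Node* a, Node* b) {
    list.Append(a);  // InsertBefore(&root_): the node becomes the new tail
    list.Append(b);

    // Stops at end(), the self-referential root node, not at nullptr.
    for (LinkNode<Node>* n = list.head(); n != list.end(); n = n->next()) {
        int id = n->value()->id;  // value() downcasts the node to its T
        (void)id;
    }
}
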
@@ -21,14 +21,14 @@
#include "dawn/common/Platform.h"

#if defined(DAWN_PLATFORM_ANDROID)
-#    include <android/log.h>
+#include <android/log.h>
#endif

namespace dawn {

-    namespace {
+namespace {

-        const char* SeverityName(LogSeverity severity) {
+const char* SeverityName(LogSeverity severity) {
    switch (severity) {
        case LogSeverity::Debug:
            return "Debug";

@@ -42,10 +42,10 @@ namespace dawn {
    UNREACHABLE();
    return "";
-        }
+}

#if defined(DAWN_PLATFORM_ANDROID)
-        android_LogPriority AndroidLogPriority(LogSeverity severity) {
+android_LogPriority AndroidLogPriority(LogSeverity severity) {
    switch (severity) {
        case LogSeverity::Debug:
            return ANDROID_LOG_INFO;

@@ -59,15 +59,14 @@ namespace dawn {
    UNREACHABLE();
    return ANDROID_LOG_ERROR;
-        }
+}
#endif  // defined(DAWN_PLATFORM_ANDROID)

-    }  // anonymous namespace
+}  // anonymous namespace

-    LogMessage::LogMessage(LogSeverity severity) : mSeverity(severity) {
-    }
+LogMessage::LogMessage(LogSeverity severity) : mSeverity(severity) {}

-    LogMessage::~LogMessage() {
+LogMessage::~LogMessage() {
    std::string fullMessage = mStream.str();

    // If this message has been moved, its stream is empty.

@@ -90,28 +89,28 @@ namespace dawn {
    fprintf(outputStream, "%s: %s\n", severityName, fullMessage.c_str());
    fflush(outputStream);
#endif  // defined(DAWN_PLATFORM_ANDROID)
-    }
+}

-    LogMessage DebugLog() {
+LogMessage DebugLog() {
    return LogMessage(LogSeverity::Debug);
-    }
+}

-    LogMessage InfoLog() {
+LogMessage InfoLog() {
    return LogMessage(LogSeverity::Info);
-    }
+}

-    LogMessage WarningLog() {
+LogMessage WarningLog() {
    return LogMessage(LogSeverity::Warning);
-    }
+}

-    LogMessage ErrorLog() {
+LogMessage ErrorLog() {
    return LogMessage(LogSeverity::Error);
-    }
+}

-    LogMessage DebugLog(const char* file, const char* function, int line) {
+LogMessage DebugLog(const char* file, const char* function, int line) {
    LogMessage message = DebugLog();
    message << file << ":" << line << "(" << function << ")";
    return message;
-    }
+}

}  // namespace dawn

@@ -47,17 +47,17 @@

namespace dawn {

-    // Log levels mostly used to signal intent where the log message is produced and used to route
-    // the message to the correct output.
-    enum class LogSeverity {
+// Log levels mostly used to signal intent where the log message is produced and used to route
+// the message to the correct output.
+enum class LogSeverity {
    Debug,
    Info,
    Warning,
    Error,
-    };
+};

-    // Essentially an ostringstream that will print itself in its destructor.
-    class LogMessage {
+// Essentially an ostringstream that will print itself in its destructor.
+class LogMessage {
  public:
    explicit LogMessage(LogSeverity severity);
    ~LogMessage();

@@ -77,17 +77,17 @@ namespace dawn {

    LogSeverity mSeverity;
    std::ostringstream mStream;
-    };
+};

-    // Short-hands to create a LogMessage with the respective severity.
-    LogMessage DebugLog();
-    LogMessage InfoLog();
-    LogMessage WarningLog();
-    LogMessage ErrorLog();
+// Short-hands to create a LogMessage with the respective severity.
+LogMessage DebugLog();
+LogMessage InfoLog();
+LogMessage WarningLog();
+LogMessage ErrorLog();

-    // DAWN_DEBUG is a helper macro that creates a DebugLog and outputs file/line/function
-    // information
-    LogMessage DebugLog(const char* file, const char* function, int line);
+// DAWN_DEBUG is a helper macro that creates a DebugLog and outputs file/line/function
+// information
+LogMessage DebugLog(const char* file, const char* function, int line);
#define DAWN_DEBUG() ::dawn::DebugLog(__FILE__, __func__, __LINE__)

}  // namespace dawn

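Since LogMessage is an ostringstream that prints in its destructor, a whole chained stream expression is emitted as one line when the temporary dies at the end of the statement. A usage sketch:

#include "dawn/common/Log.h"

void Example(int width, int height) {
    // The temporary LogMessage buffers the stream and prints one line
    // when it is destroyed at the end of the full expression.
    dawn::InfoLog() << "size: " << width << "x" << height;

    // DAWN_DEBUG() additionally prefixes file:line(function).
    DAWN_DEBUG() << "created";
}
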
@@ -22,7 +22,7 @@
#include "dawn/common/Platform.h"

#if defined(DAWN_COMPILER_MSVC)
-#    include <intrin.h>
+#include <intrin.h>
#endif

uint32_t ScanForward(uint32_t bits) {

@@ -54,13 +54,13 @@ uint32_t Log2(uint32_t value) {
uint32_t Log2(uint64_t value) {
    ASSERT(value != 0);
#if defined(DAWN_COMPILER_MSVC)
-#    if defined(DAWN_PLATFORM_64_BIT)
+#if defined(DAWN_PLATFORM_64_BIT)
    // NOLINTNEXTLINE(runtime/int)
    unsigned long firstBitIndex = 0ul;
    unsigned char ret = _BitScanReverse64(&firstBitIndex, value);
    ASSERT(ret != 0);
    return firstBitIndex;
-#    else   // defined(DAWN_PLATFORM_64_BIT)
+#else  // defined(DAWN_PLATFORM_64_BIT)
    // NOLINTNEXTLINE(runtime/int)
    unsigned long firstBitIndex = 0ul;
    if (_BitScanReverse(&firstBitIndex, value >> 32)) {

@@ -69,7 +69,7 @@ uint32_t Log2(uint64_t value) {
    unsigned char ret = _BitScanReverse(&firstBitIndex, value & 0xFFFFFFFF);
    ASSERT(ret != 0);
    return firstBitIndex;
-#    endif  // defined(DAWN_PLATFORM_64_BIT)
+#endif  // defined(DAWN_PLATFORM_64_BIT)
#else  // defined(DAWN_COMPILER_MSVC)
    return 63 - static_cast<uint32_t>(__builtin_clzll(value));
#endif  // defined(DAWN_COMPILER_MSVC)

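Both branches compute the same thing: the index of the most significant set bit. A few worked values, assuming the Log2 overloads from this file:

// Log2 returns the index of the most significant set bit, i.e. floor(log2(v)):
//   Log2(uint64_t(1))          == 0
//   Log2(uint64_t(0x1000))     == 12   // 2^12
//   Log2(uint64_t(0xFFFFFFFF)) == 31   // not rounded up
// GCC/Clang use 63 - __builtin_clzll(value); MSVC uses _BitScanReverse64,
// with the 32-bit build scanning the high half and then the low half.
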
@@ -20,7 +20,7 @@
#import <Foundation/NSObject.h>

#if !defined(__OBJC__)
-#    error "NSRef can only be used in Objective C/C++ code."
+#error "NSRef can only be used in Objective C/C++ code."
#endif

// This file contains smart pointers that automatically reference and release Objective C objects

@@ -67,12 +67,8 @@
template <typename T>
struct NSRefTraits {
    static constexpr T kNullValue = nullptr;
-    static void Reference(T value) {
-        [value retain];
-    }
-    static void Release(T value) {
-        [value release];
-    }
+    static void Reference(T value) { [value retain]; }
+    static void Release(T value) { [value release]; }
};

template <typename T>

@@ -80,13 +76,9 @@ class NSRef : public RefBase<T*, NSRefTraits<T*>> {
  public:
    using RefBase<T*, NSRefTraits<T*>>::RefBase;

-    const T* operator*() const {
-        return this->Get();
-    }
+    const T* operator*() const { return this->Get(); }

-    T* operator*() {
-        return this->Get();
-    }
+    T* operator*() { return this->Get(); }
};

template <typename T>

@@ -104,13 +96,9 @@ class NSPRef : public RefBase<T, NSRefTraits<T>> {
  public:
    using RefBase<T, NSRefTraits<T>>::RefBase;

-    const T operator*() const {
-        return this->Get();
-    }
+    const T operator*() const { return this->Get(); }

-    T operator*() {
-        return this->Get();
-    }
+    T operator*() { return this->Get(); }
};

template <typename T>

@@ -22,17 +22,17 @@

namespace detail {

-    template <typename T>
-    inline constexpr uint32_t u32_sizeof() {
+template <typename T>
+inline constexpr uint32_t u32_sizeof() {
    static_assert(sizeof(T) <= std::numeric_limits<uint32_t>::max());
    return uint32_t(sizeof(T));
-    }
+}

-    template <typename T>
-    inline constexpr uint32_t u32_alignof() {
+template <typename T>
+inline constexpr uint32_t u32_alignof() {
    static_assert(alignof(T) <= std::numeric_limits<uint32_t>::max());
    return uint32_t(alignof(T));
-    }
+}

}  // namespace detail

@@ -16,67 +16,67 @@
#define SRC_DAWN_COMMON_PLATFORM_H_

#if defined(_WIN32) || defined(_WIN64)
-#    include <winapifamily.h>
-#    define DAWN_PLATFORM_WINDOWS 1
-#    if WINAPI_FAMILY == WINAPI_FAMILY_DESKTOP_APP
-#        define DAWN_PLATFORM_WIN32 1
-#    elif WINAPI_FAMILY == WINAPI_FAMILY_PC_APP
-#        define DAWN_PLATFORM_WINUWP 1
-#    else
-#        error "Unsupported Windows platform."
-#    endif
+#include <winapifamily.h>
+#define DAWN_PLATFORM_WINDOWS 1
+#if WINAPI_FAMILY == WINAPI_FAMILY_DESKTOP_APP
+#define DAWN_PLATFORM_WIN32 1
+#elif WINAPI_FAMILY == WINAPI_FAMILY_PC_APP
+#define DAWN_PLATFORM_WINUWP 1
+#else
+#error "Unsupported Windows platform."
+#endif

#elif defined(__linux__)
-#    define DAWN_PLATFORM_LINUX 1
-#    define DAWN_PLATFORM_POSIX 1
-#    if defined(__ANDROID__)
-#        define DAWN_PLATFORM_ANDROID 1
-#    endif
+#define DAWN_PLATFORM_LINUX 1
+#define DAWN_PLATFORM_POSIX 1
+#if defined(__ANDROID__)
+#define DAWN_PLATFORM_ANDROID 1
+#endif

#elif defined(__APPLE__)
-#    define DAWN_PLATFORM_APPLE 1
-#    define DAWN_PLATFORM_POSIX 1
-#    include <TargetConditionals.h>
-#    if TARGET_OS_IPHONE
-#        define DAWN_PLATFORM_IOS
-#    elif TARGET_OS_MAC
-#        define DAWN_PLATFORM_MACOS
-#    else
-#        error "Unsupported Apple platform."
-#    endif
+#define DAWN_PLATFORM_APPLE 1
+#define DAWN_PLATFORM_POSIX 1
+#include <TargetConditionals.h>
+#if TARGET_OS_IPHONE
+#define DAWN_PLATFORM_IOS
+#elif TARGET_OS_MAC
+#define DAWN_PLATFORM_MACOS
+#else
+#error "Unsupported Apple platform."
+#endif

#elif defined(__Fuchsia__)
-#    define DAWN_PLATFORM_FUCHSIA 1
-#    define DAWN_PLATFORM_POSIX 1
+#define DAWN_PLATFORM_FUCHSIA 1
+#define DAWN_PLATFORM_POSIX 1

#elif defined(__EMSCRIPTEN__)
-#    define DAWN_PLATFORM_EMSCRIPTEN 1
-#    define DAWN_PLATFORM_POSIX 1
+#define DAWN_PLATFORM_EMSCRIPTEN 1
+#define DAWN_PLATFORM_POSIX 1

#else
-#    error "Unsupported platform."
+#error "Unsupported platform."
#endif

// Distinguish mips32.
#if defined(__mips__) && (_MIPS_SIM == _ABIO32) && !defined(__mips32__)
-#    define __mips32__
+#define __mips32__
#endif

// Distinguish mips64.
#if defined(__mips__) && (_MIPS_SIM == _ABI64) && !defined(__mips64__)
-#    define __mips64__
+#define __mips64__
#endif

#if defined(_WIN64) || defined(__aarch64__) || defined(__x86_64__) || defined(__mips64__) || \
    defined(__s390x__) || defined(__PPC64__)
-#    define DAWN_PLATFORM_64_BIT 1
+#define DAWN_PLATFORM_64_BIT 1
static_assert(sizeof(sizeof(char)) == 8, "Expect sizeof(size_t) == 8");
#elif defined(_WIN32) || defined(__arm__) || defined(__i386__) || defined(__mips32__) || \
    defined(__s390__) || defined(__EMSCRIPTEN__)
-#    define DAWN_PLATFORM_32_BIT 1
+#define DAWN_PLATFORM_32_BIT 1
static_assert(sizeof(sizeof(char)) == 4, "Expect sizeof(size_t) == 4");
#else
-#    error "Unsupported platform"
+#error "Unsupported platform"
#endif

#endif  // SRC_DAWN_COMMON_PLATFORM_H_

@@ -36,17 +36,13 @@ template <typename T, typename Traits>
class RefBase {
  public:
    // Default constructor and destructor.
-    RefBase() : mValue(Traits::kNullValue) {
-    }
+    RefBase() : mValue(Traits::kNullValue) {}

-    ~RefBase() {
-        Release(mValue);
-    }
+    ~RefBase() { Release(mValue); }

    // Constructors from nullptr.
    // NOLINTNEXTLINE(runtime/explicit)
-    constexpr RefBase(std::nullptr_t) : RefBase() {
-    }
+    constexpr RefBase(std::nullptr_t) : RefBase() {}

    RefBase<T, Traits>& operator=(std::nullptr_t) {
        Set(Traits::kNullValue);

@@ -55,9 +51,7 @@ class RefBase {

    // Constructors from a value T.
    // NOLINTNEXTLINE(runtime/explicit)
-    RefBase(T value) : mValue(value) {
-        Reference(value);
-    }
+    RefBase(T value) : mValue(value) { Reference(value); }

    RefBase<T, Traits>& operator=(const T& value) {
        Set(value);

@@ -65,18 +59,14 @@ class RefBase {
    }

    // Constructors from a RefBase<T>
-    RefBase(const RefBase<T, Traits>& other) : mValue(other.mValue) {
-        Reference(other.mValue);
-    }
+    RefBase(const RefBase<T, Traits>& other) : mValue(other.mValue) { Reference(other.mValue); }

    RefBase<T, Traits>& operator=(const RefBase<T, Traits>& other) {
        Set(other.mValue);
        return *this;
    }

-    RefBase(RefBase<T, Traits>&& other) {
-        mValue = other.Detach();
-    }
+    RefBase(RefBase<T, Traits>&& other) { mValue = other.Detach(); }

    RefBase<T, Traits>& operator=(RefBase<T, Traits>&& other) {
        if (&other != this) {

@@ -113,28 +103,16 @@ class RefBase {
    }

    // Comparison operators.
-    bool operator==(const T& other) const {
-        return mValue == other;
-    }
+    bool operator==(const T& other) const { return mValue == other; }

-    bool operator!=(const T& other) const {
-        return mValue != other;
-    }
+    bool operator!=(const T& other) const { return mValue != other; }

-    const T operator->() const {
-        return mValue;
-    }
-    T operator->() {
-        return mValue;
-    }
+    const T operator->() const { return mValue; }
+    T operator->() { return mValue; }

    // Smart pointer methods.
-    const T& Get() const {
-        return mValue;
-    }
-    T& Get() {
-        return mValue;
-    }
+    const T& Get() const { return mValue; }
+    T& Get() { return mValue; }

    [[nodiscard]] T Detach() {
        T value{std::move(mValue)};

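RefBase is the policy-based core behind the CFRef, IOKitRef, NSRef and RefCounted traits seen elsewhere in this CL: the Traits type supplies kNullValue, Reference and Release, and RefBase does the bookkeeping. A sketch of a traits type for a hypothetical C handle API (the Widget functions are illustrative only):

struct Widget;                  // hypothetical ref-counted handle
void WidgetAddRef(Widget* w);   // hypothetical
void WidgetRelease(Widget* w);  // hypothetical

struct WidgetRefTraits {
    static constexpr Widget* kNullValue = nullptr;
    static void Reference(Widget* w) { WidgetAddRef(w); }
    static void Release(Widget* w) { WidgetRelease(w); }
};

// Copies call Reference, the destructor calls Release, Detach() hands
// ownership back to the caller without releasing.
using WidgetRef = RefBase<Widget*, WidgetRefTraits>;
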
@@ -45,12 +45,8 @@ class RefCounted {
template <typename T>
struct RefCountedTraits {
    static constexpr T* kNullValue = nullptr;
-    static void Reference(T* value) {
-        value->Reference();
-    }
-    static void Release(T* value) {
-        value->Release();
-    }
+    static void Reference(T* value) { value->Reference(); }
+    static void Release(T* value) { value->Release(); }
};

template <typename T>

@@ -17,14 +17,14 @@
// Implementation details of the tagged pointer Results
namespace detail {

-    intptr_t MakePayload(const void* pointer, PayloadType type) {
+intptr_t MakePayload(const void* pointer, PayloadType type) {
    intptr_t payload = reinterpret_cast<intptr_t>(pointer);
    ASSERT((payload & 3) == 0);
    return payload | type;
-    }
+}

-    PayloadType GetPayloadType(intptr_t payload) {
+PayloadType GetPayloadType(intptr_t payload) {
    return static_cast<PayloadType>(payload & 3);
-    }
+}

}  // namespace detail

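MakePayload works because the pointee is at least 4-byte aligned, so the two low bits of the pointer are guaranteed zero and are free to hold the PayloadType tag. A sketch of the round trip, assuming an aligned error object e of type E*:

// intptr_t payload = detail::MakePayload(e, detail::Error);  // ptr | 1; low bits were 0
// detail::GetPayloadType(payload);                           // payload & 3  -> Error
// detail::GetErrorFromPayload<E>(payload);                   // payload ^ 1 restores the pointer
// Success is tag 0, so a successful payload is the pointer itself, untouched,
// which is why returning the success value is the fastest path.
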
@@ -63,7 +63,7 @@ class [[nodiscard]] Result<void, E> {
    Result();
    Result(std::unique_ptr<E> error);

-    Result(Result<void, E> && other);
+    Result(Result<void, E>&& other);
    Result<void, E>& operator=(Result<void, E>&& other);

    ~Result();

@@ -89,23 +89,23 @@ constexpr size_t alignof_if_defined_else_default<T, Default, decltype(alignof(T)
// tagged pointer. The tag for Success is 0 so that returning the value is fastest.

namespace detail {
-    // Utility functions to manipulate the tagged pointer. Some of them don't need to be templated
-    // but we really want them inlined so we keep them in the headers
-    enum PayloadType {
+// Utility functions to manipulate the tagged pointer. Some of them don't need to be templated
+// but we really want them inlined so we keep them in the headers
+enum PayloadType {
    Success = 0,
    Error = 1,
    Empty = 2,
-    };
+};

-    intptr_t MakePayload(const void* pointer, PayloadType type);
-    PayloadType GetPayloadType(intptr_t payload);
+intptr_t MakePayload(const void* pointer, PayloadType type);
+PayloadType GetPayloadType(intptr_t payload);

-    template <typename T>
-    static T* GetSuccessFromPayload(intptr_t payload);
-    template <typename E>
-    static E* GetErrorFromPayload(intptr_t payload);
+template <typename T>
+static T* GetSuccessFromPayload(intptr_t payload);
+template <typename E>
+static E* GetErrorFromPayload(intptr_t payload);

-    constexpr static intptr_t kEmptyPayload = Empty;
+constexpr static intptr_t kEmptyPayload = Empty;
}  // namespace detail

template <typename T, typename E>

@@ -116,12 +116,12 @@ class [[nodiscard]] Result<T*, E> {
    static_assert(alignof_if_defined_else_default<E, 4> >= 4,
                  "Result<T*, E*> reserves two bits for tagging pointers");

-    Result(T * success);
+    Result(T* success);
    Result(std::unique_ptr<E> error);

    // Support returning a Result<T*, E*> from a Result<TChild*, E*>
    template <typename TChild>
-    Result(Result<TChild*, E> && other);
+    Result(Result<TChild*, E>&& other);
    template <typename TChild>
    Result<T*, E>& operator=(Result<TChild*, E>&& other);

@@ -151,7 +151,7 @@ class [[nodiscard]] Result<const T*, E> {
    Result(const T* success);
    Result(std::unique_ptr<E> error);

-    Result(Result<const T*, E> && other);
+    Result(Result<const T*, E>&& other);
    Result<const T*, E>& operator=(Result<const T*, E>&& other);

    ~Result();

@@ -178,13 +178,13 @@ class [[nodiscard]] Result<Ref<T>, E> {
                  "Result<Ref<T>, E> reserves two bits for tagging pointers");

    template <typename U>
-    Result(Ref<U> && success);
+    Result(Ref<U>&& success);
    template <typename U>
    Result(const Ref<U>& success);
    Result(std::unique_ptr<E> error);

    template <typename U>
-    Result(Result<Ref<U>, E> && other);
+    Result(Result<Ref<U>, E>&& other);
    template <typename U>
    Result<Ref<U>, E>& operator=(Result<Ref<U>, E>&& other);

@@ -209,10 +209,10 @@ class [[nodiscard]] Result<Ref<T>, E> {
template <typename T, typename E>
class [[nodiscard]] Result {
  public:
-    Result(T && success);
+    Result(T&& success);
    Result(std::unique_ptr<E> error);

-    Result(Result<T, E> && other);
+    Result(Result<T, E>&& other);
    Result<T, E>& operator=(Result<T, E>&& other);

    ~Result();

@@ -237,16 +237,13 @@ class [[nodiscard]] Result {

// Implementation of Result<void, E>
template <typename E>
-Result<void, E>::Result() {
-}
+Result<void, E>::Result() {}

template <typename E>
-Result<void, E>::Result(std::unique_ptr<E> error) : mError(std::move(error)) {
-}
+Result<void, E>::Result(std::unique_ptr<E> error) : mError(std::move(error)) {}

template <typename E>
-Result<void, E>::Result(Result<void, E>&& other) : mError(std::move(other.mError)) {
-}
+Result<void, E>::Result(Result<void, E>&& other) : mError(std::move(other.mError)) {}

template <typename E>
Result<void, E>& Result<void, E>::operator=(Result<void, E>&& other) {

@@ -271,8 +268,7 @@ bool Result<void, E>::IsSuccess() const {
}

template <typename E>
-void Result<void, E>::AcquireSuccess() {
-}
+void Result<void, E>::AcquireSuccess() {}

template <typename E>
std::unique_ptr<E> Result<void, E>::AcquireError() {

@@ -282,29 +278,27 @@ std::unique_ptr<E> Result<void, E>::AcquireError() {
// Implementation details of the tagged pointer Results
namespace detail {

-    template <typename T>
-    T* GetSuccessFromPayload(intptr_t payload) {
+template <typename T>
+T* GetSuccessFromPayload(intptr_t payload) {
    ASSERT(GetPayloadType(payload) == Success);
    return reinterpret_cast<T*>(payload);
-    }
+}

-    template <typename E>
-    E* GetErrorFromPayload(intptr_t payload) {
+template <typename E>
+E* GetErrorFromPayload(intptr_t payload) {
    ASSERT(GetPayloadType(payload) == Error);
    return reinterpret_cast<E*>(payload ^ 1);
-    }
+}

}  // namespace detail

// Implementation of Result<T*, E>
template <typename T, typename E>
-Result<T*, E>::Result(T* success) : mPayload(detail::MakePayload(success, detail::Success)) {
-}
+Result<T*, E>::Result(T* success) : mPayload(detail::MakePayload(success, detail::Success)) {}

template <typename T, typename E>
Result<T*, E>::Result(std::unique_ptr<E> error)
-    : mPayload(detail::MakePayload(error.release(), detail::Error)) {
-}
+    : mPayload(detail::MakePayload(error.release(), detail::Error)) {}

template <typename T, typename E>
template <typename TChild>

@@ -355,13 +349,11 @@ std::unique_ptr<E> Result<T*, E>::AcquireError() {
// Implementation of Result<const T*, E*>
template <typename T, typename E>
Result<const T*, E>::Result(const T* success)
-    : mPayload(detail::MakePayload(success, detail::Success)) {
-}
+    : mPayload(detail::MakePayload(success, detail::Success)) {}

template <typename T, typename E>
Result<const T*, E>::Result(std::unique_ptr<E> error)
-    : mPayload(detail::MakePayload(error.release(), detail::Error)) {
-}
+    : mPayload(detail::MakePayload(error.release(), detail::Error)) {}

template <typename T, typename E>
Result<const T*, E>::Result(Result<const T*, E>&& other) : mPayload(other.mPayload) {

@@ -415,13 +407,11 @@ Result<Ref<T>, E>::Result(Ref<U>&& success)

template <typename T, typename E>
template <typename U>
-Result<Ref<T>, E>::Result(const Ref<U>& success) : Result(Ref<U>(success)) {
-}
+Result<Ref<T>, E>::Result(const Ref<U>& success) : Result(Ref<U>(success)) {}

template <typename T, typename E>
Result<Ref<T>, E>::Result(std::unique_ptr<E> error)
-    : mPayload(detail::MakePayload(error.release(), detail::Error)) {
-}
+    : mPayload(detail::MakePayload(error.release(), detail::Error)) {}

template <typename T, typename E>
template <typename U>

@@ -473,12 +463,10 @@ std::unique_ptr<E> Result<Ref<T>, E>::AcquireError() {

// Implementation of Result<T, E>
template <typename T, typename E>
-Result<T, E>::Result(T&& success) : mType(Success), mSuccess(std::move(success)) {
-}
+Result<T, E>::Result(T&& success) : mType(Success), mSuccess(std::move(success)) {}

template <typename T, typename E>
-Result<T, E>::Result(std::unique_ptr<E> error) : mType(Error), mError(std::move(error)) {
-}
+Result<T, E>::Result(std::unique_ptr<E> error) : mType(Error), mError(std::move(error)) {}

template <typename T, typename E>
Result<T, E>::~Result() {

@@ -193,8 +193,7 @@ typename SerialStorage<Derived>::StorageIterator SerialStorage<Derived>::FindUpT
template <typename Derived>
SerialStorage<Derived>::BeginEnd::BeginEnd(typename SerialStorage<Derived>::StorageIterator start,
                                           typename SerialStorage<Derived>::StorageIterator end)
-    : mStartIt(start), mEndIt(end) {
-}
+    : mStartIt(start), mEndIt(end) {}

template <typename Derived>
typename SerialStorage<Derived>::Iterator SerialStorage<Derived>::BeginEnd::begin() const {

@@ -210,8 +209,7 @@ typename SerialStorage<Derived>::Iterator SerialStorage<Derived>::BeginEnd::end(

template <typename Derived>
SerialStorage<Derived>::Iterator::Iterator(typename SerialStorage<Derived>::StorageIterator start)
-    : mStorageIterator(start), mSerialIterator(nullptr) {
-}
+    : mStorageIterator(start), mSerialIterator(nullptr) {}

template <typename Derived>
typename SerialStorage<Derived>::Iterator& SerialStorage<Derived>::Iterator::operator++() {

@@ -257,8 +255,7 @@ template <typename Derived>
SerialStorage<Derived>::ConstBeginEnd::ConstBeginEnd(
    typename SerialStorage<Derived>::ConstStorageIterator start,
    typename SerialStorage<Derived>::ConstStorageIterator end)
-    : mStartIt(start), mEndIt(end) {
-}
+    : mStartIt(start), mEndIt(end) {}

template <typename Derived>
typename SerialStorage<Derived>::ConstIterator SerialStorage<Derived>::ConstBeginEnd::begin()

@@ -276,8 +273,7 @@ typename SerialStorage<Derived>::ConstIterator SerialStorage<Derived>::ConstBegi
template <typename Derived>
SerialStorage<Derived>::ConstIterator::ConstIterator(
    typename SerialStorage<Derived>::ConstStorageIterator start)
-    : mStorageIterator(start), mSerialIterator(nullptr) {
-}
+    : mStorageIterator(start), mSerialIterator(nullptr) {}

template <typename Derived>
typename SerialStorage<Derived>::ConstIterator&

@@ -25,19 +25,16 @@
// IndexLinkNode

SlabAllocatorImpl::IndexLinkNode::IndexLinkNode(Index index, Index nextIndex)
-    : index(index), nextIndex(nextIndex) {
-}
+    : index(index), nextIndex(nextIndex) {}

// Slab

SlabAllocatorImpl::Slab::Slab(char allocation[], IndexLinkNode* head)
-    : allocation(allocation), freeList(head), prev(nullptr), next(nullptr), blocksInUse(0) {
-}
+    : allocation(allocation), freeList(head), prev(nullptr), next(nullptr), blocksInUse(0) {}

SlabAllocatorImpl::Slab::Slab(Slab&& rhs) = default;

-SlabAllocatorImpl::SentinelSlab::SentinelSlab() : Slab(nullptr, nullptr) {
-}
+SlabAllocatorImpl::SentinelSlab::SentinelSlab() : Slab(nullptr, nullptr) {}

SlabAllocatorImpl::SentinelSlab::SentinelSlab(SentinelSlab&& rhs) = default;

@@ -83,8 +80,7 @@ SlabAllocatorImpl::SlabAllocatorImpl(SlabAllocatorImpl&& rhs)
      mTotalAllocationSize(rhs.mTotalAllocationSize),
      mAvailableSlabs(std::move(rhs.mAvailableSlabs)),
      mFullSlabs(std::move(rhs.mFullSlabs)),
-      mRecycledSlabs(std::move(rhs.mRecycledSlabs)) {
-}
+      mRecycledSlabs(std::move(rhs.mRecycledSlabs)) {}

SlabAllocatorImpl::~SlabAllocatorImpl() = default;

@@ -168,8 +168,7 @@ class SlabAllocator : public SlabAllocatorImpl {
    SlabAllocator(size_t totalObjectBytes,
                  uint32_t objectSize = u32_sizeof<T>,
                  uint32_t objectAlignment = u32_alignof<T>)
-        : SlabAllocatorImpl(totalObjectBytes / objectSize, objectSize, objectAlignment) {
-    }
+        : SlabAllocatorImpl(totalObjectBytes / objectSize, objectSize, objectAlignment) {}

    template <typename... Args>
    T* Allocate(Args&&... args) {

@@ -177,9 +176,7 @@ class SlabAllocator : public SlabAllocatorImpl {
        return new (ptr) T(std::forward<Args>(args)...);
    }

-    void Deallocate(T* object) {
-        SlabAllocatorImpl::Deallocate(object);
-    }
+    void Deallocate(T* object) { SlabAllocatorImpl::Deallocate(object); }
};

#endif  // SRC_DAWN_COMMON_SLABALLOCATOR_H_

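SlabAllocator hands out fixed-size blocks carved from larger slab allocations: Allocate placement-news a T into the next free block, and Deallocate returns the block to the slab's free list. A usage sketch with a hypothetical pooled type:

struct Command {  // hypothetical pooled object
    explicit Command(int op) : op(op) {}
    int op;
};

void Example() {
    // Roughly 4 kB of Command blocks per slab; object size and alignment
    // default to u32_sizeof<Command> and u32_alignof<Command>.
    SlabAllocator<Command> allocator(4096);

    Command* cmd = allocator.Allocate(/*op=*/1);  // placement-new into a free block
    allocator.Deallocate(cmd);                    // hands the block back to the slab
}
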
@@ -41,16 +41,11 @@ class StackAllocator : public std::allocator<T> {
    // maintaining this for as long as any containers using this allocator are
    // live.
    struct Source {
-        Source() : used_stack_buffer_(false) {
-        }
+        Source() : used_stack_buffer_(false) {}

        // Casts the buffer in its right type.
-        T* stack_buffer() {
-            return reinterpret_cast<T*>(stack_buffer_);
-        }
-        const T* stack_buffer() const {
-            return reinterpret_cast<const T*>(&stack_buffer_);
-        }
+        T* stack_buffer() { return reinterpret_cast<T*>(stack_buffer_); }
+        const T* stack_buffer() const { return reinterpret_cast<const T*>(&stack_buffer_); }

        // The buffer itself. It is not of type T because we don't want the
        // constructors and destructors to be automatically called. Define a POD

@@ -73,8 +68,7 @@ class StackAllocator : public std::allocator<T> {

    // For the straight up copy c-tor, we can share storage.
    StackAllocator(const StackAllocator<T, stack_capacity>& rhs)
-        : std::allocator<T>(), source_(rhs.source_) {
-    }
+        : std::allocator<T>(), source_(rhs.source_) {}

    // ISO C++ requires the following constructor to be defined,
    // and std::vector in VC++2008SP1 Release fails with an error

@@ -84,18 +78,15 @@ class StackAllocator : public std::allocator<T> {
    // no guarantee that the Source buffer of Ts is large enough
    // for Us.
    template <typename U, size_t other_capacity>
-    StackAllocator(const StackAllocator<U, other_capacity>& other) : source_(nullptr) {
-    }
+    StackAllocator(const StackAllocator<U, other_capacity>& other) : source_(nullptr) {}

    // This constructor must exist. It creates a default allocator that doesn't
    // actually have a stack buffer. glibc's std::string() will compare the
    // current allocator against the default-constructed allocator, so this
    // should be fast.
-    StackAllocator() : source_(nullptr) {
-    }
+    StackAllocator() : source_(nullptr) {}

-    explicit StackAllocator(Source* source) : source_(source) {
-    }
+    explicit StackAllocator(Source* source) : source_(source) {}

    // Actually do the allocation. Use the stack buffer if nobody has used it yet
    // and the size requested fits. Otherwise, fall through to the standard

@@ -154,28 +145,18 @@ class StackContainer {
    // shorter lifetimes than the source. The copy will share the same allocator
    // and therefore the same stack buffer as the original. Use std::copy to
    // copy into a "real" container for longer-lived objects.
-    ContainerType& container() {
-        return container_;
-    }
-    const ContainerType& container() const {
-        return container_;
-    }
+    ContainerType& container() { return container_; }
+    const ContainerType& container() const { return container_; }

    // Support operator-> to get to the container. This allows nicer syntax like:
    //   StackContainer<...> foo;
    //   std::sort(foo->begin(), foo->end());
-    ContainerType* operator->() {
-        return &container_;
-    }
-    const ContainerType* operator->() const {
-        return &container_;
-    }
+    ContainerType* operator->() { return &container_; }
+    const ContainerType* operator->() const { return &container_; }

    // Retrieves the stack source so that that unit tests can verify that the
    // buffer is being used properly.
-    const typename Allocator::Source& stack_data() const {
-        return stack_data_;
-    }
+    const typename Allocator::Source& stack_data() const { return stack_data_; }

  protected:
    typename Allocator::Source stack_data_;

@@ -225,8 +206,7 @@ class StackVector
    : public StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity> {
  public:
    StackVector()
-        : StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity>() {
-    }
+        : StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity>() {}

    // We need to put this in STL containers sometimes, which requires a copy
    // constructor. We can't call the regular copy constructor because that will

@@ -244,12 +224,8 @@ class StackVector

    // Vectors are commonly indexed, which isn't very convenient even with
    // operator-> (using "->at()" does exception stuff we don't want).
-    T& operator[](size_t i) {
-        return this->container().operator[](i);
-    }
-    const T& operator[](size_t i) const {
-        return this->container().operator[](i);
-    }
+    T& operator[](size_t i) { return this->container().operator[](i); }
+    const T& operator[](size_t i) const { return this->container().operator[](i); }

  private:
    // StackVector(const StackVector& rhs) = delete;

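StackVector keeps the first stack_capacity elements in an inline buffer and only touches the heap once the container outgrows it. A usage sketch:

void Example() {
    // The first 8 ints live in the inline stack buffer; a 9th push_back
    // would fall through to the heap via the wrapped std::allocator.
    StackVector<int, 8> values;
    for (int i = 0; i < 8; ++i) {
        values->push_back(i);  // operator-> exposes the underlying std::vector
    }
    int first = values[0];  // operator[] is forwarded for convenience
    (void)first;
}
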
@@ -18,17 +18,17 @@
#include "dawn/common/Log.h"

#if defined(DAWN_PLATFORM_WINDOWS)
-#    include <Windows.h>
-#    include <vector>
+#include <Windows.h>
+#include <vector>
#elif defined(DAWN_PLATFORM_LINUX)
-#    include <dlfcn.h>
-#    include <limits.h>
-#    include <unistd.h>
-#    include <cstdlib>
+#include <dlfcn.h>
+#include <limits.h>
+#include <unistd.h>
+#include <cstdlib>
#elif defined(DAWN_PLATFORM_MACOS) || defined(DAWN_PLATFORM_IOS)
-#    include <dlfcn.h>
-#    include <mach-o/dyld.h>
-#    include <vector>
+#include <dlfcn.h>
+#include <mach-o/dyld.h>
+#include <vector>
#endif

#include <array>

@@ -84,7 +84,7 @@ bool SetEnvironmentVar(const char* variableName, const char* value) {
    return setenv(variableName, value, 1) == 0;
}
#else
-#    error "Implement Get/SetEnvironmentVar for your platform."
+#error "Implement Get/SetEnvironmentVar for your platform."
#endif

#if defined(DAWN_PLATFORM_WINDOWS)

@@ -134,7 +134,7 @@ std::optional<std::string> GetExecutablePath() {
    return {};
}
#else
-#    error "Implement GetExecutablePath for your platform."
+#error "Implement GetExecutablePath for your platform."
#endif

std::optional<std::string> GetExecutableDirectory() {

@@ -168,15 +168,15 @@ std::optional<std::string> GetModulePath() {
    static int placeholderSymbol = 0;
    HMODULE module = nullptr;
// GetModuleHandleEx is unavailable on UWP
-#    if defined(DAWN_IS_WINUWP)
+#if defined(DAWN_IS_WINUWP)
    return {};
-#    else
+#else
    if (!GetModuleHandleExA(
            GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
            reinterpret_cast<LPCSTR>(&placeholderSymbol), &module)) {
        return {};
    }
-#    endif
+#endif
    return GetHModulePath(module);
}
#elif defined(DAWN_PLATFORM_FUCHSIA)

@@ -188,7 +188,7 @@ std::optional<std::string> GetModulePath() {
    return {};
}
#else
-#    error "Implement GetModulePath for your platform."
+#error "Implement GetModulePath for your platform."
#endif

std::optional<std::string> GetModuleDirectory() {

@@ -208,8 +208,7 @@ std::optional<std::string> GetModuleDirectory() {
ScopedEnvironmentVar::ScopedEnvironmentVar(const char* variableName, const char* value)
    : mName(variableName),
      mOriginalValue(GetEnvironmentVar(variableName)),
-      mIsSet(SetEnvironmentVar(variableName, value)) {
-}
+      mIsSet(SetEnvironmentVar(variableName, value)) {}

ScopedEnvironmentVar::~ScopedEnvironmentVar() {
    if (mIsSet) {

@ -50,8 +50,8 @@
|
|||
// uint32_t aValue = static_cast<uint32_t>(a);
|
||||
//
|
||||
namespace detail {
|
||||
template <typename Tag, typename T>
|
||||
class TypedIntegerImpl;
|
||||
template <typename Tag, typename T>
|
||||
class TypedIntegerImpl;
|
||||
} // namespace detail
|
||||
|
||||
template <typename Tag, typename T, typename = std::enable_if_t<std::is_integral<T>::value>>
|
||||
|
@ -62,8 +62,8 @@ using TypedInteger = T;
|
|||
#endif
|
||||
|
||||
namespace detail {
|
||||
template <typename Tag, typename T>
|
||||
class alignas(T) TypedIntegerImpl {
|
||||
template <typename Tag, typename T>
|
||||
class alignas(T) TypedIntegerImpl {
|
||||
static_assert(std::is_integral<T>::value, "TypedInteger must be integral");
|
||||
T mValue;
|
||||
|
||||
|
@ -75,25 +75,20 @@ namespace detail {
|
|||
|
||||
// Construction from non-narrowing integral types.
|
||||
template <typename I,
|
||||
typename = std::enable_if_t<
|
||||
std::is_integral<I>::value &&
|
||||
typename =
|
||||
std::enable_if_t<std::is_integral<I>::value &&
|
||||
std::numeric_limits<I>::max() <= std::numeric_limits<T>::max() &&
|
||||
std::numeric_limits<I>::min() >= std::numeric_limits<T>::min()>>
|
||||
-        explicit constexpr TypedIntegerImpl(I rhs) : mValue(static_cast<T>(rhs)) {
-        }
+    explicit constexpr TypedIntegerImpl(I rhs) : mValue(static_cast<T>(rhs)) {}

    // Allow explicit casts only to the underlying type. If you're casting out of a
    // TypedInteger, you should know what you're doing, and exactly what type you
    // expect.
-        explicit constexpr operator T() const {
-            return static_cast<T>(this->mValue);
-        }
+    explicit constexpr operator T() const { return static_cast<T>(this->mValue); }

    // Same-tag TypedInteger comparison operators
#define TYPED_COMPARISON(op) \
-        constexpr bool operator op(const TypedIntegerImpl& rhs) const { \
-            return mValue op rhs.mValue; \
-        }
+    constexpr bool operator op(const TypedIntegerImpl& rhs) const { return mValue op rhs.mValue; }
    TYPED_COMPARISON(<)
    TYPED_COMPARISON(<=)
    TYPED_COMPARISON(>)

@@ -132,8 +127,9 @@ namespace detail {
    }

    template <typename T2 = T>
-        static constexpr std::enable_if_t<std::is_unsigned<T2>::value, decltype(T(0) + T2(0))>
-        AddImpl(TypedIntegerImpl<Tag, T> lhs, TypedIntegerImpl<Tag, T2> rhs) {
+    static constexpr std::enable_if_t<std::is_unsigned<T2>::value, decltype(T(0) + T2(0))> AddImpl(
+        TypedIntegerImpl<Tag, T> lhs,
+        TypedIntegerImpl<Tag, T2> rhs) {
        static_assert(std::is_same<T, T2>::value);

        // Overflow would wrap around

@@ -142,8 +138,9 @@ namespace detail {
    }

    template <typename T2 = T>
-        static constexpr std::enable_if_t<std::is_signed<T2>::value, decltype(T(0) + T2(0))>
-        AddImpl(TypedIntegerImpl<Tag, T> lhs, TypedIntegerImpl<Tag, T2> rhs) {
+    static constexpr std::enable_if_t<std::is_signed<T2>::value, decltype(T(0) + T2(0))> AddImpl(
+        TypedIntegerImpl<Tag, T> lhs,
+        TypedIntegerImpl<Tag, T2> rhs) {
        static_assert(std::is_same<T, T2>::value);

        if (lhs.mValue > 0) {

@@ -160,8 +157,9 @@ namespace detail {
    }

    template <typename T2 = T>
-        static constexpr std::enable_if_t<std::is_unsigned<T>::value, decltype(T(0) - T2(0))>
-        SubImpl(TypedIntegerImpl<Tag, T> lhs, TypedIntegerImpl<Tag, T2> rhs) {
+    static constexpr std::enable_if_t<std::is_unsigned<T>::value, decltype(T(0) - T2(0))> SubImpl(
+        TypedIntegerImpl<Tag, T> lhs,
+        TypedIntegerImpl<Tag, T2> rhs) {
        static_assert(std::is_same<T, T2>::value);

        // Overflow would wrap around

@@ -207,14 +205,14 @@ namespace detail {
        static_assert(std::is_same<T, decltype(result)>::value, "Use ityp::Sub instead.");
        return TypedIntegerImpl(result);
    }
-    };
+};

} // namespace detail

namespace std {

-    template <typename Tag, typename T>
-    class numeric_limits<detail::TypedIntegerImpl<Tag, T>> : public numeric_limits<T> {
+template <typename Tag, typename T>
+class numeric_limits<detail::TypedIntegerImpl<Tag, T>> : public numeric_limits<T> {
  public:
    static detail::TypedIntegerImpl<Tag, T> max() noexcept {
        return detail::TypedIntegerImpl<Tag, T>(std::numeric_limits<T>::max());

@@ -222,40 +220,40 @@ namespace std {
    static detail::TypedIntegerImpl<Tag, T> min() noexcept {
        return detail::TypedIntegerImpl<Tag, T>(std::numeric_limits<T>::min());
    }
-    };
+};

} // namespace std

namespace ityp {

-    // These helpers below are provided since the default arithmetic operators for small integer
-    // types like uint8_t and uint16_t return integers, not their same type. To avoid lots of
-    // casting or conditional code between Release/Debug, callsites should use ityp::Add(a, b) and
-    // ityp::Sub(a, b) instead.
+// These helpers below are provided since the default arithmetic operators for small integer
+// types like uint8_t and uint16_t return integers, not their same type. To avoid lots of
+// casting or conditional code between Release/Debug, callsites should use ityp::Add(a, b) and
+// ityp::Sub(a, b) instead.

-    template <typename Tag, typename T>
-    constexpr ::detail::TypedIntegerImpl<Tag, T> Add(::detail::TypedIntegerImpl<Tag, T> lhs,
+template <typename Tag, typename T>
+constexpr ::detail::TypedIntegerImpl<Tag, T> Add(::detail::TypedIntegerImpl<Tag, T> lhs,
                                                 ::detail::TypedIntegerImpl<Tag, T> rhs) {
    return ::detail::TypedIntegerImpl<Tag, T>(
        static_cast<T>(::detail::TypedIntegerImpl<Tag, T>::AddImpl(lhs, rhs)));
-    }
+}

-    template <typename Tag, typename T>
-    constexpr ::detail::TypedIntegerImpl<Tag, T> Sub(::detail::TypedIntegerImpl<Tag, T> lhs,
+template <typename Tag, typename T>
+constexpr ::detail::TypedIntegerImpl<Tag, T> Sub(::detail::TypedIntegerImpl<Tag, T> lhs,
                                                 ::detail::TypedIntegerImpl<Tag, T> rhs) {
    return ::detail::TypedIntegerImpl<Tag, T>(
        static_cast<T>(::detail::TypedIntegerImpl<Tag, T>::SubImpl(lhs, rhs)));
-    }
+}

-    template <typename T>
-    constexpr std::enable_if_t<std::is_integral<T>::value, T> Add(T lhs, T rhs) {
+template <typename T>
+constexpr std::enable_if_t<std::is_integral<T>::value, T> Add(T lhs, T rhs) {
    return static_cast<T>(lhs + rhs);
-    }
+}

-    template <typename T>
-    constexpr std::enable_if_t<std::is_integral<T>::value, T> Sub(T lhs, T rhs) {
+template <typename T>
+constexpr std::enable_if_t<std::is_integral<T>::value, T> Sub(T lhs, T rhs) {
    return static_cast<T>(lhs - rhs);
-    }
+}

} // namespace ityp
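A minimal usage sketch of the helpers above (the HeapIndex tag is hypothetical; the overflow
checking comes from AddImpl as shown):

    using HeapIndex = detail::TypedIntegerImpl<struct HeapIndexTag, uint8_t>;

    HeapIndex a(200);
    HeapIndex b(55);
    // Plain uint8_t + uint8_t would promote to int; ityp::Add keeps the HeapIndex type,
    // and in assert-enabled builds AddImpl catches overflow instead of wrapping.
    HeapIndex sum = ityp::Add(a, b);  // HeapIndex(255)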
@@ -22,27 +22,27 @@
// template parameter. It includes a specialization for detail::TypedIntegerImpl which yields
// the wrapped integer type.
namespace detail {
-    template <typename T, typename Enable = void>
-    struct UnderlyingTypeImpl;
+template <typename T, typename Enable = void>
+struct UnderlyingTypeImpl;

-    template <typename I>
-    struct UnderlyingTypeImpl<I, typename std::enable_if_t<std::is_integral<I>::value>> {
+template <typename I>
+struct UnderlyingTypeImpl<I, typename std::enable_if_t<std::is_integral<I>::value>> {
    using type = I;
-    };
+};

-    template <typename E>
-    struct UnderlyingTypeImpl<E, typename std::enable_if_t<std::is_enum<E>::value>> {
+template <typename E>
+struct UnderlyingTypeImpl<E, typename std::enable_if_t<std::is_enum<E>::value>> {
    using type = std::underlying_type_t<E>;
-    };
+};

-    // Forward declare the TypedInteger impl.
-    template <typename Tag, typename T>
-    class TypedIntegerImpl;
+// Forward declare the TypedInteger impl.
+template <typename Tag, typename T>
+class TypedIntegerImpl;

-    template <typename Tag, typename I>
-    struct UnderlyingTypeImpl<TypedIntegerImpl<Tag, I>> {
+template <typename Tag, typename I>
+struct UnderlyingTypeImpl<TypedIntegerImpl<Tag, I>> {
    using type = typename UnderlyingTypeImpl<I>::type;
-    };
+};
} // namespace detail

template <typename T>
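The specializations resolve recursively, so a wrapped TypedInteger unwraps to its raw integer.
A quick compile-time check of that behavior (ATag is a hypothetical tag type):

    using ATagged = detail::TypedIntegerImpl<struct ATag, uint32_t>;
    static_assert(std::is_same_v<detail::UnderlyingTypeImpl<ATagged>::type, uint32_t>);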
@@ -26,12 +26,12 @@
namespace ityp {

-    // ityp::array is a helper class that wraps std::array with the restriction that
-    // indices must be a particular type |Index|. Dawn uses multiple flat maps of
-    // index-->data, and this class helps ensure indices cannot be passed interchangeably
-    // to a flat map of a different type.
-    template <typename Index, typename Value, size_t Size>
-    class array : private std::array<Value, Size> {
+// ityp::array is a helper class that wraps std::array with the restriction that
+// indices must be a particular type |Index|. Dawn uses multiple flat maps of
+// index-->data, and this class helps ensure indices cannot be passed interchangeably
+// to a flat map of a different type.
+template <typename Index, typename Value, size_t Size>
+class array : private std::array<Value, Size> {
    using I = UnderlyingType<Index>;
    using Base = std::array<Value, Size>;

@@ -42,8 +42,7 @@ namespace ityp {

    template <typename... Values>
    // NOLINTNEXTLINE(runtime/explicit)
-    constexpr array(Values&&... values) : Base{std::forward<Values>(values)...} {
-    }
+    constexpr array(Values&&... values) : Base{std::forward<Values>(values)...} {}

    Value& operator[](Index i) {
        I index = static_cast<I>(i);

@@ -69,32 +68,22 @@ namespace ityp {
        return Base::at(index);
    }

-    typename Base::iterator begin() noexcept {
-        return Base::begin();
-    }
+    typename Base::iterator begin() noexcept { return Base::begin(); }

-    typename Base::const_iterator begin() const noexcept {
-        return Base::begin();
-    }
+    typename Base::const_iterator begin() const noexcept { return Base::begin(); }

-    typename Base::iterator end() noexcept {
-        return Base::end();
-    }
+    typename Base::iterator end() noexcept { return Base::end(); }

-    typename Base::const_iterator end() const noexcept {
-        return Base::end();
-    }
+    typename Base::const_iterator end() const noexcept { return Base::end(); }

-    constexpr Index size() const {
-        return Index(I(Size));
-    }
+    constexpr Index size() const { return Index(I(Size)); }

    using Base::back;
    using Base::data;
    using Base::empty;
    using Base::fill;
    using Base::front;
-    };
+};

} // namespace ityp
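A sketch of the type safety this buys (BufferIndex is a hypothetical TypedInteger):

    using BufferIndex = detail::TypedIntegerImpl<struct BufferIndexTag, uint8_t>;

    ityp::array<BufferIndex, uint64_t, 4> bufferSizes = {16u, 32u, 64u, 128u};
    uint64_t s = bufferSizes[BufferIndex(2)];  // OK: the index type matches
    // uint64_t t = bufferSizes[2];            // Does not compile: raw integer index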
@@ -21,39 +21,30 @@
namespace ityp {

-    // ityp::bitset is a helper class that wraps std::bitset with the restriction that
-    // indices must be a particular type |Index|.
-    template <typename Index, size_t N>
-    class bitset : private std::bitset<N> {
+// ityp::bitset is a helper class that wraps std::bitset with the restriction that
+// indices must be a particular type |Index|.
+template <typename Index, size_t N>
+class bitset : private std::bitset<N> {
    using I = UnderlyingType<Index>;
    using Base = std::bitset<N>;

    static_assert(sizeof(I) <= sizeof(size_t));

-    explicit constexpr bitset(const Base& rhs) : Base(rhs) {
-    }
+    explicit constexpr bitset(const Base& rhs) : Base(rhs) {}

  public:
    using reference = typename Base::reference;

-    constexpr bitset() noexcept : Base() {
-    }
+    constexpr bitset() noexcept : Base() {}

    // NOLINTNEXTLINE(runtime/explicit)
-    constexpr bitset(uint64_t value) noexcept : Base(value) {
-    }
+    constexpr bitset(uint64_t value) noexcept : Base(value) {}

-    constexpr bool operator[](Index i) const {
-        return Base::operator[](static_cast<I>(i));
-    }
+    constexpr bool operator[](Index i) const { return Base::operator[](static_cast<I>(i)); }

-    typename Base::reference operator[](Index i) {
-        return Base::operator[](static_cast<I>(i));
-    }
+    typename Base::reference operator[](Index i) { return Base::operator[](static_cast<I>(i)); }

-    bool test(Index i) const {
-        return Base::test(static_cast<I>(i));
-    }
+    bool test(Index i) const { return Base::test(static_cast<I>(i)); }

    using Base::all;
    using Base::any;

@@ -81,33 +72,21 @@ namespace ityp {
        return static_cast<bitset&>(Base::operator^=(static_cast<const Base&>(other)));
    }

-    bitset operator~() const noexcept {
-        return bitset(*this).flip();
-    }
+    bitset operator~() const noexcept { return bitset(*this).flip(); }

-    bitset& set() noexcept {
-        return static_cast<bitset&>(Base::set());
-    }
+    bitset& set() noexcept { return static_cast<bitset&>(Base::set()); }

    bitset& set(Index i, bool value = true) {
        return static_cast<bitset&>(Base::set(static_cast<I>(i), value));
    }

-    bitset& reset() noexcept {
-        return static_cast<bitset&>(Base::reset());
-    }
+    bitset& reset() noexcept { return static_cast<bitset&>(Base::reset()); }

-    bitset& reset(Index i) {
-        return static_cast<bitset&>(Base::reset(static_cast<I>(i)));
-    }
+    bitset& reset(Index i) { return static_cast<bitset&>(Base::reset(static_cast<I>(i))); }

-    bitset& flip() noexcept {
-        return static_cast<bitset&>(Base::flip());
-    }
+    bitset& flip() noexcept { return static_cast<bitset&>(Base::flip()); }

-    bitset& flip(Index i) {
-        return static_cast<bitset&>(Base::flip(static_cast<I>(i)));
-    }
+    bitset& flip(Index i) { return static_cast<bitset&>(Base::flip(static_cast<I>(i))); }

    using Base::to_string;
    using Base::to_ullong;

@@ -130,7 +109,7 @@ namespace ityp {
    }

    friend struct std::hash<bitset>;
-    };
+};

} // namespace ityp
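Sketch of the wrapper in use (AttachmentIndex is a hypothetical TypedInteger):

    using AttachmentIndex = detail::TypedIntegerImpl<struct AttachmentIndexTag, uint8_t>;

    ityp::bitset<AttachmentIndex, 8> attachments;
    attachments.set(AttachmentIndex(2));
    bool used = attachments.test(AttachmentIndex(2));  // true; raw indices do not compile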
@@ -147,7 +126,7 @@ Index GetHighestBitIndexPlusOne(const ityp::bitset<Index, N>& bitset) {
    using I = UnderlyingType<Index>;
#if defined(DAWN_COMPILER_MSVC)
    if constexpr (N > 32) {
-# if defined(DAWN_PLATFORM_64_BIT)
+#if defined(DAWN_PLATFORM_64_BIT)
        // NOLINTNEXTLINE(runtime/int)
        unsigned long firstBitIndex = 0ul;
        unsigned char ret = _BitScanReverse64(&firstBitIndex, bitset.to_ullong());

@@ -155,7 +134,7 @@ Index GetHighestBitIndexPlusOne(const ityp::bitset<Index, N>& bitset) {
            return Index(static_cast<I>(0));
        }
        return Index(static_cast<I>(firstBitIndex + 1));
-# else // defined(DAWN_PLATFORM_64_BIT)
+#else  // defined(DAWN_PLATFORM_64_BIT)
        if (bitset.none()) {
            return Index(static_cast<I>(0));
        }

@@ -165,7 +144,7 @@ Index GetHighestBitIndexPlusOne(const ityp::bitset<Index, N>& bitset) {
        }
    }
    UNREACHABLE();
-# endif // defined(DAWN_PLATFORM_64_BIT)
+#endif  // defined(DAWN_PLATFORM_64_BIT)
    } else {
        // NOLINTNEXTLINE(runtime/int)
        unsigned long firstBitIndex = 0ul;
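For example, given an ityp::bitset<Index, 8> holding 0b00010100, GetHighestBitIndexPlusOne
returns Index(5): the highest set bit is bit 4 and the result is that index plus one, while
an empty bitset yields Index(0), as the none() branch above shows.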
@@ -22,48 +22,34 @@
namespace ityp {

-    // ityp::span is a helper class that wraps an unowned packed array of type |Value|.
-    // It stores the size and pointer to first element. It has the restriction that
-    // indices must be a particular type |Index|. This provides a type-safe way to index
-    // raw pointers.
-    template <typename Index, typename Value>
-    class span {
+// ityp::span is a helper class that wraps an unowned packed array of type |Value|.
+// It stores the size and pointer to first element. It has the restriction that
+// indices must be a particular type |Index|. This provides a type-safe way to index
+// raw pointers.
+template <typename Index, typename Value>
+class span {
    using I = UnderlyingType<Index>;

  public:
-    constexpr span() : mData(nullptr), mSize(0) {
-    }
-    constexpr span(Value* data, Index size) : mData(data), mSize(size) {
-    }
+    constexpr span() : mData(nullptr), mSize(0) {}
+    constexpr span(Value* data, Index size) : mData(data), mSize(size) {}

    constexpr Value& operator[](Index i) const {
        ASSERT(i < mSize);
        return mData[static_cast<I>(i)];
    }

-    Value* data() noexcept {
-        return mData;
-    }
+    Value* data() noexcept { return mData; }

-    const Value* data() const noexcept {
-        return mData;
-    }
+    const Value* data() const noexcept { return mData; }

-    Value* begin() noexcept {
-        return mData;
-    }
+    Value* begin() noexcept { return mData; }

-    const Value* begin() const noexcept {
-        return mData;
-    }
+    const Value* begin() const noexcept { return mData; }

-    Value* end() noexcept {
-        return mData + static_cast<I>(mSize);
-    }
+    Value* end() noexcept { return mData + static_cast<I>(mSize); }

-    const Value* end() const noexcept {
-        return mData + static_cast<I>(mSize);
-    }
+    const Value* end() const noexcept { return mData + static_cast<I>(mSize); }

    Value& front() {
        ASSERT(mData != nullptr);

@@ -89,14 +75,12 @@ namespace ityp {
        return *(mData + static_cast<I>(mSize) - 1);
    }

-    Index size() const {
-        return mSize;
-    }
+    Index size() const { return mSize; }

  private:
    Value* mData;
    Index mSize;
-    };
+};

} // namespace ityp
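Sketch of typical use over unowned storage (SamplerIndex is a hypothetical TypedInteger):

    using SamplerIndex = detail::TypedIntegerImpl<struct SamplerIndexTag, uint32_t>;

    std::vector<int> storage(8);
    ityp::span<SamplerIndex, int> view(storage.data(), SamplerIndex(8));
    view[SamplerIndex(3)] = 42;  // raw size_t/int indices are rejected at compile time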
@@ -24,19 +24,16 @@
namespace ityp {

-    template <typename Index, typename Value, size_t StaticCapacity>
-    class stack_vec : private StackVector<Value, StaticCapacity> {
+template <typename Index, typename Value, size_t StaticCapacity>
+class stack_vec : private StackVector<Value, StaticCapacity> {
    using I = UnderlyingType<Index>;
    using Base = StackVector<Value, StaticCapacity>;
    using VectorBase = std::vector<Value, StackAllocator<Value, StaticCapacity>>;
    static_assert(StaticCapacity <= std::numeric_limits<I>::max());

  public:
-    stack_vec() : Base() {
-    }
-    explicit stack_vec(Index size) : Base() {
-        this->container().resize(static_cast<I>(size));
-    }
+    stack_vec() : Base() {}
+    explicit stack_vec(Index size) : Base() { this->container().resize(static_cast<I>(size)); }

    Value& operator[](Index i) {
        ASSERT(i < size());

@@ -48,58 +45,32 @@ namespace ityp {
        return Base::operator[](static_cast<I>(i));
    }

-    void resize(Index size) {
-        this->container().resize(static_cast<I>(size));
-    }
+    void resize(Index size) { this->container().resize(static_cast<I>(size)); }

-    void reserve(Index size) {
-        this->container().reserve(static_cast<I>(size));
-    }
+    void reserve(Index size) { this->container().reserve(static_cast<I>(size)); }

-    Value* data() {
-        return this->container().data();
-    }
+    Value* data() { return this->container().data(); }

-    const Value* data() const {
-        return this->container().data();
-    }
+    const Value* data() const { return this->container().data(); }

-    typename VectorBase::iterator begin() noexcept {
-        return this->container().begin();
-    }
+    typename VectorBase::iterator begin() noexcept { return this->container().begin(); }

-    typename VectorBase::const_iterator begin() const noexcept {
-        return this->container().begin();
-    }
+    typename VectorBase::const_iterator begin() const noexcept { return this->container().begin(); }

-    typename VectorBase::iterator end() noexcept {
-        return this->container().end();
-    }
+    typename VectorBase::iterator end() noexcept { return this->container().end(); }

-    typename VectorBase::const_iterator end() const noexcept {
-        return this->container().end();
-    }
+    typename VectorBase::const_iterator end() const noexcept { return this->container().end(); }

-    typename VectorBase::reference front() {
-        return this->container().front();
-    }
+    typename VectorBase::reference front() { return this->container().front(); }

-    typename VectorBase::const_reference front() const {
-        return this->container().front();
-    }
+    typename VectorBase::const_reference front() const { return this->container().front(); }

-    typename VectorBase::reference back() {
-        return this->container().back();
-    }
+    typename VectorBase::reference back() { return this->container().back(); }

-    typename VectorBase::const_reference back() const {
-        return this->container().back();
-    }
+    typename VectorBase::const_reference back() const { return this->container().back(); }

-    Index size() const {
-        return Index(static_cast<I>(this->container().size()));
-    }
-    };
+    Index size() const { return Index(static_cast<I>(this->container().size())); }
+};

} // namespace ityp
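Sketch: via Chromium's StackVector, stack_vec keeps up to StaticCapacity elements in inline
storage before spilling to the heap, with the same typed-index guarantee as the other ityp
wrappers (BindingIndex is a hypothetical TypedInteger):

    using BindingIndex = detail::TypedIntegerImpl<struct BindingIndexTag, uint32_t>;

    ityp::stack_vec<BindingIndex, float, 16> weights(BindingIndex(4));
    weights[BindingIndex(0)] = 1.0f;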
@@ -24,10 +24,10 @@
namespace ityp {

-    // ityp::vector is a helper class that wraps std::vector with the restriction that
-    // indices must be a particular type |Index|.
-    template <typename Index, typename Value>
-    class vector : public std::vector<Value> {
+// ityp::vector is a helper class that wraps std::vector with the restriction that
+// indices must be a particular type |Index|.
+template <typename Index, typename Value>
+class vector : public std::vector<Value> {
    using I = UnderlyingType<Index>;
    using Base = std::vector<Value>;

@@ -42,23 +42,17 @@ namespace ityp {
    using Base::size;

  public:
-    vector() : Base() {
-    }
+    vector() : Base() {}

-    explicit vector(Index size) : Base(static_cast<I>(size)) {
-    }
+    explicit vector(Index size) : Base(static_cast<I>(size)) {}

-    vector(Index size, const Value& init) : Base(static_cast<I>(size), init) {
-    }
+    vector(Index size, const Value& init) : Base(static_cast<I>(size), init) {}

-    vector(const vector& rhs) : Base(static_cast<const Base&>(rhs)) {
-    }
+    vector(const vector& rhs) : Base(static_cast<const Base&>(rhs)) {}

-    vector(vector&& rhs) : Base(static_cast<Base&&>(rhs)) {
-    }
+    vector(vector&& rhs) : Base(static_cast<Base&&>(rhs)) {}

-    vector(std::initializer_list<Value> init) : Base(init) {
-    }
+    vector(std::initializer_list<Value> init) : Base(init) {}

    vector& operator=(const vector& rhs) {
        Base::operator=(static_cast<const Base&>(rhs));

@@ -95,14 +89,10 @@ namespace ityp {
        return Index(static_cast<I>(Base::size()));
    }

-    void resize(Index size) {
-        Base::resize(static_cast<I>(size));
-    }
+    void resize(Index size) { Base::resize(static_cast<I>(size)); }

-    void reserve(Index size) {
-        Base::reserve(static_cast<I>(size));
-    }
-    };
+    void reserve(Index size) { Base::reserve(static_cast<I>(size)); }
+};

} // namespace ityp
@@ -16,10 +16,10 @@
#define SRC_DAWN_COMMON_VULKAN_PLATFORM_H_

#if !defined(DAWN_ENABLE_BACKEND_VULKAN)
-# error "vulkan_platform.h included without the Vulkan backend enabled"
+#error "vulkan_platform.h included without the Vulkan backend enabled"
#endif
#if defined(VULKAN_CORE_H_)
-# error "vulkan.h included before vulkan_platform.h"
+#error "vulkan.h included before vulkan_platform.h"
#endif

#include <cstddef>

@@ -36,7 +36,7 @@
// (like vulkan.h on 64 bit) but makes sure the types are different on 32 bit architectures.

#if defined(DAWN_PLATFORM_64_BIT)
-# define DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) using object = struct object##_T*;
+#define DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) using object = struct object##_T*;
// This function is needed because MSVC doesn't accept reinterpret_cast from uint64_t to uint64_t
// TODO(cwallez@chromium.org): Remove this once we rework vulkan_platform.h
template <typename T>

@@ -44,13 +44,13 @@ T NativeNonDispatachableHandleFromU64(uint64_t u64) {
    return reinterpret_cast<T>(u64);
}
#elif defined(DAWN_PLATFORM_32_BIT)
-# define DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) using object = uint64_t;
+#define DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) using object = uint64_t;
template <typename T>
T NativeNonDispatachableHandleFromU64(uint64_t u64) {
    return u64;
}
#else
-# error "Unsupported platform"
+#error "Unsupported platform"
#endif

// Define a placeholder Vulkan handle for use before we include vulkan.h

@@ -67,78 +67,62 @@ DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(VkSomeHandle)

namespace dawn::native::vulkan {

-    namespace detail {
-    template <typename T>
-    struct WrapperStruct {
+namespace detail {
+template <typename T>
+struct WrapperStruct {
    T member;
-    };
+};

-    template <typename T>
-    static constexpr size_t AlignOfInStruct = alignof(WrapperStruct<T>);
+template <typename T>
+static constexpr size_t AlignOfInStruct = alignof(WrapperStruct<T>);

-    static constexpr size_t kNativeVkHandleAlignment = AlignOfInStruct<VkSomeHandle>;
-    static constexpr size_t kUint64Alignment = AlignOfInStruct<uint64_t>;
+static constexpr size_t kNativeVkHandleAlignment = AlignOfInStruct<VkSomeHandle>;
+static constexpr size_t kUint64Alignment = AlignOfInStruct<uint64_t>;

-    // Simple handle types that support "nullptr_t" as a 0 value.
-    template <typename Tag, typename HandleType>
-    class alignas(detail::kNativeVkHandleAlignment) VkHandle {
+// Simple handle types that support "nullptr_t" as a 0 value.
+template <typename Tag, typename HandleType>
+class alignas(detail::kNativeVkHandleAlignment) VkHandle {
  public:
    // Default constructor and assigning of VK_NULL_HANDLE
    VkHandle() = default;
-    VkHandle(std::nullptr_t) {
-    }
+    VkHandle(std::nullptr_t) {}

    // Use default copy constructor/assignment
    VkHandle(const VkHandle<Tag, HandleType>& other) = default;
    VkHandle& operator=(const VkHandle<Tag, HandleType>&) = default;

    // Comparisons between handles
-    bool operator==(VkHandle<Tag, HandleType> other) const {
-        return mHandle == other.mHandle;
-    }
-    bool operator!=(VkHandle<Tag, HandleType> other) const {
-        return mHandle != other.mHandle;
-    }
+    bool operator==(VkHandle<Tag, HandleType> other) const { return mHandle == other.mHandle; }
+    bool operator!=(VkHandle<Tag, HandleType> other) const { return mHandle != other.mHandle; }

    // Comparisons between handles and VK_NULL_HANDLE
-    bool operator==(std::nullptr_t) const {
-        return mHandle == 0;
-    }
-    bool operator!=(std::nullptr_t) const {
-        return mHandle != 0;
-    }
+    bool operator==(std::nullptr_t) const { return mHandle == 0; }
+    bool operator!=(std::nullptr_t) const { return mHandle != 0; }

    // Implicit conversion to real Vulkan types.
-    operator HandleType() const {
-        return GetHandle();
-    }
+    operator HandleType() const { return GetHandle(); }

-    HandleType GetHandle() const {
-        return mHandle;
-    }
+    HandleType GetHandle() const { return mHandle; }

-    HandleType& operator*() {
-        return mHandle;
-    }
+    HandleType& operator*() { return mHandle; }

    static VkHandle<Tag, HandleType> CreateFromHandle(HandleType handle) {
        return VkHandle{handle};
    }

  private:
-    explicit VkHandle(HandleType handle) : mHandle(handle) {
-    }
+    explicit VkHandle(HandleType handle) : mHandle(handle) {}

    HandleType mHandle = 0;
-    };
-    } // namespace detail
+};
+} // namespace detail

-    static constexpr std::nullptr_t VK_NULL_HANDLE = nullptr;
+static constexpr std::nullptr_t VK_NULL_HANDLE = nullptr;

-    template <typename Tag, typename HandleType>
-    HandleType* AsVkArray(detail::VkHandle<Tag, HandleType>* handle) {
+template <typename Tag, typename HandleType>
+HandleType* AsVkArray(detail::VkHandle<Tag, HandleType>* handle) {
    return reinterpret_cast<HandleType*>(handle);
-    }
+}

} // namespace dawn::native::vulkan
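A sketch of what the wrapper buys call sites (the tag and raw handle type are illustrative;
uint64_t stands in for a non-dispatchable Vulkan handle):

    using Fence = dawn::native::vulkan::detail::VkHandle<struct VkFenceTag, uint64_t>;

    Fence fence;             // zero, so it compares equal to VK_NULL_HANDLE
    if (fence == nullptr) {
        // Creation must go through CreateFromHandle because the converting
        // constructor is private; raw handles can't silently change tags.
        fence = Fence::CreateFromHandle(0x1234u);
    }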
@@ -157,36 +141,36 @@ namespace dawn::native::vulkan {
// defines are defined already in the Vulkan-Header BUILD.gn, but are needed when building with
// CMake, hence they cannot be removed at the moment.
#if defined(DAWN_PLATFORM_WINDOWS)
-# ifndef VK_USE_PLATFORM_WIN32_KHR
-#  define VK_USE_PLATFORM_WIN32_KHR
-# endif
-# include "dawn/common/windows_with_undefs.h"
+#ifndef VK_USE_PLATFORM_WIN32_KHR
+#define VK_USE_PLATFORM_WIN32_KHR
+#endif
+#include "dawn/common/windows_with_undefs.h"
#endif // DAWN_PLATFORM_WINDOWS

#if defined(DAWN_USE_X11)
-# define VK_USE_PLATFORM_XLIB_KHR
-# ifndef VK_USE_PLATFORM_XCB_KHR
-#  define VK_USE_PLATFORM_XCB_KHR
-# endif
-# include "dawn/common/xlib_with_undefs.h"
+#define VK_USE_PLATFORM_XLIB_KHR
+#ifndef VK_USE_PLATFORM_XCB_KHR
+#define VK_USE_PLATFORM_XCB_KHR
+#endif
+#include "dawn/common/xlib_with_undefs.h"
#endif // defined(DAWN_USE_X11)

#if defined(DAWN_ENABLE_BACKEND_METAL)
-# ifndef VK_USE_PLATFORM_METAL_EXT
-#  define VK_USE_PLATFORM_METAL_EXT
-# endif
+#ifndef VK_USE_PLATFORM_METAL_EXT
+#define VK_USE_PLATFORM_METAL_EXT
+#endif
#endif // defined(DAWN_ENABLE_BACKEND_METAL)

#if defined(DAWN_PLATFORM_ANDROID)
-# ifndef VK_USE_PLATFORM_ANDROID_KHR
-#  define VK_USE_PLATFORM_ANDROID_KHR
-# endif
+#ifndef VK_USE_PLATFORM_ANDROID_KHR
+#define VK_USE_PLATFORM_ANDROID_KHR
+#endif
#endif // defined(DAWN_PLATFORM_ANDROID)

#if defined(DAWN_PLATFORM_FUCHSIA)
-# ifndef VK_USE_PLATFORM_FUCHSIA
-#  define VK_USE_PLATFORM_FUCHSIA
-# endif
+#ifndef VK_USE_PLATFORM_FUCHSIA
+#define VK_USE_PLATFORM_FUCHSIA
+#endif
#endif // defined(DAWN_PLATFORM_FUCHSIA)

// The actual inclusion of vulkan.h!

@@ -200,7 +184,7 @@ static constexpr std::nullptr_t VK_NULL_HANDLE = nullptr;
#elif defined(DAWN_PLATFORM_32_BIT)
static constexpr uint64_t VK_NULL_HANDLE = 0;
#else
-# error "Unsupported platform"
+#error "Unsupported platform"
#endif

#endif // SRC_DAWN_COMMON_VULKAN_PLATFORM_H_
@@ -18,7 +18,7 @@
#include "dawn/common/Platform.h"

#if !defined(DAWN_PLATFORM_WINDOWS)
-# error "windows_with_undefs.h included on non-Windows"
+#error "windows_with_undefs.h included on non-Windows"
#endif

// This header includes <windows.h> but removes all the extra defines that conflict with identifiers
@@ -18,7 +18,7 @@
#include "dawn/common/Platform.h"

#if !defined(DAWN_PLATFORM_LINUX)
-# error "xlib_with_undefs.h included on non-Linux"
+#error "xlib_with_undefs.h included on non-Linux"
#endif

// This header includes <X11/Xlib.h> but removes all the extra defines that conflict with
@@ -29,7 +29,7 @@

namespace {

-    class DevNull : public dawn::wire::CommandSerializer {
+class DevNull : public dawn::wire::CommandSerializer {
  public:
    size_t GetMaximumAllocationSize() const override {
        // Some fuzzer bots have a 2GB allocation limit. Pick a value reasonably below that.

@@ -41,27 +41,25 @@ namespace {
        }
        return buf.data();
    }
-    bool Flush() override {
-        return true;
-    }
+    bool Flush() override { return true; }

  private:
    std::vector<char> buf;
-    };
+};

-    std::unique_ptr<dawn::native::Instance> sInstance;
-    WGPUProcDeviceCreateSwapChain sOriginalDeviceCreateSwapChain = nullptr;
+std::unique_ptr<dawn::native::Instance> sInstance;
+WGPUProcDeviceCreateSwapChain sOriginalDeviceCreateSwapChain = nullptr;

-    bool sCommandsComplete = false;
+bool sCommandsComplete = false;

-    WGPUSwapChain ErrorDeviceCreateSwapChain(WGPUDevice device,
+WGPUSwapChain ErrorDeviceCreateSwapChain(WGPUDevice device,
                                         WGPUSurface surface,
                                         const WGPUSwapChainDescriptor*) {
    WGPUSwapChainDescriptor desc = {};
    // A 0 implementation will trigger a swapchain creation error.
    desc.implementation = 0;
    return sOriginalDeviceCreateSwapChain(device, surface, &desc);
-    }
+}

} // namespace
@@ -22,17 +22,17 @@

namespace dawn::native {

-    class Instance;
+class Instance;

} // namespace dawn::native

namespace DawnWireServerFuzzer {

-    using MakeDeviceFn = std::function<wgpu::Device(dawn::native::Instance*)>;
+using MakeDeviceFn = std::function<wgpu::Device(dawn::native::Instance*)>;

-    int Initialize(int* argc, char*** argv);
+int Initialize(int* argc, char*** argv);

-    int Run(const uint8_t* data, size_t size, MakeDeviceFn MakeDevice, bool supportsErrorInjection);
+int Run(const uint8_t* data, size_t size, MakeDeviceFn MakeDevice, bool supportsErrorInjection);

} // namespace DawnWireServerFuzzer
@@ -24,13 +24,13 @@

namespace dawn::native {

-    AdapterBase::AdapterBase(InstanceBase* instance, wgpu::BackendType backend)
+AdapterBase::AdapterBase(InstanceBase* instance, wgpu::BackendType backend)
    : mInstance(instance), mBackend(backend) {
    mSupportedFeatures.EnableFeature(Feature::DawnNative);
    mSupportedFeatures.EnableFeature(Feature::DawnInternalUsages);
-    }
+}

-    MaybeError AdapterBase::Initialize() {
+MaybeError AdapterBase::Initialize() {
    DAWN_TRY_CONTEXT(InitializeImpl(), "initializing adapter (backend=%s)", mBackend);
    DAWN_TRY_CONTEXT(
        InitializeSupportedFeaturesImpl(),

@@ -53,14 +53,14 @@ namespace dawn::native {
        std::min(mLimits.v1.maxVertexBuffers, uint32_t(kMaxVertexBuffers));
    mLimits.v1.maxInterStageShaderComponents =
        std::min(mLimits.v1.maxInterStageShaderComponents, kMaxInterStageShaderComponents);
-    mLimits.v1.maxSampledTexturesPerShaderStage = std::min(
-        mLimits.v1.maxSampledTexturesPerShaderStage, kMaxSampledTexturesPerShaderStage);
+    mLimits.v1.maxSampledTexturesPerShaderStage =
+        std::min(mLimits.v1.maxSampledTexturesPerShaderStage, kMaxSampledTexturesPerShaderStage);
    mLimits.v1.maxSamplersPerShaderStage =
        std::min(mLimits.v1.maxSamplersPerShaderStage, kMaxSamplersPerShaderStage);
    mLimits.v1.maxStorageBuffersPerShaderStage =
        std::min(mLimits.v1.maxStorageBuffersPerShaderStage, kMaxStorageBuffersPerShaderStage);
-    mLimits.v1.maxStorageTexturesPerShaderStage = std::min(
-        mLimits.v1.maxStorageTexturesPerShaderStage, kMaxStorageTexturesPerShaderStage);
+    mLimits.v1.maxStorageTexturesPerShaderStage =
+        std::min(mLimits.v1.maxStorageTexturesPerShaderStage, kMaxStorageTexturesPerShaderStage);
    mLimits.v1.maxUniformBuffersPerShaderStage =
        std::min(mLimits.v1.maxUniformBuffersPerShaderStage, kMaxUniformBuffersPerShaderStage);
    mLimits.v1.maxDynamicUniformBuffersPerPipelineLayout =

@@ -71,30 +71,30 @@ namespace dawn::native {
            kMaxDynamicStorageBuffersPerPipelineLayout);

    return {};
-    }
+}

-    bool AdapterBase::APIGetLimits(SupportedLimits* limits) const {
+bool AdapterBase::APIGetLimits(SupportedLimits* limits) const {
    return GetLimits(limits);
-    }
+}

-    void AdapterBase::APIGetProperties(AdapterProperties* properties) const {
+void AdapterBase::APIGetProperties(AdapterProperties* properties) const {
    properties->vendorID = mVendorId;
    properties->deviceID = mDeviceId;
    properties->name = mName.c_str();
    properties->driverDescription = mDriverDescription.c_str();
    properties->adapterType = mAdapterType;
    properties->backendType = mBackend;
-    }
+}

-    bool AdapterBase::APIHasFeature(wgpu::FeatureName feature) const {
+bool AdapterBase::APIHasFeature(wgpu::FeatureName feature) const {
    return mSupportedFeatures.IsEnabled(feature);
-    }
+}

-    size_t AdapterBase::APIEnumerateFeatures(wgpu::FeatureName* features) const {
+size_t AdapterBase::APIEnumerateFeatures(wgpu::FeatureName* features) const {
    return mSupportedFeatures.EnumerateFeatures(features);
-    }
+}

-    DeviceBase* AdapterBase::APICreateDevice(const DeviceDescriptor* descriptor) {
+DeviceBase* AdapterBase::APICreateDevice(const DeviceDescriptor* descriptor) {
    DeviceDescriptor defaultDesc = {};
    if (descriptor == nullptr) {
        descriptor = &defaultDesc;

@@ -105,9 +105,9 @@ namespace dawn::native {
        return nullptr;
    }
    return result.AcquireSuccess().Detach();
-    }
+}

-    void AdapterBase::APIRequestDevice(const DeviceDescriptor* descriptor,
+void AdapterBase::APIRequestDevice(const DeviceDescriptor* descriptor,
                                   WGPURequestDeviceCallback callback,
                                   void* userdata) {
    static constexpr DeviceDescriptor kDefaultDescriptor = {};

@@ -119,8 +119,8 @@ namespace dawn::native {
    if (result.IsError()) {
        std::unique_ptr<ErrorData> errorData = result.AcquireError();
        // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
-        callback(WGPURequestDeviceStatus_Error, nullptr,
-                 errorData->GetFormattedMessage().c_str(), userdata);
+        callback(WGPURequestDeviceStatus_Error, nullptr, errorData->GetFormattedMessage().c_str(),
+                 userdata);
        return;
    }

@@ -130,29 +130,29 @@ namespace dawn::native {
        device == nullptr ? WGPURequestDeviceStatus_Unknown : WGPURequestDeviceStatus_Success;
    // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
    callback(status, ToAPI(device.Detach()), nullptr, userdata);
-    }
+}

-    uint32_t AdapterBase::GetVendorId() const {
+uint32_t AdapterBase::GetVendorId() const {
    return mVendorId;
-    }
+}

-    uint32_t AdapterBase::GetDeviceId() const {
+uint32_t AdapterBase::GetDeviceId() const {
    return mDeviceId;
-    }
+}

-    wgpu::BackendType AdapterBase::GetBackendType() const {
+wgpu::BackendType AdapterBase::GetBackendType() const {
    return mBackend;
-    }
+}

-    InstanceBase* AdapterBase::GetInstance() const {
+InstanceBase* AdapterBase::GetInstance() const {
    return mInstance;
-    }
+}

-    FeaturesSet AdapterBase::GetSupportedFeatures() const {
+FeaturesSet AdapterBase::GetSupportedFeatures() const {
    return mSupportedFeatures;
-    }
+}

-    bool AdapterBase::SupportsAllRequiredFeatures(
+bool AdapterBase::SupportsAllRequiredFeatures(
    const ityp::span<size_t, const wgpu::FeatureName>& features) const {
    for (wgpu::FeatureName f : features) {
        if (!mSupportedFeatures.IsEnabled(f)) {

@@ -160,9 +160,9 @@ namespace dawn::native {
        }
    }
    return true;
-    }
+}

-    WGPUDeviceProperties AdapterBase::GetAdapterProperties() const {
+WGPUDeviceProperties AdapterBase::GetAdapterProperties() const {
    WGPUDeviceProperties adapterProperties = {};
    adapterProperties.deviceID = mDeviceId;
    adapterProperties.vendorID = mVendorId;

@@ -176,9 +176,9 @@ namespace dawn::native {
    // send the adapter properties across the wire.
    GetLimits(FromAPI(&adapterProperties.limits));
    return adapterProperties;
-    }
+}

-    bool AdapterBase::GetLimits(SupportedLimits* limits) const {
+bool AdapterBase::GetLimits(SupportedLimits* limits) const {
    ASSERT(limits != nullptr);
    if (limits->nextInChain != nullptr) {
        return false;

@@ -189,22 +189,21 @@ namespace dawn::native {
        limits->limits = mLimits.v1;
    }
    return true;
-    }
+}

-    ResultOrError<Ref<DeviceBase>> AdapterBase::CreateDeviceInternal(
+ResultOrError<Ref<DeviceBase>> AdapterBase::CreateDeviceInternal(
    const DeviceDescriptor* descriptor) {
    ASSERT(descriptor != nullptr);

    for (uint32_t i = 0; i < descriptor->requiredFeaturesCount; ++i) {
        wgpu::FeatureName f = descriptor->requiredFeatures[i];
        DAWN_TRY(ValidateFeatureName(f));
-        DAWN_INVALID_IF(!mSupportedFeatures.IsEnabled(f),
-                        "Requested feature %s is not supported.", f);
+        DAWN_INVALID_IF(!mSupportedFeatures.IsEnabled(f), "Requested feature %s is not supported.",
+                        f);
    }

    if (descriptor->requiredLimits != nullptr) {
-        DAWN_TRY_CONTEXT(
-            ValidateLimits(mUseTieredLimits ? ApplyLimitTiers(mLimits.v1) : mLimits.v1,
+        DAWN_TRY_CONTEXT(ValidateLimits(mUseTieredLimits ? ApplyLimitTiers(mLimits.v1) : mLimits.v1,
                                        descriptor->requiredLimits->limits),
                         "validating required limits");

@@ -212,19 +211,19 @@ namespace dawn::native {
                        "nextInChain is not nullptr.");
    }
    return CreateDeviceImpl(descriptor);
-    }
+}

-    void AdapterBase::SetUseTieredLimits(bool useTieredLimits) {
+void AdapterBase::SetUseTieredLimits(bool useTieredLimits) {
    mUseTieredLimits = useTieredLimits;
-    }
+}

-    void AdapterBase::ResetInternalDeviceForTesting() {
+void AdapterBase::ResetInternalDeviceForTesting() {
    mInstance->ConsumedError(ResetInternalDeviceForTestingImpl());
-    }
+}

-    MaybeError AdapterBase::ResetInternalDeviceForTestingImpl() {
+MaybeError AdapterBase::ResetInternalDeviceForTestingImpl() {
    return DAWN_INTERNAL_ERROR(
        "ResetInternalDeviceForTesting should only be used with the D3D12 backend.");
-    }
+}

} // namespace dawn::native
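For reference, the shape of the public call that lands in APIRequestDevice above, as a sketch
using the C API names from webgpu.h (error handling elided):

    void OnDevice(WGPURequestDeviceStatus status, WGPUDevice device, const char* message,
                  void* userdata) {
        // On failure, status is WGPURequestDeviceStatus_Error and message carries
        // the formatted error produced by CreateDeviceInternal's validation.
    }

    wgpuAdapterRequestDevice(adapter, /*descriptor=*/nullptr, OnDevice, /*userdata=*/nullptr);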
@@ -28,9 +28,9 @@

namespace dawn::native {

-    class DeviceBase;
+class DeviceBase;

-    class AdapterBase : public RefCounted {
+class AdapterBase : public RefCounted {
  public:
    AdapterBase(InstanceBase* instance, wgpu::BackendType backend);
    virtual ~AdapterBase() = default;

@@ -74,8 +74,7 @@ namespace dawn::native {
    FeaturesSet mSupportedFeatures;

  private:
-    virtual ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(
-        const DeviceDescriptor* descriptor) = 0;
+    virtual ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(const DeviceDescriptor* descriptor) = 0;

    virtual MaybeError InitializeImpl() = 0;

@@ -92,7 +91,7 @@ namespace dawn::native {
    wgpu::BackendType mBackend;
    CombinedLimits mLimits;
    bool mUseTieredLimits = false;
-    };
+};

} // namespace dawn::native
@@ -20,11 +20,10 @@

namespace dawn::native {

-    AsyncTaskManager::AsyncTaskManager(dawn::platform::WorkerTaskPool* workerTaskPool)
-        : mWorkerTaskPool(workerTaskPool) {
-    }
+AsyncTaskManager::AsyncTaskManager(dawn::platform::WorkerTaskPool* workerTaskPool)
+    : mWorkerTaskPool(workerTaskPool) {}

-    void AsyncTaskManager::PostTask(AsyncTask asyncTask) {
+void AsyncTaskManager::PostTask(AsyncTask asyncTask) {
    // If these allocations become expensive, we can slab-allocate tasks.
    Ref<WaitableTask> waitableTask = AcquireRef(new WaitableTask());
    waitableTask->taskManager = this;

@@ -44,17 +43,17 @@ namespace dawn::native {
    waitableTask->Reference();
    waitableTask->waitableEvent =
        mWorkerTaskPool->PostWorkerTask(DoWaitableTask, waitableTask.Get());
-    }
+}

-    void AsyncTaskManager::HandleTaskCompletion(WaitableTask* task) {
+void AsyncTaskManager::HandleTaskCompletion(WaitableTask* task) {
    std::lock_guard<std::mutex> lock(mPendingTasksMutex);
    auto iter = mPendingTasks.find(task);
    if (iter != mPendingTasks.end()) {
        mPendingTasks.erase(iter);
    }
-    }
+}

-    void AsyncTaskManager::WaitAllPendingTasks() {
+void AsyncTaskManager::WaitAllPendingTasks() {
    std::unordered_map<WaitableTask*, Ref<WaitableTask>> allPendingTasks;

    {

@@ -65,17 +64,17 @@ namespace dawn::native {
    for (auto& [_, task] : allPendingTasks) {
        task->waitableEvent->Wait();
    }
-    }
+}

-    bool AsyncTaskManager::HasPendingTasks() {
+bool AsyncTaskManager::HasPendingTasks() {
    std::lock_guard<std::mutex> lock(mPendingTasksMutex);
    return !mPendingTasks.empty();
-    }
+}

-    void AsyncTaskManager::DoWaitableTask(void* task) {
+void AsyncTaskManager::DoWaitableTask(void* task) {
    Ref<WaitableTask> waitableTask = AcquireRef(static_cast<WaitableTask*>(task));
    waitableTask->asyncTask();
    waitableTask->taskManager->HandleTaskCompletion(waitableTask.Get());
-    }
+}

} // namespace dawn::native
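A minimal usage sketch, assuming pool is an existing dawn::platform::WorkerTaskPool*:

    AsyncTaskManager taskManager(pool);
    taskManager.PostTask([] {
        // Expensive work, e.g. an asynchronous shader compilation.
    });
    taskManager.WaitAllPendingTasks();  // block until everything posted has run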
@@ -23,20 +23,20 @@
#include "dawn/common/RefCounted.h"

namespace dawn::platform {
-    class WaitableEvent;
-    class WorkerTaskPool;
+class WaitableEvent;
+class WorkerTaskPool;
} // namespace dawn::platform

namespace dawn::native {

-    // TODO(crbug.com/dawn/826): we'll add additional things to AsyncTask in the future, like
-    // Cancel() and RunNow(). Cancelling helps avoid running the task's body when we are just
-    // shutting down the device. RunNow() could be used for more advanced scenarios, for example
-    // always doing ShaderModule initial compilation asynchronously, but being able to steal the
-    // task if we need it for synchronous pipeline compilation.
-    using AsyncTask = std::function<void()>;
+// TODO(crbug.com/dawn/826): we'll add additional things to AsyncTask in the future, like
+// Cancel() and RunNow(). Cancelling helps avoid running the task's body when we are just
+// shutting down the device. RunNow() could be used for more advanced scenarios, for example
+// always doing ShaderModule initial compilation asynchronously, but being able to steal the
+// task if we need it for synchronous pipeline compilation.
+using AsyncTask = std::function<void()>;

-    class AsyncTaskManager {
+class AsyncTaskManager {
  public:
    explicit AsyncTaskManager(dawn::platform::WorkerTaskPool* workerTaskPool);

@@ -58,7 +58,7 @@ namespace dawn::native {
    std::mutex mPendingTasksMutex;
    std::unordered_map<WaitableTask*, Ref<WaitableTask>> mPendingTasks;
    dawn::platform::WorkerTaskPool* mWorkerTaskPool;
-    };
+};

} // namespace dawn::native
@@ -21,8 +21,7 @@

namespace dawn::native {

-    AttachmentStateBlueprint::AttachmentStateBlueprint(
-        const RenderBundleEncoderDescriptor* descriptor)
+AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderBundleEncoderDescriptor* descriptor)
    : mSampleCount(descriptor->sampleCount) {
    ASSERT(descriptor->colorFormatsCount <= kMaxColorAttachments);
    for (ColorAttachmentIndex i(uint8_t(0));

@@ -34,9 +33,9 @@ namespace dawn::native {
        }
    }
    mDepthStencilFormat = descriptor->depthStencilFormat;
-    }
+}

-    AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderPipelineDescriptor* descriptor)
+AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderPipelineDescriptor* descriptor)
    : mSampleCount(descriptor->multisample.count) {
    if (descriptor->fragment != nullptr) {
        ASSERT(descriptor->fragment->targetCount <= kMaxColorAttachments);

@@ -54,14 +53,12 @@ namespace dawn::native {
    if (descriptor->depthStencil != nullptr) {
        mDepthStencilFormat = descriptor->depthStencil->format;
    }
-    }
+}

-    AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderPassDescriptor* descriptor) {
+AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderPassDescriptor* descriptor) {
    for (ColorAttachmentIndex i(uint8_t(0));
-         i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->colorAttachmentCount));
-         ++i) {
-        TextureViewBase* attachment =
-            descriptor->colorAttachments[static_cast<uint8_t>(i)].view;
+         i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->colorAttachmentCount)); ++i) {
+        TextureViewBase* attachment = descriptor->colorAttachments[static_cast<uint8_t>(i)].view;
        if (attachment == nullptr) {
            continue;
        }

@@ -83,12 +80,11 @@ namespace dawn::native {
        }
    }
    ASSERT(mSampleCount > 0);
-    }
+}

-    AttachmentStateBlueprint::AttachmentStateBlueprint(const AttachmentStateBlueprint& rhs) =
-        default;
+AttachmentStateBlueprint::AttachmentStateBlueprint(const AttachmentStateBlueprint& rhs) = default;

-    size_t AttachmentStateBlueprint::HashFunc::operator()(
+size_t AttachmentStateBlueprint::HashFunc::operator()(
    const AttachmentStateBlueprint* attachmentState) const {
    size_t hash = 0;

@@ -105,10 +101,9 @@ namespace dawn::native {
    HashCombine(&hash, attachmentState->mSampleCount);

    return hash;
-    }
+}

-    bool AttachmentStateBlueprint::EqualityFunc::operator()(
-        const AttachmentStateBlueprint* a,
+bool AttachmentStateBlueprint::EqualityFunc::operator()(const AttachmentStateBlueprint* a,
                                                        const AttachmentStateBlueprint* b) const {
    // Check set attachments
    if (a->mColorAttachmentsSet != b->mColorAttachmentsSet) {

@@ -133,43 +128,41 @@ namespace dawn::native {
    }

    return true;
-    }
+}

-    AttachmentState::AttachmentState(DeviceBase* device, const AttachmentStateBlueprint& blueprint)
-        : AttachmentStateBlueprint(blueprint), ObjectBase(device) {
-    }
+AttachmentState::AttachmentState(DeviceBase* device, const AttachmentStateBlueprint& blueprint)
+    : AttachmentStateBlueprint(blueprint), ObjectBase(device) {}

-    AttachmentState::~AttachmentState() {
+AttachmentState::~AttachmentState() {
    GetDevice()->UncacheAttachmentState(this);
-    }
+}

-    size_t AttachmentState::ComputeContentHash() {
+size_t AttachmentState::ComputeContentHash() {
    // TODO(dawn:549): skip this traversal and reuse the blueprint.
    return AttachmentStateBlueprint::HashFunc()(this);
-    }
+}

-    ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments>
-    AttachmentState::GetColorAttachmentsMask() const {
+ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> AttachmentState::GetColorAttachmentsMask()
+    const {
    return mColorAttachmentsSet;
-    }
+}

-    wgpu::TextureFormat AttachmentState::GetColorAttachmentFormat(
-        ColorAttachmentIndex index) const {
+wgpu::TextureFormat AttachmentState::GetColorAttachmentFormat(ColorAttachmentIndex index) const {
    ASSERT(mColorAttachmentsSet[index]);
    return mColorFormats[index];
-    }
+}

-    bool AttachmentState::HasDepthStencilAttachment() const {
+bool AttachmentState::HasDepthStencilAttachment() const {
    return mDepthStencilFormat != wgpu::TextureFormat::Undefined;
-    }
+}

-    wgpu::TextureFormat AttachmentState::GetDepthStencilFormat() const {
+wgpu::TextureFormat AttachmentState::GetDepthStencilFormat() const {
    ASSERT(HasDepthStencilAttachment());
    return mDepthStencilFormat;
-    }
+}

-    uint32_t AttachmentState::GetSampleCount() const {
+uint32_t AttachmentState::GetSampleCount() const {
    return mSampleCount;
-    }
+}

} // namespace dawn::native
@@ -29,12 +29,12 @@

namespace dawn::native {

-    class DeviceBase;
+class DeviceBase;

-    // AttachmentStateBlueprint and AttachmentState are separated so the AttachmentState
-    // can be constructed by copying the blueprint state instead of traversing descriptors.
-    // Also, AttachmentStateBlueprint does not need a refcount like AttachmentState.
-    class AttachmentStateBlueprint {
+// AttachmentStateBlueprint and AttachmentState are separated so the AttachmentState
+// can be constructed by copying the blueprint state instead of traversing descriptors.
+// Also, AttachmentStateBlueprint does not need a refcount like AttachmentState.
+class AttachmentStateBlueprint {
  public:
    // Note: Descriptors must be validated before the AttachmentState is constructed.
    explicit AttachmentStateBlueprint(const RenderBundleEncoderDescriptor* descriptor);

@@ -48,8 +48,7 @@ namespace dawn::native {
        size_t operator()(const AttachmentStateBlueprint* attachmentState) const;
    };
    struct EqualityFunc {
-        bool operator()(const AttachmentStateBlueprint* a,
-                        const AttachmentStateBlueprint* b) const;
+        bool operator()(const AttachmentStateBlueprint* a, const AttachmentStateBlueprint* b) const;
    };

  protected:

@@ -58,9 +57,9 @@ namespace dawn::native {
    // Default (texture format Undefined) indicates there is no depth stencil attachment.
    wgpu::TextureFormat mDepthStencilFormat = wgpu::TextureFormat::Undefined;
    uint32_t mSampleCount = 0;
-    };
+};

-    class AttachmentState final : public AttachmentStateBlueprint,
+class AttachmentState final : public AttachmentStateBlueprint,
                              public ObjectBase,
                              public CachedObject {
  public:

@@ -76,7 +75,7 @@ namespace dawn::native {

  private:
    ~AttachmentState() override;
-    };
+};

} // namespace dawn::native
@@ -16,21 +16,20 @@

namespace dawn::native {

-    BackendConnection::BackendConnection(InstanceBase* instance, wgpu::BackendType type)
-        : mInstance(instance), mType(type) {
-    }
+BackendConnection::BackendConnection(InstanceBase* instance, wgpu::BackendType type)
+    : mInstance(instance), mType(type) {}

-    wgpu::BackendType BackendConnection::GetType() const {
+wgpu::BackendType BackendConnection::GetType() const {
    return mType;
-    }
+}

-    InstanceBase* BackendConnection::GetInstance() const {
+InstanceBase* BackendConnection::GetInstance() const {
    return mInstance;
-    }
+}

-    ResultOrError<std::vector<Ref<AdapterBase>>> BackendConnection::DiscoverAdapters(
+ResultOrError<std::vector<Ref<AdapterBase>>> BackendConnection::DiscoverAdapters(
    const AdapterDiscoveryOptionsBase* options) {
    return DAWN_FORMAT_VALIDATION_ERROR("DiscoverAdapters not implemented for this backend.");
-    }
+}

} // namespace dawn::native
@@ -23,9 +23,9 @@

namespace dawn::native {

-    // A common interface for all backends. Mostly used to create adapters for a particular
-    // backend.
-    class BackendConnection {
+// A common interface for all backends. Mostly used to create adapters for a particular
+// backend.
+class BackendConnection {
  public:
    BackendConnection(InstanceBase* instance, wgpu::BackendType type);
    virtual ~BackendConnection() = default;

@@ -44,7 +44,7 @@ namespace dawn::native {
  private:
    InstanceBase* mInstance = nullptr;
    wgpu::BackendType mType;
-    };
+};

} // namespace dawn::native
@@ -29,11 +29,11 @@

namespace dawn::native {

-    namespace {
+namespace {

-    // Helper functions to perform binding-type specific validation
+// Helper functions to perform binding-type specific validation

-    MaybeError ValidateBufferBinding(const DeviceBase* device,
+MaybeError ValidateBufferBinding(const DeviceBase* device,
                                 const BindGroupEntry& entry,
                                 const BindingInfo& bindingInfo) {
    DAWN_INVALID_IF(entry.buffer == nullptr, "Binding entry buffer not set.");

@@ -54,21 +54,18 @@ namespace dawn::native {
        case wgpu::BufferBindingType::Uniform:
            requiredUsage = wgpu::BufferUsage::Uniform;
            maxBindingSize = device->GetLimits().v1.maxUniformBufferBindingSize;
-            requiredBindingAlignment =
-                device->GetLimits().v1.minUniformBufferOffsetAlignment;
+            requiredBindingAlignment = device->GetLimits().v1.minUniformBufferOffsetAlignment;
            break;
        case wgpu::BufferBindingType::Storage:
        case wgpu::BufferBindingType::ReadOnlyStorage:
            requiredUsage = wgpu::BufferUsage::Storage;
            maxBindingSize = device->GetLimits().v1.maxStorageBufferBindingSize;
-            requiredBindingAlignment =
-                device->GetLimits().v1.minStorageBufferOffsetAlignment;
+            requiredBindingAlignment = device->GetLimits().v1.minStorageBufferOffsetAlignment;
            break;
        case kInternalStorageBufferBinding:
            requiredUsage = kInternalStorageBuffer;
            maxBindingSize = device->GetLimits().v1.maxStorageBufferBindingSize;
-            requiredBindingAlignment =
-                device->GetLimits().v1.minStorageBufferOffsetAlignment;
+            requiredBindingAlignment = device->GetLimits().v1.minStorageBufferOffsetAlignment;
            break;
        case wgpu::BufferBindingType::Undefined:
            UNREACHABLE();

@@ -92,31 +89,30 @@ namespace dawn::native {

    // Note that no overflow can happen because we already checked that
    // bufferSize >= bindingSize
-    DAWN_INVALID_IF(
-        entry.offset > bufferSize - bindingSize,
+    DAWN_INVALID_IF(entry.offset > bufferSize - bindingSize,
                    "Binding range (offset: %u, size: %u) doesn't fit in the size (%u) of %s.",
                    entry.offset, bindingSize, bufferSize, entry.buffer);

    DAWN_INVALID_IF(!IsAligned(entry.offset, requiredBindingAlignment),
-                    "Offset (%u) does not satisfy the minimum %s alignment (%u).",
-                    entry.offset, bindingInfo.buffer.type, requiredBindingAlignment);
+                    "Offset (%u) does not satisfy the minimum %s alignment (%u).", entry.offset,
+                    bindingInfo.buffer.type, requiredBindingAlignment);

    DAWN_INVALID_IF(!(entry.buffer->GetUsage() & requiredUsage),
                    "Binding usage (%s) of %s doesn't match expected usage (%s).",
                    entry.buffer->GetUsageExternalOnly(), entry.buffer, requiredUsage);

    DAWN_INVALID_IF(bindingSize < bindingInfo.buffer.minBindingSize,
-                    "Binding size (%u) is smaller than the minimum binding size (%u).",
-                    bindingSize, bindingInfo.buffer.minBindingSize);
+                    "Binding size (%u) is smaller than the minimum binding size (%u).", bindingSize,
+                    bindingInfo.buffer.minBindingSize);

    DAWN_INVALID_IF(bindingSize > maxBindingSize,
-                    "Binding size (%u) is larger than the maximum binding size (%u).",
-                    bindingSize, maxBindingSize);
+                    "Binding size (%u) is larger than the maximum binding size (%u).", bindingSize,
+                    maxBindingSize);

    return {};
-    }
+}
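For illustration, a binding that trips the alignment check above (values are hypothetical;
minUniformBufferOffsetAlignment is commonly 256):

    wgpu::BindGroupEntry entry = {};
    entry.binding = 0;
    entry.buffer = uniformBuffer;  // some buffer created with wgpu::BufferUsage::Uniform
    entry.offset = 128;            // not 256-aligned -> "Offset (128) does not satisfy..."
    entry.size = 64;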
|
||||
MaybeError ValidateTextureBinding(DeviceBase* device,
const BindGroupEntry& entry,
const BindingInfo& bindingInfo) {
DAWN_INVALID_IF(entry.textureView == nullptr, "Binding entry textureView not set.");

@@ -131,24 +127,20 @@ namespace dawn::native {
TextureViewBase* view = entry.textureView;

Aspect aspect = view->GetAspects();
-DAWN_INVALID_IF(!HasOneBit(aspect), "Multiple aspects (%s) selected in %s.", aspect,
-view);
+DAWN_INVALID_IF(!HasOneBit(aspect), "Multiple aspects (%s) selected in %s.", aspect, view);

TextureBase* texture = view->GetTexture();
switch (bindingInfo.bindingType) {
case BindingInfoType::Texture: {
SampleTypeBit supportedTypes =
texture->GetFormat().GetAspectInfo(aspect).supportedSampleTypes;
-SampleTypeBit requiredType =
-SampleTypeToSampleTypeBit(bindingInfo.texture.sampleType);
+SampleTypeBit requiredType = SampleTypeToSampleTypeBit(bindingInfo.texture.sampleType);

-DAWN_INVALID_IF(
-!(texture->GetUsage() & wgpu::TextureUsage::TextureBinding),
+DAWN_INVALID_IF(!(texture->GetUsage() & wgpu::TextureUsage::TextureBinding),
"Usage (%s) of %s doesn't include TextureUsage::TextureBinding.",
texture->GetUsage(), texture);

-DAWN_INVALID_IF(
-texture->IsMultisampledTexture() != bindingInfo.texture.multisampled,
+DAWN_INVALID_IF(texture->IsMultisampledTexture() != bindingInfo.texture.multisampled,
"Sample count (%u) of %s doesn't match expectation (multisampled: %d).",
texture->GetSampleCount(), texture, bindingInfo.texture.multisampled);

@@ -158,29 +150,25 @@ namespace dawn::native {
"types (%s).",
supportedTypes, texture, requiredType);

-DAWN_INVALID_IF(
-entry.textureView->GetDimension() != bindingInfo.texture.viewDimension,
+DAWN_INVALID_IF(entry.textureView->GetDimension() != bindingInfo.texture.viewDimension,
"Dimension (%s) of %s doesn't match the expected dimension (%s).",
entry.textureView->GetDimension(), entry.textureView,
bindingInfo.texture.viewDimension);
break;
}
case BindingInfoType::StorageTexture: {
-DAWN_INVALID_IF(
-!(texture->GetUsage() & wgpu::TextureUsage::StorageBinding),
+DAWN_INVALID_IF(!(texture->GetUsage() & wgpu::TextureUsage::StorageBinding),
"Usage (%s) of %s doesn't include TextureUsage::StorageBinding.",
texture->GetUsage(), texture);

ASSERT(!texture->IsMultisampledTexture());

-DAWN_INVALID_IF(
-texture->GetFormat().format != bindingInfo.storageTexture.format,
+DAWN_INVALID_IF(texture->GetFormat().format != bindingInfo.storageTexture.format,
"Format (%s) of %s expected to be (%s).", texture->GetFormat().format,
texture, bindingInfo.storageTexture.format);

DAWN_INVALID_IF(
-entry.textureView->GetDimension() !=
-bindingInfo.storageTexture.viewDimension,
+entry.textureView->GetDimension() != bindingInfo.storageTexture.viewDimension,
"Dimension (%s) of %s doesn't match the expected dimension (%s).",
entry.textureView->GetDimension(), entry.textureView,
bindingInfo.storageTexture.viewDimension);

@@ -196,9 +184,9 @@ namespace dawn::native {
}

return {};
}

MaybeError ValidateSamplerBinding(const DeviceBase* device,
const BindGroupEntry& entry,
const BindingInfo& bindingInfo) {
DAWN_INVALID_IF(entry.sampler == nullptr, "Binding entry sampler not set.");

@@ -214,22 +202,19 @@ namespace dawn::native {

switch (bindingInfo.sampler.type) {
case wgpu::SamplerBindingType::NonFiltering:
-DAWN_INVALID_IF(
-entry.sampler->IsFiltering(),
+DAWN_INVALID_IF(entry.sampler->IsFiltering(),
"Filtering sampler %s is incompatible with non-filtering sampler "
"binding.",
entry.sampler);
[[fallthrough]];
case wgpu::SamplerBindingType::Filtering:
-DAWN_INVALID_IF(
-entry.sampler->IsComparison(),
+DAWN_INVALID_IF(entry.sampler->IsComparison(),
"Comparison sampler %s is incompatible with non-comparison sampler "
"binding.",
entry.sampler);
break;
case wgpu::SamplerBindingType::Comparison:
-DAWN_INVALID_IF(
-!entry.sampler->IsComparison(),
+DAWN_INVALID_IF(!entry.sampler->IsComparison(),
"Non-comparison sampler %s is incompatible with comparison sampler "
"binding.",
entry.sampler);

@@ -240,9 +225,9 @@ namespace dawn::native {
}

return {};
}

MaybeError ValidateExternalTextureBinding(
const DeviceBase* device,
const BindGroupEntry& entry,
const ExternalTextureBindingEntry* externalTextureBindingEntry,

@@ -254,8 +239,7 @@ namespace dawn::native {
entry.sampler != nullptr || entry.textureView != nullptr || entry.buffer != nullptr,
"Expected only external texture to be set for binding entry.");

-DAWN_INVALID_IF(
-expansions.find(BindingNumber(entry.binding)) == expansions.end(),
+DAWN_INVALID_IF(expansions.find(BindingNumber(entry.binding)) == expansions.end(),
"External texture binding entry %u is not present in the bind group layout.",
entry.binding);

@@ -265,12 +249,11 @@ namespace dawn::native {
DAWN_TRY(device->ValidateObject(externalTextureBindingEntry->externalTexture));

return {};
}

} // anonymous namespace
-MaybeError ValidateBindGroupDescriptor(DeviceBase* device,
-const BindGroupDescriptor* descriptor) {
+MaybeError ValidateBindGroupDescriptor(DeviceBase* device, const BindGroupDescriptor* descriptor) {
DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");

DAWN_TRY(device->ValidateObject(descriptor->layout));

@@ -357,11 +340,11 @@ namespace dawn::native {
ASSERT(bindingsSet.count() == descriptor->layout->GetUnexpandedBindingCount());

return {};
} // anonymous namespace

// BindGroup

BindGroupBase::BindGroupBase(DeviceBase* device,
const BindGroupDescriptor* descriptor,
void* bindingDataStart)
: ApiObjectBase(device, descriptor->label),

@@ -460,86 +443,84 @@ namespace dawn::native {
}

TrackInDevice();
}

BindGroupBase::BindGroupBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
TrackInDevice();
}

BindGroupBase::~BindGroupBase() = default;

void BindGroupBase::DestroyImpl() {
if (mLayout != nullptr) {
ASSERT(!IsError());
for (BindingIndex i{0}; i < mLayout->GetBindingCount(); ++i) {
mBindingData.bindings[i].~Ref<ObjectBase>();
}
}
}

void BindGroupBase::DeleteThis() {
// Add another ref to the layout so that if this is the last ref, the layout
// is destroyed after the bind group. The bind group is slab-allocated inside
// memory owned by the layout (except for the null backend).
Ref<BindGroupLayoutBase> layout = mLayout;
ApiObjectBase::DeleteThis();
}

BindGroupBase::BindGroupBase(DeviceBase* device, ObjectBase::ErrorTag tag)
-: ApiObjectBase(device, tag), mBindingData() {
-}
+: ApiObjectBase(device, tag), mBindingData() {}

// static
BindGroupBase* BindGroupBase::MakeError(DeviceBase* device) {
return new BindGroupBase(device, ObjectBase::kError);
}

ObjectType BindGroupBase::GetType() const {
return ObjectType::BindGroup;
}

BindGroupLayoutBase* BindGroupBase::GetLayout() {
ASSERT(!IsError());
return mLayout.Get();
}

const BindGroupLayoutBase* BindGroupBase::GetLayout() const {
ASSERT(!IsError());
return mLayout.Get();
}

const ityp::span<uint32_t, uint64_t>& BindGroupBase::GetUnverifiedBufferSizes() const {
ASSERT(!IsError());
return mBindingData.unverifiedBufferSizes;
}

BufferBinding BindGroupBase::GetBindingAsBufferBinding(BindingIndex bindingIndex) {
ASSERT(!IsError());
ASSERT(bindingIndex < mLayout->GetBindingCount());
ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Buffer);
BufferBase* buffer = static_cast<BufferBase*>(mBindingData.bindings[bindingIndex].Get());
return {buffer, mBindingData.bufferData[bindingIndex].offset,
mBindingData.bufferData[bindingIndex].size};
}

SamplerBase* BindGroupBase::GetBindingAsSampler(BindingIndex bindingIndex) const {
ASSERT(!IsError());
ASSERT(bindingIndex < mLayout->GetBindingCount());
ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Sampler);
return static_cast<SamplerBase*>(mBindingData.bindings[bindingIndex].Get());
}

TextureViewBase* BindGroupBase::GetBindingAsTextureView(BindingIndex bindingIndex) {
ASSERT(!IsError());
ASSERT(bindingIndex < mLayout->GetBindingCount());
ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Texture ||
-mLayout->GetBindingInfo(bindingIndex).bindingType ==
-BindingInfoType::StorageTexture);
+mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::StorageTexture);
return static_cast<TextureViewBase*>(mBindingData.bindings[bindingIndex].Get());
}

const std::vector<Ref<ExternalTextureBase>>& BindGroupBase::GetBoundExternalTextures() const {
return mBoundExternalTextures;
}

} // namespace dawn::native
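DeleteThis relies on a small lifetime trick: because the bind group's storage is slab-allocated inside memory owned by its layout, a temporary Ref on the stack keeps the layout alive until the base-class deletion has finished. A generic sketch of that pattern, with simplified types standing in for Dawn's Ref and object classes:

    #include <memory>

    struct Layout {};  // Stands in for the object that owns the slab.

    struct Group {
        std::shared_ptr<Layout> mLayout;

        // Assumes *this was heap-allocated. Copying the reference onto the
        // stack makes the layout outlive the deletion even when mLayout held
        // the last reference, so the slab backing *this stays valid.
        void DeleteThis() {
            std::shared_ptr<Layout> keepAlive = mLayout;
            delete this;  // mLayout is destroyed here; keepAlive still owns the layout.
        }
    };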
@@ -29,18 +29,17 @@
namespace dawn::native {

class DeviceBase;

-MaybeError ValidateBindGroupDescriptor(DeviceBase* device,
-const BindGroupDescriptor* descriptor);
+MaybeError ValidateBindGroupDescriptor(DeviceBase* device, const BindGroupDescriptor* descriptor);

struct BufferBinding {
BufferBase* buffer;
uint64_t offset;
uint64_t size;
};

class BindGroupBase : public ApiObjectBase {
public:
static BindGroupBase* MakeError(DeviceBase* device);

@@ -90,7 +89,7 @@ namespace dawn::native {
// TODO(dawn:1293): Store external textures in
// BindGroupLayoutBase::BindingDataPointers::bindings
std::vector<Ref<ExternalTextureBase>> mBoundExternalTextures;
};

} // namespace dawn::native
@@ -31,21 +31,20 @@
namespace dawn::native {

namespace {
MaybeError ValidateStorageTextureFormat(DeviceBase* device,
wgpu::TextureFormat storageTextureFormat) {
const Format* format = nullptr;
DAWN_TRY_ASSIGN(format, device->GetInternalFormat(storageTextureFormat));

ASSERT(format != nullptr);
DAWN_INVALID_IF(!format->supportsStorageUsage,
-"Texture format (%s) does not support storage textures.",
-storageTextureFormat);
+"Texture format (%s) does not support storage textures.", storageTextureFormat);

return {};
}

MaybeError ValidateStorageTextureViewDimension(wgpu::TextureViewDimension dimension) {
switch (dimension) {
case wgpu::TextureViewDimension::Cube:
case wgpu::TextureViewDimension::CubeArray:

@@ -62,9 +61,9 @@ namespace dawn::native {
break;
}
UNREACHABLE();
}

MaybeError ValidateBindGroupLayoutEntry(DeviceBase* device,
const BindGroupLayoutEntry& entry,
bool allowInternalBinding) {
DAWN_TRY(ValidateShaderStage(entry.visibility));

@@ -111,8 +110,7 @@ namespace dawn::native {
viewDimension = texture.viewDimension;
}

-DAWN_INVALID_IF(
-texture.multisampled && viewDimension != wgpu::TextureViewDimension::e2D,
+DAWN_INVALID_IF(texture.multisampled && viewDimension != wgpu::TextureViewDimension::e2D,
"View dimension (%s) for a multisampled texture bindings was not %s.",
viewDimension, wgpu::TextureViewDimension::e2D);
}

@@ -150,16 +148,14 @@ namespace dawn::native {
"BindGroupLayoutEntry had more than one of buffer, sampler, texture, "
"storageTexture, or externalTexture set");

-DAWN_INVALID_IF(
-!IsSubset(entry.visibility, allowedStages),
+DAWN_INVALID_IF(!IsSubset(entry.visibility, allowedStages),
"%s bindings cannot be used with a visibility of %s. Only %s are allowed.",
bindingType, entry.visibility, allowedStages);

return {};
}

-BindGroupLayoutEntry CreateSampledTextureBindingForExternalTexture(
-uint32_t binding,
+BindGroupLayoutEntry CreateSampledTextureBindingForExternalTexture(uint32_t binding,
wgpu::ShaderStage visibility) {
BindGroupLayoutEntry entry;
entry.binding = binding;

@@ -168,9 +164,9 @@ namespace dawn::native {
entry.texture.multisampled = false;
entry.texture.sampleType = wgpu::TextureSampleType::Float;
return entry;
}

BindGroupLayoutEntry CreateUniformBindingForExternalTexture(uint32_t binding,
wgpu::ShaderStage visibility) {
BindGroupLayoutEntry entry;
entry.binding = binding;

@@ -178,9 +174,9 @@ namespace dawn::native {
entry.buffer.hasDynamicOffset = false;
entry.buffer.type = wgpu::BufferBindingType::Uniform;
return entry;
}

std::vector<BindGroupLayoutEntry> ExtractAndExpandBglEntries(
const BindGroupLayoutDescriptor* descriptor,
BindingCounts* bindingCounts,
ExternalTextureBindingExpansionMap* externalTextureBindingExpansions) {

@@ -217,13 +213,11 @@ namespace dawn::native {
dawn_native::ExternalTextureBindingExpansion bindingExpansion;

BindGroupLayoutEntry plane0Entry =
-CreateSampledTextureBindingForExternalTexture(entry.binding,
-entry.visibility);
+CreateSampledTextureBindingForExternalTexture(entry.binding, entry.visibility);
bindingExpansion.plane0 = BindingNumber(plane0Entry.binding);
expandedOutput.push_back(plane0Entry);

-BindGroupLayoutEntry plane1Entry =
-CreateSampledTextureBindingForExternalTexture(
+BindGroupLayoutEntry plane1Entry = CreateSampledTextureBindingForExternalTexture(
nextOpenBindingNumberForNewEntry++, entry.visibility);
bindingExpansion.plane1 = BindingNumber(plane1Entry.binding);
expandedOutput.push_back(plane1Entry);

@@ -241,10 +235,10 @@ namespace dawn::native {
}

return expandedOutput;
}
} // anonymous namespace
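ExtractAndExpandBglEntries shows the expansion scheme: one external-texture entry becomes two sampled-texture entries (plane0 and plane1) plus a uniform-buffer params entry, and the expansion map records which binding numbers were assigned. A rough sketch of that bookkeeping with plain integers instead of Dawn's typed entries:

    #include <cstdint>
    #include <map>
    #include <vector>

    struct Expansion {
        uint32_t plane0, plane1, params;
    };

    // Expands each external-texture binding into three internal bindings,
    // handing out fresh binding numbers past the largest one already in use.
    std::vector<uint32_t> ExpandExternalTextures(const std::vector<uint32_t>& externalBindings,
                                                 uint32_t nextFreeBinding,
                                                 std::map<uint32_t, Expansion>* expansions) {
        std::vector<uint32_t> out;
        for (uint32_t binding : externalBindings) {
            Expansion e;
            e.plane0 = binding;            // Reuses the original binding slot.
            e.plane1 = nextFreeBinding++;  // Extra plane for biplanar formats.
            e.params = nextFreeBinding++;  // Uniform buffer with conversion data.
            (*expansions)[binding] = e;
            out.push_back(e.plane0);
            out.push_back(e.plane1);
            out.push_back(e.params);
        }
        return out;
    }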
MaybeError ValidateBindGroupLayoutDescriptor(DeviceBase* device,
const BindGroupLayoutDescriptor* descriptor,
bool allowInternalBinding) {
DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");

@@ -260,8 +254,8 @@ namespace dawn::native {
"Binding number (%u) exceeds the maximum binding number (%u).",
uint32_t(bindingNumber), uint32_t(kMaxBindingNumberTyped));
DAWN_INVALID_IF(bindingsSet.count(bindingNumber) != 0,
-"On entries[%u]: binding index (%u) was specified by a previous entry.",
-i, entry.binding);
+"On entries[%u]: binding index (%u) was specified by a previous entry.", i,
+entry.binding);

DAWN_TRY_CONTEXT(ValidateBindGroupLayoutEntry(device, entry, allowInternalBinding),
"validating entries[%u]", i);

@@ -274,11 +268,11 @@ namespace dawn::native {
DAWN_TRY_CONTEXT(ValidateBindingCounts(bindingCounts), "validating binding counts");

return {};
}

namespace {

bool operator!=(const BindingInfo& a, const BindingInfo& b) {
if (a.visibility != b.visibility || a.bindingType != b.bindingType) {
return true;
}

@@ -302,20 +296,20 @@ namespace dawn::native {
return false;
}
UNREACHABLE();
}

bool IsBufferBinding(const BindGroupLayoutEntry& binding) {
return binding.buffer.type != wgpu::BufferBindingType::Undefined;
}

bool BindingHasDynamicOffset(const BindGroupLayoutEntry& binding) {
if (binding.buffer.type != wgpu::BufferBindingType::Undefined) {
return binding.buffer.hasDynamicOffset;
}
return false;
}

BindingInfo CreateBindGroupLayoutInfo(const BindGroupLayoutEntry& binding) {
BindingInfo bindingInfo;
bindingInfo.binding = BindingNumber(binding.binding);
bindingInfo.visibility = binding.visibility;

@@ -349,9 +343,9 @@ namespace dawn::native {
}

return bindingInfo;
}

bool SortBindingsCompare(const BindGroupLayoutEntry& a, const BindGroupLayoutEntry& b) {
const bool aIsBuffer = IsBufferBinding(a);
const bool bIsBuffer = IsBufferBinding(b);
if (aIsBuffer != bIsBuffer) {

@@ -419,8 +413,7 @@ namespace dawn::native {
return aInfo.storageTexture.access < bInfo.storageTexture.access;
}
if (aInfo.storageTexture.viewDimension != bInfo.storageTexture.viewDimension) {
-return aInfo.storageTexture.viewDimension <
-bInfo.storageTexture.viewDimension;
+return aInfo.storageTexture.viewDimension < bInfo.storageTexture.viewDimension;
}
if (aInfo.storageTexture.format != bInfo.storageTexture.format) {
return aInfo.storageTexture.format < bInfo.storageTexture.format;

@@ -430,11 +423,11 @@ namespace dawn::native {
break;
}
return a.binding < b.binding;
}

// This is a utility function to help ASSERT that the BGL-binding comparator places buffers
// first.
bool CheckBufferBindingsFirst(ityp::span<BindingIndex, const BindingInfo> bindings) {
BindingIndex lastBufferIndex{0};
BindingIndex firstNonBufferIndex = std::numeric_limits<BindingIndex>::max();
for (BindingIndex i{0}; i < bindings.size(); ++i) {

@@ -448,13 +441,13 @@ namespace dawn::native {
// If there are no buffers, then |lastBufferIndex| is initialized to 0 and
// |firstNonBufferIndex| gets set to 0.
return firstNonBufferIndex >= lastBufferIndex;
}

} // namespace
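SortBindingsCompare orders buffer bindings before everything else so that per-buffer data can live in a dense prefix of the binding arrays, and CheckBufferBindingsFirst asserts that invariant: the last index holding a buffer must not exceed the first index holding a non-buffer. A compact sketch of the same predicate, using plain indices instead of Dawn's typed BindingIndex:

    #include <algorithm>
    #include <cstddef>
    #include <limits>
    #include <vector>

    // True when every buffer binding precedes every non-buffer binding,
    // mirroring the assertion made over the sorted binding list.
    bool BuffersComeFirst(const std::vector<bool>& isBuffer) {
        size_t lastBuffer = 0;
        size_t firstNonBuffer = std::numeric_limits<size_t>::max();
        for (size_t i = 0; i < isBuffer.size(); ++i) {
            if (isBuffer[i]) {
                lastBuffer = std::max(lastBuffer, i);
            } else {
                firstNonBuffer = std::min(firstNonBuffer, i);
            }
        }
        // With no buffers, lastBuffer stays 0; with no non-buffers,
        // firstNonBuffer stays at max. Both degenerate cases pass.
        return firstNonBuffer >= lastBuffer;
    }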
// BindGroupLayoutBase

BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device,
const BindGroupLayoutDescriptor* descriptor,
PipelineCompatibilityToken pipelineCompatibilityToken,
ApiObjectBase::UntrackedByDeviceTag tag)

@@ -482,59 +475,58 @@ namespace dawn::native {
}
ASSERT(CheckBufferBindingsFirst({mBindingInfo.data(), GetBindingCount()}));
ASSERT(mBindingInfo.size() <= kMaxBindingsPerPipelineLayoutTyped);
}

BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device,
const BindGroupLayoutDescriptor* descriptor,
PipelineCompatibilityToken pipelineCompatibilityToken)
: BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken, kUntrackedByDevice) {
TrackInDevice();
}

BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag)
-: ApiObjectBase(device, tag) {
-}
+: ApiObjectBase(device, tag) {}

BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device)
: ApiObjectBase(device, kLabelNotImplemented) {
TrackInDevice();
}

BindGroupLayoutBase::~BindGroupLayoutBase() = default;

void BindGroupLayoutBase::DestroyImpl() {
if (IsCachedReference()) {
// Do not uncache the actual cached object if we are a blueprint.
GetDevice()->UncacheBindGroupLayout(this);
}
}

// static
BindGroupLayoutBase* BindGroupLayoutBase::MakeError(DeviceBase* device) {
return new BindGroupLayoutBase(device, ObjectBase::kError);
}

ObjectType BindGroupLayoutBase::GetType() const {
return ObjectType::BindGroupLayout;
}

const BindGroupLayoutBase::BindingMap& BindGroupLayoutBase::GetBindingMap() const {
ASSERT(!IsError());
return mBindingMap;
}

bool BindGroupLayoutBase::HasBinding(BindingNumber bindingNumber) const {
return mBindingMap.count(bindingNumber) != 0;
}

BindingIndex BindGroupLayoutBase::GetBindingIndex(BindingNumber bindingNumber) const {
ASSERT(!IsError());
const auto& it = mBindingMap.find(bindingNumber);
ASSERT(it != mBindingMap.end());
return it->second;
}

size_t BindGroupLayoutBase::ComputeContentHash() {
ObjectContentHasher recorder;
recorder.Record(mPipelineCompatibilityToken);

@@ -552,50 +544,50 @@ namespace dawn::native {
}

return recorder.GetContentHash();
}

bool BindGroupLayoutBase::EqualityFunc::operator()(const BindGroupLayoutBase* a,
const BindGroupLayoutBase* b) const {
return a->IsLayoutEqual(b);
}

BindingIndex BindGroupLayoutBase::GetBindingCount() const {
return mBindingInfo.size();
}

BindingIndex BindGroupLayoutBase::GetBufferCount() const {
return BindingIndex(mBindingCounts.bufferCount);
}

BindingIndex BindGroupLayoutBase::GetDynamicBufferCount() const {
// This is a binding index because dynamic buffers are packed at the front of the binding
// info.
return static_cast<BindingIndex>(mBindingCounts.dynamicStorageBufferCount +
mBindingCounts.dynamicUniformBufferCount);
}

uint32_t BindGroupLayoutBase::GetUnverifiedBufferCount() const {
return mBindingCounts.unverifiedBufferCount;
}

uint32_t BindGroupLayoutBase::GetExternalTextureBindingCount() const {
return mExternalTextureBindingExpansionMap.size();
}

const BindingCounts& BindGroupLayoutBase::GetBindingCountInfo() const {
return mBindingCounts;
}

const ExternalTextureBindingExpansionMap&
BindGroupLayoutBase::GetExternalTextureBindingExpansionMap() const {
return mExternalTextureBindingExpansionMap;
}

uint32_t BindGroupLayoutBase::GetUnexpandedBindingCount() const {
return mUnexpandedBindingCount;
}

bool BindGroupLayoutBase::IsLayoutEqual(const BindGroupLayoutBase* other,
bool excludePipelineCompatibiltyToken) const {
if (!excludePipelineCompatibiltyToken &&
GetPipelineCompatibilityToken() != other->GetPipelineCompatibilityToken()) {

@@ -610,13 +602,13 @@ namespace dawn::native {
}
}
return mBindingMap == other->mBindingMap;
}

PipelineCompatibilityToken BindGroupLayoutBase::GetPipelineCompatibilityToken() const {
return mPipelineCompatibilityToken;
}

size_t BindGroupLayoutBase::GetBindingDataSize() const {
// | ------ buffer-specific ----------| ------------ object pointers -------------|
// | --- offsets + sizes -------------| --------------- Ref<ObjectBase> ----------|
// Followed by:

@@ -624,14 +616,13 @@ namespace dawn::native {
// |-uint64_t[mUnverifiedBufferCount]-|
size_t objectPointerStart = mBindingCounts.bufferCount * sizeof(BufferBindingData);
ASSERT(IsAligned(objectPointerStart, alignof(Ref<ObjectBase>)));
-size_t bufferSizeArrayStart =
-Align(objectPointerStart + mBindingCounts.totalCount * sizeof(Ref<ObjectBase>),
-sizeof(uint64_t));
+size_t bufferSizeArrayStart = Align(
+objectPointerStart + mBindingCounts.totalCount * sizeof(Ref<ObjectBase>), sizeof(uint64_t));
ASSERT(IsAligned(bufferSizeArrayStart, alignof(uint64_t)));
return bufferSizeArrayStart + mBindingCounts.unverifiedBufferCount * sizeof(uint64_t);
}
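GetBindingDataSize computes one contiguous slab holding three arrays: per-buffer offset/size records, then one Ref<ObjectBase> per binding, then one uint64_t per unverified buffer, with alignment fixed up between sections. A sketch of the same size computation with plain counts; the helper names are illustrative, not Dawn's:

    #include <cstddef>
    #include <cstdint>

    // Rounds x up to the next multiple of a (a must be a power of two).
    constexpr size_t AlignUp(size_t x, size_t a) {
        return (x + a - 1) & ~(a - 1);
    }

    struct BufferBindingData {
        uint64_t offset;
        uint64_t size;
    };

    // Size of the slab: bufferCount offset/size records, totalCount object
    // pointers, then unverifiedBufferCount 64-bit sizes, with the final
    // section aligned for uint64_t.
    size_t BindingDataSize(size_t bufferCount, size_t totalCount, size_t unverifiedBufferCount) {
        size_t objectPointerStart = bufferCount * sizeof(BufferBindingData);
        size_t bufferSizeArrayStart =
            AlignUp(objectPointerStart + totalCount * sizeof(void*), sizeof(uint64_t));
        return bufferSizeArrayStart + unverifiedBufferCount * sizeof(uint64_t);
    }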
BindGroupLayoutBase::BindingDataPointers BindGroupLayoutBase::ComputeBindingDataPointers(
void* dataStart) const {
BufferBindingData* bufferData = reinterpret_cast<BufferBindingData*>(dataStart);
auto bindings = reinterpret_cast<Ref<ObjectBase>*>(bufferData + mBindingCounts.bufferCount);

@@ -645,9 +636,9 @@ namespace dawn::native {
return {{bufferData, GetBufferCount()},
{bindings, GetBindingCount()},
{unverifiedBufferSizes, mBindingCounts.unverifiedBufferCount}};
}

bool BindGroupLayoutBase::IsStorageBufferBinding(BindingIndex bindingIndex) const {
ASSERT(bindingIndex < GetBufferCount());
switch (GetBindingInfo(bindingIndex).buffer.type) {
case wgpu::BufferBindingType::Uniform:

@@ -660,9 +651,9 @@ namespace dawn::native {
break;
}
UNREACHABLE();
}

std::string BindGroupLayoutBase::EntriesToString() const {
std::string entries = "[";
std::string sep = "";
const BindGroupLayoutBase::BindingMap& bindingMap = GetBindingMap();

@@ -673,6 +664,6 @@ namespace dawn::native {
}
entries += "]";
return entries;
}

} // namespace dawn::native
@@ -34,24 +34,23 @@
#include "dawn/native/dawn_platform.h"

namespace dawn::native {
// TODO(dawn:1082): Minor optimization to use BindingIndex instead of BindingNumber
struct ExternalTextureBindingExpansion {
BindingNumber plane0;
BindingNumber plane1;
BindingNumber params;
};

-using ExternalTextureBindingExpansionMap =
-std::map<BindingNumber, ExternalTextureBindingExpansion>;
+using ExternalTextureBindingExpansionMap = std::map<BindingNumber, ExternalTextureBindingExpansion>;

MaybeError ValidateBindGroupLayoutDescriptor(DeviceBase* device,
const BindGroupLayoutDescriptor* descriptor,
bool allowInternalBinding = false);

// Bindings are specified as a |BindingNumber| in the BindGroupLayoutDescriptor.
// These numbers may be arbitrary and sparse. Internally, Dawn packs these numbers
// into a packed range of |BindingIndex| integers.
class BindGroupLayoutBase : public ApiObjectBase, public CachedObject {
public:
BindGroupLayoutBase(DeviceBase* device,
const BindGroupLayoutDescriptor* descriptor,

@@ -161,11 +160,10 @@ namespace dawn::native {
ExternalTextureBindingExpansionMap mExternalTextureBindingExpansionMap;

// Non-0 if this BindGroupLayout was created as part of a default PipelineLayout.
-const PipelineCompatibilityToken mPipelineCompatibilityToken =
-PipelineCompatibilityToken(0);
+const PipelineCompatibilityToken mPipelineCompatibilityToken = PipelineCompatibilityToken(0);

uint32_t mUnexpandedBindingCount;
};

} // namespace dawn::native
@@ -25,12 +25,12 @@

namespace dawn::native {

// Keeps track of the dirty bind groups so they can be lazily applied when we know the
// pipeline state or it changes.
// |DynamicOffset| is a template parameter because offsets in Vulkan are uint32_t but uint64_t
// in other backends.
template <bool CanInheritBindGroups, typename DynamicOffset>
class BindGroupTrackerBase {
public:
void OnSetBindGroup(BindGroupIndex index,
BindGroupBase* bindGroup,

@@ -58,9 +58,7 @@ namespace dawn::native {
SetDynamicOffsets(mDynamicOffsets[index].data(), dynamicOffsetCount, dynamicOffsets);
}

-void OnSetPipeline(PipelineBase* pipeline) {
-mPipelineLayout = pipeline->GetLayout();
-}
+void OnSetPipeline(PipelineBase* pipeline) { mPipelineLayout = pipeline->GetLayout(); }

protected:
// The Derived class should call this before it applies bind groups.

@@ -137,7 +135,7 @@ namespace dawn::native {
memcpy(data, dynamicOffsets, sizeof(uint32_t) * dynamicOffsetCount);
}
};

} // namespace dawn::native
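The tracker's job, per the comment above, is to coalesce SetBindGroup calls and re-apply only the groups that actually changed once the pipeline is known. The usual shape of that pattern is a dirty bitset updated on each set call and drained at draw time. A generic sketch under that assumption; the real class additionally handles dynamic offsets, bind group inheritance, and pipeline-layout changes:

    #include <array>
    #include <bitset>
    #include <cstddef>

    constexpr size_t kMaxBindGroups = 4;  // Illustrative count, not Dawn's constant.

    template <typename BindGroup>
    class DirtyBindGroupTracker {
      public:
        void OnSet(size_t index, BindGroup* group) {
            if (mGroups[index] != group) {
                mGroups[index] = group;
                mDirty.set(index);  // Re-apply lazily on the next draw.
            }
        }

        template <typename ApplyFn>
        void Apply(ApplyFn&& apply) {
            for (size_t i = 0; i < kMaxBindGroups; ++i) {
                if (mDirty.test(i)) {
                    apply(i, mGroups[i]);
                }
            }
            mDirty.reset();
        }

      private:
        std::array<BindGroup*, kMaxBindGroups> mGroups = {};
        std::bitset<kMaxBindGroups> mDirty;
    };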
@@ -18,7 +18,7 @@

namespace dawn::native {

void IncrementBindingCounts(BindingCounts* bindingCounts, const BindGroupLayoutEntry& entry) {
bindingCounts->totalCount += 1;

uint32_t PerStageBindingCounts::*perStageBindingCountMember = nullptr;

@@ -71,9 +71,9 @@ namespace dawn::native {
for (SingleShaderStage stage : IterateStages(entry.visibility)) {
++(bindingCounts->perStage[stage].*perStageBindingCountMember);
}
}

void AccumulateBindingCounts(BindingCounts* bindingCounts, const BindingCounts& rhs) {
bindingCounts->totalCount += rhs.totalCount;
bindingCounts->bufferCount += rhs.bufferCount;
bindingCounts->unverifiedBufferCount += rhs.unverifiedBufferCount;

@@ -84,18 +84,16 @@ namespace dawn::native {
bindingCounts->perStage[stage].sampledTextureCount +=
rhs.perStage[stage].sampledTextureCount;
bindingCounts->perStage[stage].samplerCount += rhs.perStage[stage].samplerCount;
-bindingCounts->perStage[stage].storageBufferCount +=
-rhs.perStage[stage].storageBufferCount;
+bindingCounts->perStage[stage].storageBufferCount += rhs.perStage[stage].storageBufferCount;
bindingCounts->perStage[stage].storageTextureCount +=
rhs.perStage[stage].storageTextureCount;
-bindingCounts->perStage[stage].uniformBufferCount +=
-rhs.perStage[stage].uniformBufferCount;
+bindingCounts->perStage[stage].uniformBufferCount += rhs.perStage[stage].uniformBufferCount;
bindingCounts->perStage[stage].externalTextureCount +=
rhs.perStage[stage].externalTextureCount;
}
}

MaybeError ValidateBindingCounts(const BindingCounts& bindingCounts) {
DAWN_INVALID_IF(
bindingCounts.dynamicUniformBufferCount > kMaxDynamicUniformBuffersPerPipelineLayout,
"The number of dynamic uniform buffers (%u) exceeds the maximum per-pipeline-layout "

@@ -110,8 +108,7 @@ namespace dawn::native {

for (SingleShaderStage stage : IterateStages(kAllStages)) {
DAWN_INVALID_IF(
-bindingCounts.perStage[stage].sampledTextureCount >
-kMaxSampledTexturesPerShaderStage,
+bindingCounts.perStage[stage].sampledTextureCount > kMaxSampledTexturesPerShaderStage,
"The number of sampled textures (%u) in the %s stage exceeds the maximum "
"per-stage limit (%u).",
bindingCounts.perStage[stage].sampledTextureCount, stage,

@@ -119,8 +116,7 @@ namespace dawn::native {

// The per-stage number of external textures is bound by the maximum sampled textures
// per stage.
-DAWN_INVALID_IF(
-bindingCounts.perStage[stage].externalTextureCount >
+DAWN_INVALID_IF(bindingCounts.perStage[stage].externalTextureCount >
kMaxSampledTexturesPerShaderStage / kSampledTexturesPerExternalTexture,
"The number of external textures (%u) in the %s stage exceeds the maximum "
"per-stage limit (%u).",

@@ -152,8 +148,7 @@ namespace dawn::native {
"The combination of samplers (%u) and external textures (%u) in the %s stage "
"exceeds the maximum per-stage limit (%u).",
bindingCounts.perStage[stage].samplerCount,
-bindingCounts.perStage[stage].externalTextureCount, stage,
-kMaxSamplersPerShaderStage);
+bindingCounts.perStage[stage].externalTextureCount, stage, kMaxSamplersPerShaderStage);

DAWN_INVALID_IF(
bindingCounts.perStage[stage].storageBufferCount > kMaxStorageBuffersPerShaderStage,

@@ -163,8 +158,7 @@ namespace dawn::native {
kMaxStorageBuffersPerShaderStage);

DAWN_INVALID_IF(
-bindingCounts.perStage[stage].storageTextureCount >
-kMaxStorageTexturesPerShaderStage,
+bindingCounts.perStage[stage].storageTextureCount > kMaxStorageTexturesPerShaderStage,
"The number of storage textures (%u) in the %s stage exceeds the maximum per-stage "
"limit (%u).",
bindingCounts.perStage[stage].storageTextureCount, stage,

@@ -190,6 +184,6 @@ namespace dawn::native {
}

return {};
}

} // namespace dawn::native
@@ -29,29 +29,29 @@

namespace dawn::native {

// Not a real WebGPU limit, but the sum of the two limits is useful for internal optimizations.
static constexpr uint32_t kMaxDynamicBuffersPerPipelineLayout =
kMaxDynamicUniformBuffersPerPipelineLayout + kMaxDynamicStorageBuffersPerPipelineLayout;

static constexpr BindingIndex kMaxDynamicBuffersPerPipelineLayoutTyped =
BindingIndex(kMaxDynamicBuffersPerPipelineLayout);

// Not a real WebGPU limit, but used to optimize parts of Dawn which expect valid usage of the
// API. There should never be more bindings than the max per stage, for each stage.
static constexpr uint32_t kMaxBindingsPerPipelineLayout =
3 * (kMaxSampledTexturesPerShaderStage + kMaxSamplersPerShaderStage +
kMaxStorageBuffersPerShaderStage + kMaxStorageTexturesPerShaderStage +
kMaxUniformBuffersPerShaderStage);

static constexpr BindingIndex kMaxBindingsPerPipelineLayoutTyped =
BindingIndex(kMaxBindingsPerPipelineLayout);

// TODO(enga): Figure out a good number for this.
static constexpr uint32_t kMaxOptimalBindingsPerGroup = 32;
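kMaxBindingsPerPipelineLayout multiplies the per-stage sum by 3 because a binding's visibility can cover at most three shader stages. With the per-stage defaults assumed here for illustration (16 sampled textures, 16 samplers, 8 storage buffers, 4 storage textures, 12 uniform buffers; treat these as assumptions, not normative limits), the arithmetic works out as:

    #include <cstdint>

    // Assumed per-stage limits, for illustration only.
    constexpr uint32_t kSampledTextures = 16;
    constexpr uint32_t kSamplers = 16;
    constexpr uint32_t kStorageBuffers = 8;
    constexpr uint32_t kStorageTextures = 4;
    constexpr uint32_t kUniformBuffers = 12;

    // 3 stages * 56 bindings per stage = 168 bindings at most.
    constexpr uint32_t kMaxBindings =
        3 * (kSampledTextures + kSamplers + kStorageBuffers + kStorageTextures + kUniformBuffers);
    static_assert(kMaxBindings == 168, "3 * 56");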
enum class BindingInfoType { Buffer, Sampler, Texture, StorageTexture, ExternalTexture };

struct BindingInfo {
BindingNumber binding;
wgpu::ShaderStage visibility;

@@ -62,37 +62,37 @@ namespace dawn::native {
SamplerBindingLayout sampler;
TextureBindingLayout texture;
StorageTextureBindingLayout storageTexture;
};

struct BindingSlot {
BindGroupIndex group;
BindingNumber binding;
};

struct PerStageBindingCounts {
uint32_t sampledTextureCount;
uint32_t samplerCount;
uint32_t storageBufferCount;
uint32_t storageTextureCount;
uint32_t uniformBufferCount;
uint32_t externalTextureCount;
};

struct BindingCounts {
uint32_t totalCount;
uint32_t bufferCount;
uint32_t unverifiedBufferCount; // Buffers with minimum buffer size unspecified
uint32_t dynamicUniformBufferCount;
uint32_t dynamicStorageBufferCount;
PerStage<PerStageBindingCounts> perStage;
};

void IncrementBindingCounts(BindingCounts* bindingCounts, const BindGroupLayoutEntry& entry);
void AccumulateBindingCounts(BindingCounts* bindingCounts, const BindingCounts& rhs);
MaybeError ValidateBindingCounts(const BindingCounts& bindingCounts);

// For buffer size validation
using RequiredBufferSizes = ityp::array<BindGroupIndex, std::vector<uint64_t>, kMaxBindGroups>;

} // namespace dawn::native
@@ -21,52 +21,51 @@

namespace dawn::native {

CachedBlob::CachedBlob(size_t size) {
if (size != 0) {
Reset(size);
}
}

bool CachedBlob::Empty() const {
return mSize == 0;
}

const uint8_t* CachedBlob::Data() const {
return mData.get();
}

uint8_t* CachedBlob::Data() {
return mData.get();
}

size_t CachedBlob::Size() const {
return mSize;
}

void CachedBlob::Reset(size_t size) {
mSize = size;
mData = std::make_unique<uint8_t[]>(size);
}

BlobCache::BlobCache(dawn::platform::CachingInterface* cachingInterface)
-: mCache(cachingInterface) {
-}
+: mCache(cachingInterface) {}

CachedBlob BlobCache::Load(const CacheKey& key) {
std::lock_guard<std::mutex> lock(mMutex);
return LoadInternal(key);
}

void BlobCache::Store(const CacheKey& key, size_t valueSize, const void* value) {
std::lock_guard<std::mutex> lock(mMutex);
StoreInternal(key, valueSize, value);
}

void BlobCache::Store(const CacheKey& key, const CachedBlob& value) {
Store(key, value.Size(), value.Data());
}

CachedBlob BlobCache::LoadInternal(const CacheKey& key) {
CachedBlob result;
if (mCache == nullptr) {
return result;

@@ -79,15 +78,15 @@ namespace dawn::native {
ASSERT(expectedSize == actualSize);
}
return result;
}

void BlobCache::StoreInternal(const CacheKey& key, size_t valueSize, const void* value) {
ASSERT(value != nullptr);
ASSERT(valueSize > 0);
if (mCache == nullptr) {
return;
}
mCache->StoreData(nullptr, key.data(), key.size(), value, valueSize);
}

} // namespace dawn::native
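BlobCache keeps the mutex handling in the public Load/Store methods and does the real work in unlocked LoadInternal/StoreInternal helpers, so a code path that already holds the lock can reuse the helpers without deadlocking. A stripped-down sketch of that split, using a plain map in place of the platform CachingInterface:

    #include <cstdint>
    #include <map>
    #include <mutex>
    #include <vector>

    class TinyBlobCache {
      public:
        std::vector<uint8_t> Load(uint64_t key) {
            std::lock_guard<std::mutex> lock(mMutex);
            return LoadInternal(key);
        }
        void Store(uint64_t key, std::vector<uint8_t> value) {
            std::lock_guard<std::mutex> lock(mMutex);
            StoreInternal(key, std::move(value));
        }

      private:
        // Callers must hold mMutex; keeping these helpers lock-free lets
        // other already-locked members call them safely.
        std::vector<uint8_t> LoadInternal(uint64_t key) {
            auto it = mEntries.find(key);
            return it == mEntries.end() ? std::vector<uint8_t>{} : it->second;
        }
        void StoreInternal(uint64_t key, std::vector<uint8_t> value) {
            mEntries[key] = std::move(value);
        }

        std::mutex mMutex;
        std::map<uint64_t, std::vector<uint8_t>> mEntries;
    };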
@@ -19,16 +19,16 @@
#include <mutex>

namespace dawn::platform {
class CachingInterface;
}

namespace dawn::native {

class BlobCache;
class CacheKey;
class InstanceBase;

class CachedBlob {
public:
explicit CachedBlob(size_t size = 0);

@@ -41,11 +41,11 @@ namespace dawn::native {
private:
std::unique_ptr<uint8_t[]> mData = nullptr;
size_t mSize = 0;
};

// This class should always be thread-safe because it may be called asynchronously. Its purpose
// is to wrap the CachingInterface provided via a platform.
class BlobCache {
public:
explicit BlobCache(dawn::platform::CachingInterface* cachingInterface = nullptr);

@@ -69,7 +69,7 @@ namespace dawn::native {
// call, but this might be unnecessary. This class just passes nullptr for those calls
// right now. Eventually we can just change the interface to be more generic.
dawn::platform::CachingInterface* mCache;
};

} // namespace dawn::native
@@ -19,7 +19,7 @@

namespace dawn::native {

BuddyAllocator::BuddyAllocator(uint64_t maxSize) : mMaxBlockSize(maxSize) {
ASSERT(IsPowerOfTwo(maxSize));

mFreeLists.resize(Log2(mMaxBlockSize) + 1);

@@ -27,19 +27,19 @@ namespace dawn::native {
// Insert the level0 free block.
mRoot = new BuddyBlock(maxSize, /*offset*/ 0);
mFreeLists[0] = {mRoot};
}

BuddyAllocator::~BuddyAllocator() {
if (mRoot) {
DeleteBlock(mRoot);
}
}

uint64_t BuddyAllocator::ComputeTotalNumOfFreeBlocksForTesting() const {
return ComputeNumOfFreeBlocks(mRoot);
}

uint64_t BuddyAllocator::ComputeNumOfFreeBlocks(BuddyBlock* block) const {
if (block->mState == BlockState::Free) {
return 1;
} else if (block->mState == BlockState::Split) {

@@ -47,16 +47,16 @@ namespace dawn::native {
ComputeNumOfFreeBlocks(block->split.pLeft->pBuddy);
}
return 0;
}

uint32_t BuddyAllocator::ComputeLevelFromBlockSize(uint64_t blockSize) const {
// Every level in the buddy system can be indexed by order-n where n = log2(blockSize).
// However, mFreeList is zero-indexed by level.
// For example, blockSize=4 is Level1 if MAX_BLOCK is 8.
return Log2(mMaxBlockSize) - Log2(blockSize);
}

uint64_t BuddyAllocator::GetNextFreeAlignedBlock(size_t allocationBlockLevel,
uint64_t alignment) const {
ASSERT(IsPowerOfTwo(alignment));
// The current level is the level that corresponds to the allocation size. The free list may

@@ -89,13 +89,13 @@ namespace dawn::native {
}
}
return kInvalidOffset; // No free block exists at any level.
}

// Inserts existing free block into the free-list.
// Called by allocate upon splitting to insert a child block into a free-list.
// Note: Always insert into the head of the free-list. As when a larger free block at a lower
// level was split, there were no smaller free blocks at a higher level to allocate.
void BuddyAllocator::InsertFreeBlock(BuddyBlock* block, size_t level) {
ASSERT(block->mState == BlockState::Free);

// Inserted block is now the front (no prev).

@@ -111,9 +111,9 @@ namespace dawn::native {
}

mFreeLists[level].head = block;
}

void BuddyAllocator::RemoveFreeBlock(BuddyBlock* block, size_t level) {
ASSERT(block->mState == BlockState::Free);

if (mFreeLists[level].head == block) {

@@ -134,9 +134,9 @@ namespace dawn::native {
pNext->free.pPrev = pPrev;
}
}
}

uint64_t BuddyAllocator::Allocate(uint64_t allocationSize, uint64_t alignment) {
if (allocationSize == 0 || allocationSize > mMaxBlockSize) {
return kInvalidOffset;
}

@@ -197,9 +197,9 @@ namespace dawn::native {
currBlock->mState = BlockState::Allocated;

return currBlock->mOffset;
}

void BuddyAllocator::Deallocate(uint64_t offset) {
BuddyBlock* curr = mRoot;

// TODO(crbug.com/dawn/827): Optimize de-allocation.

@@ -247,10 +247,10 @@ namespace dawn::native {
}

InsertFreeBlock(curr, currBlockLevel);
}

// Helper which deletes a block in the tree recursively (post-order).
void BuddyAllocator::DeleteBlock(BuddyBlock* block) {
ASSERT(block != nullptr);

if (block->mState == BlockState::Split) {

@@ -259,6 +259,6 @@ namespace dawn::native {
DeleteBlock(block->split.pLeft);
}
delete block;
}

} // namespace dawn::native
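ComputeLevelFromBlockSize turns a power-of-two block size into a free-list index by subtracting logarithms: level = log2(maxBlockSize) - log2(blockSize), so the root (the max block) is level 0 and each halving adds one. A quick standalone check of that arithmetic, including the blockSize=4, MAX_BLOCK=8 example from the comment:

    #include <cassert>
    #include <cstdint>

    // log2 for power-of-two inputs.
    uint32_t Log2(uint64_t v) {
        uint32_t n = 0;
        while (v >>= 1) {
            ++n;
        }
        return n;
    }

    uint32_t LevelFromBlockSize(uint64_t maxBlockSize, uint64_t blockSize) {
        return Log2(maxBlockSize) - Log2(blockSize);
    }

    int main() {
        // With an 8-byte max block: 8 -> level 0, 4 -> level 1, 2 -> level 2.
        assert(LevelFromBlockSize(8, 8) == 0);
        assert(LevelFromBlockSize(8, 4) == 1);  // The example from the comment.
        assert(LevelFromBlockSize(8, 2) == 2);
        return 0;
    }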
@@ -22,18 +22,18 @@

namespace dawn::native {

// Buddy allocator uses the buddy memory allocation technique to satisfy an allocation request.
// Memory is split into halves until just large enough to fit to the request. This
// requires the allocation size to be a power-of-two value. The allocator "allocates" a block by
// returning the starting offset whose size is guaranteed to be greater than or equal to the
// allocation size. To deallocate, the same offset is used to find the corresponding block.
//
// Internally, it manages a free list to track free blocks in a full binary tree.
// Every index in the free list corresponds to a level in the tree. That level also determines
// the size of the block to be used to satisfy the request. The first level (index=0) represents
// the root whose size is also called the max block size.
//
class BuddyAllocator {
public:
explicit BuddyAllocator(uint64_t maxSize);
~BuddyAllocator();

@@ -110,7 +110,7 @@ namespace dawn::native {
// List of linked-lists of free blocks where the index is a level that
// corresponds to a power-of-two sized block.
std::vector<BlockList> mFreeLists;
};

} // namespace dawn::native
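Taken together, the header describes an offset-only allocator: Allocate rounds the request up to a power of two, walks the free lists from the matching level upward, splits larger blocks on the way down, and returns an offset; Deallocate re-merges freed buddies. A hypothetical call sequence, assuming only the constructor and methods declared above:

    // Hypothetical usage; BuddyAllocator is the class declared above.
    BuddyAllocator allocator(/*maxSize=*/4096);  // One 4 KiB root block.

    uint64_t a = allocator.Allocate(/*allocationSize=*/512, /*alignment=*/512);
    uint64_t b = allocator.Allocate(/*allocationSize=*/512, /*alignment=*/512);
    // a and b are sibling 512-byte blocks carved out of the 4 KiB root.

    allocator.Deallocate(a);
    allocator.Deallocate(b);  // Freeing both lets the buddies merge back up.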
@@ -21,7 +21,7 @@

namespace dawn::native {

BuddyMemoryAllocator::BuddyMemoryAllocator(uint64_t maxSystemSize,
                                           uint64_t memoryBlockSize,
                                           ResourceHeapAllocator* heapAllocator)
    : mMemoryBlockSize(memoryBlockSize),

@@ -32,14 +32,14 @@ namespace dawn::native {
    ASSERT(maxSystemSize % mMemoryBlockSize == 0);

    mTrackedSubAllocations.resize(maxSystemSize / mMemoryBlockSize);
}

uint64_t BuddyMemoryAllocator::GetMemoryIndex(uint64_t offset) const {
    ASSERT(offset != BuddyAllocator::kInvalidOffset);
    return offset / mMemoryBlockSize;
}

ResultOrError<ResourceMemoryAllocation> BuddyMemoryAllocator::Allocate(uint64_t allocationSize,
                                                                       uint64_t alignment) {
    ResourceMemoryAllocation invalidAllocation = ResourceMemoryAllocation{};

@@ -83,11 +83,11 @@ namespace dawn::native {
    // Allocation offset is always local to the memory.
    const uint64_t memoryOffset = blockOffset % mMemoryBlockSize;

    return ResourceMemoryAllocation{
        info, memoryOffset, mTrackedSubAllocations[memoryIndex].mMemoryAllocation.get()};
}
    return ResourceMemoryAllocation{info, memoryOffset,
                                    mTrackedSubAllocations[memoryIndex].mMemoryAllocation.get()};
}

void BuddyMemoryAllocator::Deallocate(const ResourceMemoryAllocation& allocation) {
    const AllocationInfo info = allocation.GetInfo();

    ASSERT(info.mMethod == AllocationMethod::kSubAllocated);

@@ -103,13 +103,13 @@ namespace dawn::native {
    }

    mBuddyBlockAllocator.Deallocate(info.mBlockOffset);
}

uint64_t BuddyMemoryAllocator::GetMemoryBlockSize() const {
    return mMemoryBlockSize;
}

uint64_t BuddyMemoryAllocator::ComputeTotalNumOfHeapsForTesting() const {
    uint64_t count = 0;
    for (const TrackedSubAllocations& allocation : mTrackedSubAllocations) {
        if (allocation.refcount > 0) {

@@ -117,6 +117,6 @@ namespace dawn::native {
        }
    }
    return count;
}

}  // namespace dawn::native
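
The refcounted heap tracking that GetMemoryIndex above enables, as a hedged standalone sketch; TrackedHeap and the release hook are assumptions for illustration, not Dawn's types:

#include <cstdint>
#include <vector>

struct TrackedHeap {
    uint64_t refcount = 0;
};

// offset / memoryBlockSize selects the backing heap; sub-allocations that land
// in the same block share it, so the heap is only released at refcount zero.
void AddRef(std::vector<TrackedHeap>& heaps, uint64_t offset, uint64_t memoryBlockSize) {
    heaps[offset / memoryBlockSize].refcount++;
}

void Release(std::vector<TrackedHeap>& heaps, uint64_t offset, uint64_t memoryBlockSize) {
    TrackedHeap& heap = heaps[offset / memoryBlockSize];
    if (--heap.refcount == 0) {
        // The real allocator would free the backing device memory here.
    }
}
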
@@ -24,28 +24,27 @@

namespace dawn::native {

class ResourceHeapAllocator;

// BuddyMemoryAllocator uses the buddy allocator to sub-allocate blocks of device
// memory created by MemoryAllocator clients. It creates a very large buddy system
// where backing device memory blocks equal a specified level in the system.
//
// Upon sub-allocating, the offset gets mapped to device memory by computing the corresponding
// memory index and, should the memory not exist, it is created. If two sub-allocations share the
// same memory index, the memory refcount is incremented to ensure de-allocating one doesn't
// release the other prematurely.
//
// The MemoryAllocator should return ResourceHeaps that are all compatible with each other.
// It should also outlive all the resources that are in the buddy allocator.
class BuddyMemoryAllocator {
  public:
    BuddyMemoryAllocator(uint64_t maxSystemSize,
                         uint64_t memoryBlockSize,
                         ResourceHeapAllocator* heapAllocator);
    ~BuddyMemoryAllocator() = default;

    ResultOrError<ResourceMemoryAllocation> Allocate(uint64_t allocationSize,
                                                     uint64_t alignment);
    ResultOrError<ResourceMemoryAllocation> Allocate(uint64_t allocationSize, uint64_t alignment);
    void Deallocate(const ResourceMemoryAllocation& allocation);

    uint64_t GetMemoryBlockSize() const;

@@ -67,7 +66,7 @@ namespace dawn::native {
    };

    std::vector<TrackedSubAllocations> mTrackedSubAllocations;
};

}  // namespace dawn::native
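
A hedged usage sketch of the interface above, reusing the DAWN_TRY_ASSIGN macro that appears elsewhere in this CL; the sizes are arbitrary and the surrounding error handling is simplified:

MaybeError ExampleSubAllocate(ResourceHeapAllocator* heapAllocator) {
    // A 256 MiB buddy system carved into 4 MiB backing device heaps.
    BuddyMemoryAllocator allocator(256ull * 1024 * 1024, 4ull * 1024 * 1024, heapAllocator);

    ResourceMemoryAllocation allocation;
    DAWN_TRY_ASSIGN(allocation, allocator.Allocate(/*allocationSize=*/4096, /*alignment=*/256));

    // ... use the sub-allocation, then return it to the buddy system.
    allocator.Deallocate(allocation);
    return {};
}
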
@@ -33,11 +33,9 @@

namespace dawn::native {

namespace {
struct MapRequestTask : QueueBase::TaskInFlight {
    MapRequestTask(Ref<BufferBase> buffer, MapRequestID id)
        : buffer(std::move(buffer)), id(id) {
    }
    MapRequestTask(Ref<BufferBase> buffer, MapRequestID id) : buffer(std::move(buffer)), id(id) {}
    void Finish(dawn::platform::Platform* platform, ExecutionSerial serial) override {
        TRACE_EVENT1(platform, General, "Buffer::TaskInFlight::Finished", "serial",
                     uint64_t(serial));

@@ -51,9 +49,9 @@ namespace dawn::native {
  private:
    Ref<BufferBase> buffer;
    MapRequestID id;
};

class ErrorBuffer final : public BufferBase {
  public:
    ErrorBuffer(DeviceBase* device, const BufferDescriptor* descriptor)
        : BufferBase(device, descriptor, ObjectBase::kError) {

@@ -61,8 +59,7 @@ namespace dawn::native {
        // Check that the size can be used to allocate an mFakeMappedData. A malloc(0)
        // is invalid, and on 32bit systems we should avoid a narrowing conversion that
        // would make size = 1 << 32 + 1 allocate one byte.
        bool isValidSize =
            descriptor->size != 0 &&
        bool isValidSize = descriptor->size != 0 &&
                           descriptor->size < uint64_t(std::numeric_limits<size_t>::max());

        if (isValidSize) {

@@ -76,32 +73,24 @@ namespace dawn::native {
        }

  private:
    bool IsCPUWritableAtCreation() const override {
        UNREACHABLE();
    }
    bool IsCPUWritableAtCreation() const override { UNREACHABLE(); }

    MaybeError MapAtCreationImpl() override {
        UNREACHABLE();
    }
    MaybeError MapAtCreationImpl() override { UNREACHABLE(); }

    MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override {
        UNREACHABLE();
    }

    void* GetMappedPointerImpl() override {
        return mFakeMappedData.get();
    }
    void* GetMappedPointerImpl() override { return mFakeMappedData.get(); }

    void UnmapImpl() override {
        mFakeMappedData.reset();
    }
    void UnmapImpl() override { mFakeMappedData.reset(); }

    std::unique_ptr<uint8_t[]> mFakeMappedData;
};

}  // anonymous namespace

MaybeError ValidateBufferDescriptor(DeviceBase*, const BufferDescriptor* descriptor) {
    DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
    DAWN_TRY(ValidateBufferUsage(descriptor->usage));
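
A small self-contained illustration of the 32-bit hazard the isValidSize comment above guards against; this is a sketch, not Dawn code:

#include <cstdint>
#include <limits>

bool IsValidFakeMapSize(uint64_t size) {
    // size == 0 would mean malloc(0), whose result is implementation-defined.
    // On a 32-bit size_t, (1ull << 32) + 1 would silently truncate to 1 and
    // allocate a single byte, so reject anything that does not fit in size_t.
    return size != 0 && size < uint64_t(std::numeric_limits<size_t>::max());
}
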
@@ -130,11 +119,11 @@ namespace dawn::native {
                    descriptor->size);

    return {};
}

// Buffer

BufferBase::BufferBase(DeviceBase* device, const BufferDescriptor* descriptor)
    : ApiObjectBase(device, descriptor->label),
      mSize(descriptor->size),
      mUsage(descriptor->usage),

@@ -164,9 +153,9 @@ namespace dawn::native {
    }

    TrackInDevice();
}

BufferBase::BufferBase(DeviceBase* device,
                       const BufferDescriptor* descriptor,
                       ObjectBase::ErrorTag tag)
    : ApiObjectBase(device, tag), mSize(descriptor->size), mState(BufferState::Unmapped) {

@@ -175,18 +164,18 @@ namespace dawn::native {
        mMapOffset = 0;
        mMapSize = mSize;
    }
}

BufferBase::BufferBase(DeviceBase* device, BufferState state)
    : ApiObjectBase(device, kLabelNotImplemented), mState(state) {
    TrackInDevice();
}

BufferBase::~BufferBase() {
    ASSERT(mState == BufferState::Unmapped || mState == BufferState::Destroyed);
}

void BufferBase::DestroyImpl() {
    if (mState == BufferState::Mapped) {
        UnmapInternal(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback);
    } else if (mState == BufferState::MappedAtCreation) {
@@ -197,39 +186,39 @@ namespace dawn::native {
        }
    }
    mState = BufferState::Destroyed;
}

// static
BufferBase* BufferBase::MakeError(DeviceBase* device, const BufferDescriptor* descriptor) {
    return new ErrorBuffer(device, descriptor);
}

ObjectType BufferBase::GetType() const {
    return ObjectType::Buffer;
}

uint64_t BufferBase::GetSize() const {
    ASSERT(!IsError());
    return mSize;
}

uint64_t BufferBase::GetAllocatedSize() const {
    ASSERT(!IsError());
    // The backend must initialize this value.
    ASSERT(mAllocatedSize != 0);
    return mAllocatedSize;
}

wgpu::BufferUsage BufferBase::GetUsage() const {
    ASSERT(!IsError());
    return mUsage;
}

wgpu::BufferUsage BufferBase::GetUsageExternalOnly() const {
    return GetUsage() & ~kAllInternalBufferUsages;
}

MaybeError BufferBase::MapAtCreation() {
    DAWN_TRY(MapAtCreationInternal());

    void* ptr;

@@ -258,9 +247,9 @@ namespace dawn::native {
    }

    return {};
}

MaybeError BufferBase::MapAtCreationInternal() {
    ASSERT(!IsError());
    mMapOffset = 0;
    mMapSize = mSize;
@@ -279,8 +268,7 @@ namespace dawn::native {
            // is initialized.
            // TODO(crbug.com/dawn/828): Suballocate and reuse memory from a larger staging
            // buffer so we don't create many small buffers.
            DAWN_TRY_ASSIGN(mStagingBuffer,
                            GetDevice()->CreateStagingBuffer(GetAllocatedSize()));
            DAWN_TRY_ASSIGN(mStagingBuffer, GetDevice()->CreateStagingBuffer(GetAllocatedSize()));
        }
    }

@@ -289,9 +277,9 @@ namespace dawn::native {
    // staging buffer, we will have issues when we try to destroy the buffer.
    mState = BufferState::MappedAtCreation;
    return {};
}

MaybeError BufferBase::ValidateCanUseOnQueueNow() const {
    ASSERT(!IsError());

    switch (mState) {

@@ -304,9 +292,9 @@ namespace dawn::native {
            return {};
    }
    UNREACHABLE();
}

void BufferBase::CallMapCallback(MapRequestID mapID, WGPUBufferMapAsyncStatus status) {
    ASSERT(!IsError());
    if (mMapCallback != nullptr && mapID == mLastMapID) {
        // Tag the callback as fired before firing it, otherwise it could fire a second time if

@@ -320,9 +308,9 @@ namespace dawn::native {
            callback(status, mMapUserdata);
        }
    }
}

void BufferBase::APIMapAsync(wgpu::MapMode mode,
                             size_t offset,
                             size_t size,
                             WGPUBufferMapCallback callback,

@@ -357,23 +345,21 @@ namespace dawn::native {
        CallMapCallback(mLastMapID, WGPUBufferMapAsyncStatus_DeviceLost);
        return;
    }
    std::unique_ptr<MapRequestTask> request =
        std::make_unique<MapRequestTask>(this, mLastMapID);
    std::unique_ptr<MapRequestTask> request = std::make_unique<MapRequestTask>(this, mLastMapID);
    TRACE_EVENT1(GetDevice()->GetPlatform(), General, "Buffer::APIMapAsync", "serial",
                 uint64_t(GetDevice()->GetPendingCommandSerial()));
    GetDevice()->GetQueue()->TrackTask(std::move(request),
                                       GetDevice()->GetPendingCommandSerial());
}
    GetDevice()->GetQueue()->TrackTask(std::move(request), GetDevice()->GetPendingCommandSerial());
}

void* BufferBase::APIGetMappedRange(size_t offset, size_t size) {
    return GetMappedRange(offset, size, true);
}

const void* BufferBase::APIGetConstMappedRange(size_t offset, size_t size) {
    return GetMappedRange(offset, size, false);
}

void* BufferBase::GetMappedRange(size_t offset, size_t size, bool writable) {
    if (!CanGetMappedRange(writable, offset, size)) {
        return nullptr;
    }

@@ -386,13 +372,13 @@ namespace dawn::native {
    }
    uint8_t* start = static_cast<uint8_t*>(GetMappedPointerImpl());
    return start == nullptr ? nullptr : start + offset;
}

void BufferBase::APIDestroy() {
    Destroy();
}

MaybeError BufferBase::CopyFromStagingBuffer() {
    ASSERT(mStagingBuffer);
    if (mSize == 0) {
        // Staging buffer is not created if zero size.

@@ -400,27 +386,27 @@ namespace dawn::native {
        return {};
    }

    DAWN_TRY(GetDevice()->CopyFromStagingToBuffer(mStagingBuffer.get(), 0, this, 0,
                                                  GetAllocatedSize()));
    DAWN_TRY(
        GetDevice()->CopyFromStagingToBuffer(mStagingBuffer.get(), 0, this, 0, GetAllocatedSize()));

    DynamicUploader* uploader = GetDevice()->GetDynamicUploader();
    uploader->ReleaseStagingBuffer(std::move(mStagingBuffer));

    return {};
}

void BufferBase::APIUnmap() {
    if (GetDevice()->ConsumedError(ValidateUnmap(), "calling %s.Unmap().", this)) {
        return;
    }
    Unmap();
}

void BufferBase::Unmap() {
    UnmapInternal(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback);
}

void BufferBase::UnmapInternal(WGPUBufferMapAsyncStatus callbackStatus) {
    if (mState == BufferState::Mapped) {
        // A map request can only be called once, so this will fire only if the request wasn't
        // completed before the Unmap.

@@ -440,9 +426,9 @@ namespace dawn::native {
    }

    mState = BufferState::Unmapped;
}

MaybeError BufferBase::ValidateMapAsync(wgpu::MapMode mode,
                                        size_t offset,
                                        size_t size,
                                        WGPUBufferMapAsyncStatus* status) const {

@@ -453,8 +439,7 @@ namespace dawn::native {
    DAWN_TRY(GetDevice()->ValidateObject(this));

    DAWN_INVALID_IF(uint64_t(offset) > mSize,
                    "Mapping offset (%u) is larger than the size (%u) of %s.", offset, mSize,
                    this);
                    "Mapping offset (%u) is larger than the size (%u) of %s.", offset, mSize, this);

    DAWN_INVALID_IF(offset % 8 != 0, "Offset (%u) must be a multiple of 8.", offset);
    DAWN_INVALID_IF(size % 4 != 0, "Size (%u) must be a multiple of 4.", size);

@@ -491,9 +476,9 @@ namespace dawn::native {

    *status = WGPUBufferMapAsyncStatus_Success;
    return {};
}

bool BufferBase::CanGetMappedRange(bool writable, size_t offset, size_t size) const {
    if (offset % 8 != 0 || offset < mMapOffset || offset > mSize) {
        return false;
    }

@@ -523,8 +508,7 @@ namespace dawn::native {
            return true;

        case BufferState::Mapped:
            ASSERT(bool{mMapMode & wgpu::MapMode::Read} ^
                   bool{mMapMode & wgpu::MapMode::Write});
            ASSERT(bool{mMapMode & wgpu::MapMode::Read} ^ bool{mMapMode & wgpu::MapMode::Write});
            return !writable || (mMapMode & wgpu::MapMode::Write);

        case BufferState::Unmapped:

@@ -532,9 +516,9 @@ namespace dawn::native {
            return false;
    }
    UNREACHABLE();
}

MaybeError BufferBase::ValidateUnmap() const {
    DAWN_TRY(GetDevice()->ValidateIsAlive());

    switch (mState) {

@@ -549,27 +533,26 @@ namespace dawn::native {
            return DAWN_FORMAT_VALIDATION_ERROR("%s is destroyed.", this);
    }
    UNREACHABLE();
}

void BufferBase::OnMapRequestCompleted(MapRequestID mapID, WGPUBufferMapAsyncStatus status) {
    CallMapCallback(mapID, status);
}

bool BufferBase::NeedsInitialization() const {
    return !mIsDataInitialized &&
           GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse);
}
bool BufferBase::NeedsInitialization() const {
    return !mIsDataInitialized && GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse);
}

bool BufferBase::IsDataInitialized() const {
    return mIsDataInitialized;
}

void BufferBase::SetIsDataInitialized() {
    mIsDataInitialized = true;
}

bool BufferBase::IsFullBufferRange(uint64_t offset, uint64_t size) const {
    return offset == 0 && size == GetSize();
}

}  // namespace dawn::native
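
The alignment rules enforced in ValidateMapAsync above (offset a multiple of 8, size a multiple of 4) in a hedged standalone form; the helper name is illustrative and the range arithmetic avoids unsigned overflow:

#include <cstdint>

bool IsValidMapRange(uint64_t bufferSize, uint64_t offset, uint64_t size) {
    return offset % 8 == 0 &&       // offsets must be 8-byte aligned
           size % 4 == 0 &&         // sizes must be 4-byte aligned
           offset <= bufferSize &&  // offset within the buffer
           size <= bufferSize - offset;
}
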
@@ -26,21 +26,21 @@

namespace dawn::native {

struct CopyTextureToBufferCmd;

enum class MapType : uint32_t;

MaybeError ValidateBufferDescriptor(DeviceBase* device, const BufferDescriptor* descriptor);

static constexpr wgpu::BufferUsage kReadOnlyBufferUsages =
    wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::Index |
    wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Uniform | kReadOnlyStorageBuffer |
    wgpu::BufferUsage::Indirect;

static constexpr wgpu::BufferUsage kMappableBufferUsages =
    wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite;

class BufferBase : public ApiObjectBase {
  public:
    enum class BufferState {
        Unmapped,

@@ -88,9 +88,7 @@ namespace dawn::native {
    void APIDestroy();

  protected:
    BufferBase(DeviceBase* device,
               const BufferDescriptor* descriptor,
               ObjectBase::ErrorTag tag);
    BufferBase(DeviceBase* device, const BufferDescriptor* descriptor, ObjectBase::ErrorTag tag);

    // Constructor used only for mocking and testing.
    BufferBase(DeviceBase* device, BufferState state);

@@ -133,7 +131,7 @@ namespace dawn::native {
    wgpu::MapMode mMapMode = wgpu::MapMode::None;
    size_t mMapOffset = 0;
    size_t mMapSize = 0;
};

}  // namespace dawn::native
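
A hedged sketch of how usage bitmask constants like the ones above are typically consumed; the helpers are illustrative, and the bitwise operators on wgpu::BufferUsage are assumed to come from the EnumClassBitmasks machinery this CL also reformats:

// True iff every usage bit set on the buffer is read-only.
bool IsReadOnly(wgpu::BufferUsage usage) {
    return (usage & ~kReadOnlyBufferUsages) == wgpu::BufferUsage::None;
}

// True iff the buffer can be mapped at all.
bool IsMappable(wgpu::BufferUsage usage) {
    return (usage & kMappableBufferUsages) != wgpu::BufferUsage::None;
}
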
@@ -18,26 +18,26 @@

namespace dawn::native {

std::ostream& operator<<(std::ostream& os, const CacheKey& key) {
    os << std::hex;
    for (const int b : key) {
        os << std::setfill('0') << std::setw(2) << b << " ";
    }
    os << std::dec;
    return os;
}

template <>
void CacheKeySerializer<std::string>::Serialize(CacheKey* key, const std::string& t) {
    key->Record(static_cast<size_t>(t.length()));
    key->insert(key->end(), t.begin(), t.end());
}

template <>
void CacheKeySerializer<CacheKey>::Serialize(CacheKey* key, const CacheKey& t) {
    // For nested cache keys, we do not record the length, and just copy the key so that it
    // appears we just flatten the keys into a single key.
    key->insert(key->end(), t.begin(), t.end());
}

}  // namespace dawn::native
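
To make the byte layout of the std::string specialization above concrete, a hedged sketch of the recorded bytes, assuming a little-endian platform with a 64-bit size_t:

CacheKey key;
key.Record(std::string("abc"));
// key now holds, in order:
//   03 00 00 00 00 00 00 00   // the size_t length, 3
//   61 62 63                  // 'a' 'b' 'c'
// A nested CacheKey, by contrast, is appended with no length prefix, so
// composing keys behaves like concatenating their byte streams.
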
@@ -27,22 +27,22 @@

namespace dawn::native {

// Forward declare classes because of co-dependency.
class CacheKey;
class CachedObject;

// Stream operator for CacheKey for debugging.
std::ostream& operator<<(std::ostream& os, const CacheKey& key);

// Overridable serializer struct that should be implemented for cache key serializable
// types/classes.
template <typename T, typename SFINAE = void>
class CacheKeySerializer {
  public:
    static void Serialize(CacheKey* key, const T& t);
};

class CacheKey : public std::vector<uint8_t> {
  public:
    using std::vector<uint8_t>::vector;

@@ -87,30 +87,28 @@ namespace dawn::native {
        }
        return *this;
    }
};

// Specialized overload for fundamental types.
template <typename T>
class CacheKeySerializer<T, std::enable_if_t<std::is_fundamental_v<T>>> {
  public:
    static void Serialize(CacheKey* key, const T t) {
        const char* it = reinterpret_cast<const char*>(&t);
        key->insert(key->end(), it, (it + sizeof(T)));
    }
};

// Specialized overload for bitsets that are no larger than 64 bits.
template <size_t N>
class CacheKeySerializer<std::bitset<N>, std::enable_if_t<(N <= 64)>> {
  public:
    static void Serialize(CacheKey* key, const std::bitset<N>& t) {
        key->Record(t.to_ullong());
    }
    static void Serialize(CacheKey* key, const std::bitset<N>& t) { key->Record(t.to_ullong()); }
};

// Specialized overload for larger bitsets, since the built-in to_ullong has a size limit.
template <size_t N>
class CacheKeySerializer<std::bitset<N>, std::enable_if_t<(N > 64)>> {
  public:
    static void Serialize(CacheKey* key, const std::bitset<N>& t) {
        // Serializes the bitset into series of uint8_t, along with recording the size.

@@ -132,32 +130,32 @@ namespace dawn::native {
            key->Record(value);
        }
    }
};

// Specialized overload for enums.
template <typename T>
class CacheKeySerializer<T, std::enable_if_t<std::is_enum_v<T>>> {
  public:
    static void Serialize(CacheKey* key, const T t) {
        CacheKeySerializer<std::underlying_type_t<T>>::Serialize(
            key, static_cast<std::underlying_type_t<T>>(t));
    }
};

// Specialized overload for TypedInteger.
template <typename Tag, typename Integer>
class CacheKeySerializer<::detail::TypedIntegerImpl<Tag, Integer>> {
  public:
    static void Serialize(CacheKey* key, const ::detail::TypedIntegerImpl<Tag, Integer> t) {
        CacheKeySerializer<Integer>::Serialize(key, static_cast<Integer>(t));
    }
};

// Specialized overload for pointers. Since we are serializing for a cache key, we always
// serialize via value, not by pointer. To handle nullptr scenarios, we always serialize whether
// the pointer was nullptr followed by the contents if applicable.
template <typename T>
class CacheKeySerializer<T, std::enable_if_t<std::is_pointer_v<T>>> {
  public:
    static void Serialize(CacheKey* key, const T t) {
        key->Record(t == nullptr);

@@ -165,11 +163,11 @@ namespace dawn::native {
        CacheKeySerializer<std::remove_cv_t<std::remove_pointer_t<T>>>::Serialize(key, *t);
    }
};

// Specialized overload for fixed arrays of primitives.
template <typename T, size_t N>
class CacheKeySerializer<T[N], std::enable_if_t<std::is_fundamental_v<T>>> {
  public:
    static void Serialize(CacheKey* key, const T (&t)[N]) {
        static_assert(N > 0);

@@ -177,11 +175,11 @@ namespace dawn::native {
        const char* it = reinterpret_cast<const char*>(t);
        key->insert(key->end(), it, it + sizeof(t));
    }
};

// Specialized overload for fixed arrays of non-primitives.
template <typename T, size_t N>
class CacheKeySerializer<T[N], std::enable_if_t<!std::is_fundamental_v<T>>> {
  public:
    static void Serialize(CacheKey* key, const T (&t)[N]) {
        static_assert(N > 0);

@@ -190,16 +188,14 @@ namespace dawn::native {
            key->Record(t[i]);
        }
    }
};

// Specialized overload for CachedObjects.
template <typename T>
class CacheKeySerializer<T, std::enable_if_t<std::is_base_of_v<CachedObject, T>>> {
  public:
    static void Serialize(CacheKey* key, const T& t) {
        key->Record(t.GetCacheKey());
    }
    static void Serialize(CacheKey* key, const T& t) { key->Record(t.GetCacheKey()); }
};

}  // namespace dawn::native
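
A hedged sketch of how a custom type would plug into the serializer machinery above; the struct and its members are invented for illustration:

// A user-defined type becomes cache-key-serializable by specializing
// CacheKeySerializer for it and recording each field in a fixed order.
struct SamplerDescLite {
    uint32_t minFilter;
    uint32_t magFilter;
};

template <>
class CacheKeySerializer<SamplerDescLite> {
  public:
    static void Serialize(CacheKey* key, const SamplerDescLite& t) {
        key->Record(t.minFilter);
        key->Record(t.magFilter);
    }
};
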
@@ -19,35 +19,35 @@

namespace dawn::native {

bool CachedObject::IsCachedReference() const {
    return mIsCachedReference;
}

void CachedObject::SetIsCachedReference() {
    mIsCachedReference = true;
}

size_t CachedObject::HashFunc::operator()(const CachedObject* obj) const {
    return obj->GetContentHash();
}

size_t CachedObject::GetContentHash() const {
    ASSERT(mIsContentHashInitialized);
    return mContentHash;
}

void CachedObject::SetContentHash(size_t contentHash) {
    ASSERT(!mIsContentHashInitialized);
    mContentHash = contentHash;
    mIsContentHashInitialized = true;
}

const CacheKey& CachedObject::GetCacheKey() const {
    return mCacheKey;
}

CacheKey* CachedObject::GetCacheKey() {
    return &mCacheKey;
}

}  // namespace dawn::native
@@ -23,11 +23,11 @@

namespace dawn::native {

// Some objects are cached so that instead of creating new duplicate objects,
// we increase the refcount of an existing object.
// When an object is successfully created, the device should call
// SetIsCachedReference() and insert the object into the cache.
class CachedObject {
  public:
    bool IsCachedReference() const;

@@ -58,7 +58,7 @@ namespace dawn::native {
    size_t mContentHash = 0;
    bool mIsContentHashInitialized = false;
    CacheKey mCacheKey;
};

}  // namespace dawn::native
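
A hedged sketch of the dedup-by-content-hash pattern the comment above describes, reusing CachedObject::HashFunc; the EqualFunc and the cache container are assumptions for illustration, and a real cache would compare full contents rather than only hashes:

#include <unordered_set>

struct EqualFunc {
    bool operator()(const CachedObject* a, const CachedObject* b) const {
        // Illustrative only: equal hashes do not guarantee equal objects.
        return a->GetContentHash() == b->GetContentHash();
    }
};

using ObjectCache = std::unordered_set<CachedObject*, CachedObject::HashFunc, EqualFunc>;

// On creation: look up an equivalent object first; only insert (and call
// SetIsCachedReference()) when no existing entry can be reused.
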
@@ -18,22 +18,22 @@

namespace dawn::native {

bool CallbackTaskManager::IsEmpty() {
    std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);
    return mCallbackTaskQueue.empty();
}

std::vector<std::unique_ptr<CallbackTask>> CallbackTaskManager::AcquireCallbackTasks() {
    std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);

    std::vector<std::unique_ptr<CallbackTask>> allTasks;
    allTasks.swap(mCallbackTaskQueue);
    return allTasks;
}

void CallbackTaskManager::AddCallbackTask(std::unique_ptr<CallbackTask> callbackTask) {
    std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);
    mCallbackTaskQueue.push_back(std::move(callbackTask));
}

}  // namespace dawn::native
@@ -21,15 +21,15 @@

namespace dawn::native {

struct CallbackTask {
  public:
    virtual ~CallbackTask() = default;
    virtual void Finish() = 0;
    virtual void HandleShutDown() = 0;
    virtual void HandleDeviceLoss() = 0;
};

class CallbackTaskManager {
  public:
    void AddCallbackTask(std::unique_ptr<CallbackTask> callbackTask);
    bool IsEmpty();

@@ -38,7 +38,7 @@ namespace dawn::native {
  private:
    std::mutex mCallbackTaskQueueMutex;
    std::vector<std::unique_ptr<CallbackTask>> mCallbackTaskQueue;
};

}  // namespace dawn::native
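
A hedged sketch of defining and draining a task with the interface above; the concrete task type is invented, and draining via AcquireCallbackTasks lets callbacks run without holding the queue mutex:

struct ExampleTask : CallbackTask {
    void Finish() override { /* deliver the success callback */ }
    void HandleShutDown() override { /* deliver with a shutdown status */ }
    void HandleDeviceLoss() override { /* deliver with a device-lost status */ }
};

// Producer side:
//   manager.AddCallbackTask(std::make_unique<ExampleTask>());
// Consumer side, e.g. while ticking the device:
//   for (auto& task : manager.AcquireCallbackTasks()) {
//       task->Finish();
//   }
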
@@ -24,25 +24,25 @@

namespace dawn::native {

// TODO(cwallez@chromium.org): figure out a way to have more type safety for the iterator

CommandIterator::CommandIterator() {
    Reset();
}

CommandIterator::~CommandIterator() {
    ASSERT(IsEmpty());
}

CommandIterator::CommandIterator(CommandIterator&& other) {
    if (!other.IsEmpty()) {
        mBlocks = std::move(other.mBlocks);
        other.Reset();
    }
    Reset();
}

CommandIterator& CommandIterator::operator=(CommandIterator&& other) {
    ASSERT(IsEmpty());
    if (!other.IsEmpty()) {
        mBlocks = std::move(other.mBlocks);

@@ -50,14 +50,13 @@ namespace dawn::native {
    }
    Reset();
    return *this;
}

CommandIterator::CommandIterator(CommandAllocator allocator)
    : mBlocks(allocator.AcquireBlocks()) {
CommandIterator::CommandIterator(CommandAllocator allocator) : mBlocks(allocator.AcquireBlocks()) {
    Reset();
}

void CommandIterator::AcquireCommandBlocks(std::vector<CommandAllocator> allocators) {
    ASSERT(IsEmpty());
    mBlocks.clear();
    for (CommandAllocator& allocator : allocators) {

@@ -70,9 +69,9 @@ namespace dawn::native {
        }
    }
    Reset();
}

bool CommandIterator::NextCommandIdInNewBlock(uint32_t* commandId) {
    mCurrentBlock++;
    if (mCurrentBlock >= mBlocks.size()) {
        Reset();

@@ -81,9 +80,9 @@ namespace dawn::native {
    }
    mCurrentPtr = AlignPtr(mBlocks[mCurrentBlock].block, alignof(uint32_t));
    return NextCommandId(commandId);
}

void CommandIterator::Reset() {
    mCurrentBlock = 0;

    if (mBlocks.empty()) {

@@ -96,9 +95,9 @@ namespace dawn::native {
    } else {
        mCurrentPtr = AlignPtr(mBlocks[0].block, alignof(uint32_t));
    }
}

void CommandIterator::MakeEmptyAsDataWasDestroyed() {
    if (IsEmpty()) {
        return;
    }

@@ -109,31 +108,31 @@ namespace dawn::native {
    mBlocks.clear();
    Reset();
    ASSERT(IsEmpty());
}

bool CommandIterator::IsEmpty() const {
    return mBlocks[0].block == reinterpret_cast<const uint8_t*>(&mEndOfBlock);
}

// Potential TODO(crbug.com/dawn/835):
// - Host the size and pointer to next block in the block itself to avoid having an allocation
//   in the vector
// - Assume T's alignof is, say 64bits, static assert it, and make commandAlignment a constant
//   in Allocate
// - Be able to optimize allocation to one block, for command buffers expected to live long to
//   avoid cache misses
// - Better block allocation, maybe have Dawn API to say command buffer is going to have size
//   close to another

CommandAllocator::CommandAllocator() {
    ResetPointers();
}

CommandAllocator::~CommandAllocator() {
    Reset();
}

CommandAllocator::CommandAllocator(CommandAllocator&& other)
    : mBlocks(std::move(other.mBlocks)), mLastAllocationSize(other.mLastAllocationSize) {
    other.mBlocks.clear();
    if (!other.IsEmpty()) {

@@ -143,9 +142,9 @@ namespace dawn::native {
        ResetPointers();
    }
    other.Reset();
}

CommandAllocator& CommandAllocator::operator=(CommandAllocator&& other) {
    Reset();
    if (!other.IsEmpty()) {
        std::swap(mBlocks, other.mBlocks);

@@ -155,22 +154,22 @@ namespace dawn::native {
    }
    other.Reset();
    return *this;
}

void CommandAllocator::Reset() {
    for (BlockDef& block : mBlocks) {
        free(block.block);
    }
    mBlocks.clear();
    mLastAllocationSize = kDefaultBaseAllocationSize;
    ResetPointers();
}

bool CommandAllocator::IsEmpty() const {
    return mCurrentPtr == reinterpret_cast<const uint8_t*>(&mPlaceholderEnum[0]);
}

CommandBlocks&& CommandAllocator::AcquireBlocks() {
    ASSERT(mCurrentPtr != nullptr && mEndPtr != nullptr);
    ASSERT(IsPtrAligned(mCurrentPtr, alignof(uint32_t)));
    ASSERT(mCurrentPtr + sizeof(uint32_t) <= mEndPtr);

@@ -179,9 +178,9 @@ namespace dawn::native {
    mCurrentPtr = nullptr;
    mEndPtr = nullptr;
    return std::move(mBlocks);
}

uint8_t* CommandAllocator::AllocateInNewBlock(uint32_t commandId,
                                              size_t commandSize,
                                              size_t commandAlignment) {
    // When there is not enough space, we signal the kEndOfBlock, so that the iterator knows

@@ -202,12 +201,11 @@ namespace dawn::native {
        return nullptr;
    }
    return Allocate(commandId, commandSize, commandAlignment);
}

bool CommandAllocator::GetNewBlock(size_t minimumSize) {
    // Allocate blocks doubling sizes each time, to a maximum of 16k (or at least minimumSize).
    mLastAllocationSize =
        std::max(minimumSize, std::min(mLastAllocationSize * 2, size_t(16384)));
    mLastAllocationSize = std::max(minimumSize, std::min(mLastAllocationSize * 2, size_t(16384)));

    uint8_t* block = static_cast<uint8_t*>(malloc(mLastAllocationSize));
    if (DAWN_UNLIKELY(block == nullptr)) {

@@ -218,11 +216,11 @@ namespace dawn::native {
    mCurrentPtr = AlignPtr(block, alignof(uint32_t));
    mEndPtr = block + mLastAllocationSize;
    return true;
}

void CommandAllocator::ResetPointers() {
    mCurrentPtr = reinterpret_cast<uint8_t*>(&mPlaceholderEnum[0]);
    mEndPtr = reinterpret_cast<uint8_t*>(&mPlaceholderEnum[1]);
}

}  // namespace dawn::native
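
The block-growth policy in GetNewBlock above, worked through as a hedged sketch; the 16 KiB cap matches the comment, while the 2 KiB starting size in the trace is an assumption about kDefaultBaseAllocationSize:

#include <algorithm>
#include <cstddef>

size_t NextBlockSize(size_t last, size_t minimumSize) {
    return std::max(minimumSize, std::min(last * 2, size_t(16384)));
}

// With small commands, sizes double and then plateau:
//   2048 -> 4096 -> 8192 -> 16384 -> 16384 -> ...
// A single oversized command (minimumSize > 16384) still gets a block big
// enough to hold it, since minimumSize wins the max().
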
@@ -26,49 +26,49 @@

namespace dawn::native {

// Allocation for command buffers should be fast. To avoid doing an allocation per command
// or to avoid copying commands when reallocing, we use a linear allocator in a growing set
// of large memory blocks. We also use this to make the format (u32 commandId, command),
// so that iteration over the commands is easy.

// Usage of the allocator and iterator:
//     CommandAllocator allocator;
//     DrawCommand* cmd = allocator.Allocate<DrawCommand>(CommandType::Draw);
//     // Fill command
//     // Repeat allocation and filling commands
//
//     CommandIterator commands(allocator);
//     CommandType type;
//     while(commands.NextCommandId(&type)) {
//         switch(type) {
//             case CommandType::Draw:
//                 DrawCommand* draw = commands.NextCommand<DrawCommand>();
//                 // Do the draw
//                 break;
//             // other cases
//         }
//     }

// Note that you need to extract the commands from the CommandAllocator before destroying it
// and must tell the CommandIterator when the allocated commands have been processed for
// deletion.

// These are the lists of blocks; they should not be used directly, only through
// CommandAllocator and CommandIterator.
struct BlockDef {
    size_t size;
    uint8_t* block;
};
using CommandBlocks = std::vector<BlockDef>;

namespace detail {
constexpr uint32_t kEndOfBlock = std::numeric_limits<uint32_t>::max();
constexpr uint32_t kAdditionalData = std::numeric_limits<uint32_t>::max() - 1;
}  // namespace detail

class CommandAllocator;

class CommandIterator : public NonCopyable {
  public:
    CommandIterator();
    ~CommandIterator();

@@ -145,9 +145,9 @@ namespace dawn::native {
    size_t mCurrentBlock = 0;
    // Used to avoid a special case for empty iterators.
    uint32_t mEndOfBlock = detail::kEndOfBlock;
};

class CommandAllocator : public NonCopyable {
  public:
    CommandAllocator();
    ~CommandAllocator();

@@ -166,8 +166,8 @@ namespace dawn::native {
        static_assert(sizeof(E) == sizeof(uint32_t));
        static_assert(alignof(E) == alignof(uint32_t));
        static_assert(alignof(T) <= kMaxSupportedAlignment);
        T* result = reinterpret_cast<T*>(
            Allocate(static_cast<uint32_t>(commandId), sizeof(T), alignof(T)));
        T* result =
            reinterpret_cast<T*>(Allocate(static_cast<uint32_t>(commandId), sizeof(T), alignof(T)));
        if (!result) {
            return nullptr;
        }

@@ -242,9 +242,7 @@ namespace dawn::native {
        return AllocateInNewBlock(commandId, commandSize, commandAlignment);
    }

    uint8_t* AllocateInNewBlock(uint32_t commandId,
                                size_t commandSize,
                                size_t commandAlignment);
    uint8_t* AllocateInNewBlock(uint32_t commandId, size_t commandSize, size_t commandAlignment);

    DAWN_FORCE_INLINE uint8_t* AllocateData(size_t commandSize, size_t commandAlignment) {
        return Allocate(detail::kAdditionalData, commandSize, commandAlignment);

@@ -267,7 +265,7 @@ namespace dawn::native {
    // be written. Nullptr iff the blocks were moved out.
    uint8_t* mCurrentPtr = nullptr;
    uint8_t* mEndPtr = nullptr;
};

}  // namespace dawn::native
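
A hedged sketch of the (u32 commandId, command) block layout the header comment above describes; the exact padding depends on each command's alignment, so the picture is illustrative only:

// Within one block, records are laid out back to back:
//   [u32 commandId][command payload][u32 commandId][command payload]...
// The reserved id detail::kEndOfBlock terminates a block, which is how the
// iterator knows to hop to the next one, and detail::kAdditionalData marks
// extra data attached to the preceding command rather than a new command.
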
@@ -25,53 +25,52 @@

namespace dawn::native {

CommandBufferBase::CommandBufferBase(CommandEncoder* encoder,
                                     const CommandBufferDescriptor* descriptor)
    : ApiObjectBase(encoder->GetDevice(), descriptor->label),
      mCommands(encoder->AcquireCommands()),
      mResourceUsages(encoder->AcquireResourceUsages()) {
    TrackInDevice();
}

CommandBufferBase::CommandBufferBase(DeviceBase* device)
    : ApiObjectBase(device, kLabelNotImplemented) {
    TrackInDevice();
}

CommandBufferBase::CommandBufferBase(DeviceBase* device, ObjectBase::ErrorTag tag)
    : ApiObjectBase(device, tag) {
}
CommandBufferBase::CommandBufferBase(DeviceBase* device, ObjectBase::ErrorTag tag)
    : ApiObjectBase(device, tag) {}

// static
CommandBufferBase* CommandBufferBase::MakeError(DeviceBase* device) {
    return new CommandBufferBase(device, ObjectBase::kError);
}

ObjectType CommandBufferBase::GetType() const {
    return ObjectType::CommandBuffer;
}

MaybeError CommandBufferBase::ValidateCanUseInSubmitNow() const {
    ASSERT(!IsError());

    DAWN_INVALID_IF(!IsAlive(), "%s cannot be submitted more than once.", this);
    return {};
}

void CommandBufferBase::DestroyImpl() {
    FreeCommands(&mCommands);
    mResourceUsages = {};
}

const CommandBufferResourceUsage& CommandBufferBase::GetResourceUsages() const {
    return mResourceUsages;
}

CommandIterator* CommandBufferBase::GetCommandIteratorForTesting() {
    return &mCommands;
}

bool IsCompleteSubresourceCopiedTo(const TextureBase* texture,
                                   const Extent3D copySize,
                                   const uint32_t mipLevel) {
    Extent3D extent = texture->GetMipLevelPhysicalSize(mipLevel);

@@ -87,26 +86,24 @@ namespace dawn::native {
    }

    UNREACHABLE();
}

SubresourceRange GetSubresourcesAffectedByCopy(const TextureCopy& copy,
                                               const Extent3D& copySize) {
SubresourceRange GetSubresourcesAffectedByCopy(const TextureCopy& copy, const Extent3D& copySize) {
    switch (copy.texture->GetDimension()) {
        case wgpu::TextureDimension::e1D:
            ASSERT(copy.origin.z == 0 && copySize.depthOrArrayLayers == 1);
            ASSERT(copy.mipLevel == 0);
            return {copy.aspect, {0, 1}, {0, 1}};
        case wgpu::TextureDimension::e2D:
            return {
                copy.aspect, {copy.origin.z, copySize.depthOrArrayLayers}, {copy.mipLevel, 1}};
            return {copy.aspect, {copy.origin.z, copySize.depthOrArrayLayers}, {copy.mipLevel, 1}};
        case wgpu::TextureDimension::e3D:
            return {copy.aspect, {0, 1}, {copy.mipLevel, 1}};
    }

    UNREACHABLE();
}

void LazyClearRenderPassAttachments(BeginRenderPassCmd* renderPass) {
    for (ColorAttachmentIndex i :
         IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
        auto& attachmentInfo = renderPass->colorAttachments[i];

@@ -183,9 +180,9 @@ namespace dawn::native {
            view->GetTexture()->SetIsSubresourceContentInitialized(
                attachmentInfo.stencilStoreOp == wgpu::StoreOp::Store, stencilRange);
        }
    }
}

bool IsFullBufferOverwrittenInTextureToBufferCopy(const CopyTextureToBufferCmd* copy) {
    ASSERT(copy != nullptr);

    if (copy->destination.offset > 0) {

@@ -194,8 +191,7 @@ namespace dawn::native {
    }

    const TextureBase* texture = copy->source.texture.Get();
    const TexelBlockInfo& blockInfo =
        texture->GetFormat().GetAspectInfo(copy->source.aspect).block;
    const TexelBlockInfo& blockInfo = texture->GetFormat().GetAspectInfo(copy->source.aspect).block;
    const uint64_t widthInBlocks = copy->copySize.width / blockInfo.width;
    const uint64_t heightInBlocks = copy->copySize.height / blockInfo.height;
    const bool multiSlice = copy->copySize.depthOrArrayLayers > 1;

@@ -224,26 +220,26 @@ namespace dawn::native {
    }

    return true;
}

std::array<float, 4> ConvertToFloatColor(dawn::native::Color color) {
    const std::array<float, 4> outputValue = {
        static_cast<float>(color.r), static_cast<float>(color.g), static_cast<float>(color.b),
        static_cast<float>(color.a)};
    return outputValue;
}

std::array<int32_t, 4> ConvertToSignedIntegerColor(dawn::native::Color color) {
    const std::array<int32_t, 4> outputValue = {
        static_cast<int32_t>(color.r), static_cast<int32_t>(color.g),
        static_cast<int32_t>(color.b), static_cast<int32_t>(color.a)};
        static_cast<int32_t>(color.r), static_cast<int32_t>(color.g), static_cast<int32_t>(color.b),
        static_cast<int32_t>(color.a)};
    return outputValue;
}

std::array<uint32_t, 4> ConvertToUnsignedIntegerColor(dawn::native::Color color) {
    const std::array<uint32_t, 4> outputValue = {
        static_cast<uint32_t>(color.r), static_cast<uint32_t>(color.g),
        static_cast<uint32_t>(color.b), static_cast<uint32_t>(color.a)};
    return outputValue;
}

}  // namespace dawn::native
@@ -26,11 +26,11 @@

namespace dawn::native {

struct BeginRenderPassCmd;
struct CopyTextureToBufferCmd;
struct TextureCopy;

class CommandBufferBase : public ApiObjectBase {
  public:
    CommandBufferBase(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);

@@ -55,21 +55,20 @@ namespace dawn::native {
    CommandBufferBase(DeviceBase* device, ObjectBase::ErrorTag tag);

    CommandBufferResourceUsage mResourceUsages;
};

bool IsCompleteSubresourceCopiedTo(const TextureBase* texture,
                                   const Extent3D copySize,
                                   const uint32_t mipLevel);
SubresourceRange GetSubresourcesAffectedByCopy(const TextureCopy& copy,
                                               const Extent3D& copySize);
SubresourceRange GetSubresourcesAffectedByCopy(const TextureCopy& copy, const Extent3D& copySize);

void LazyClearRenderPassAttachments(BeginRenderPassCmd* renderPass);

bool IsFullBufferOverwrittenInTextureToBufferCopy(const CopyTextureToBufferCmd* copy);

std::array<float, 4> ConvertToFloatColor(dawn::native::Color color);
std::array<int32_t, 4> ConvertToSignedIntegerColor(dawn::native::Color color);
std::array<uint32_t, 4> ConvertToUnsignedIntegerColor(dawn::native::Color color);

}  // namespace dawn::native
@@ -30,8 +30,8 @@

namespace dawn::native {

namespace {
bool BufferSizesAtLeastAsBig(const ityp::span<uint32_t, uint64_t> unverifiedBufferSizes,
                             const std::vector<uint64_t>& pipelineMinBufferSizes) {
    ASSERT(unverifiedBufferSizes.size() == pipelineMinBufferSizes.size());

@@ -42,58 +42,55 @@ namespace dawn::native {
    }

    return true;
}
}  // namespace

enum ValidationAspect {
    VALIDATION_ASPECT_PIPELINE,
    VALIDATION_ASPECT_BIND_GROUPS,
    VALIDATION_ASPECT_VERTEX_BUFFERS,
    VALIDATION_ASPECT_INDEX_BUFFER,

    VALIDATION_ASPECT_COUNT
};
static_assert(VALIDATION_ASPECT_COUNT == CommandBufferStateTracker::kNumAspects);

static constexpr CommandBufferStateTracker::ValidationAspects kDispatchAspects =
    1 << VALIDATION_ASPECT_PIPELINE | 1 << VALIDATION_ASPECT_BIND_GROUPS;

static constexpr CommandBufferStateTracker::ValidationAspects kDrawAspects =
    1 << VALIDATION_ASPECT_PIPELINE | 1 << VALIDATION_ASPECT_BIND_GROUPS |
    1 << VALIDATION_ASPECT_VERTEX_BUFFERS;

static constexpr CommandBufferStateTracker::ValidationAspects kDrawIndexedAspects =
    1 << VALIDATION_ASPECT_PIPELINE | 1 << VALIDATION_ASPECT_BIND_GROUPS |
    1 << VALIDATION_ASPECT_VERTEX_BUFFERS | 1 << VALIDATION_ASPECT_INDEX_BUFFER;

static constexpr CommandBufferStateTracker::ValidationAspects kLazyAspects =
    1 << VALIDATION_ASPECT_BIND_GROUPS | 1 << VALIDATION_ASPECT_VERTEX_BUFFERS |
    1 << VALIDATION_ASPECT_INDEX_BUFFER;

MaybeError CommandBufferStateTracker::ValidateCanDispatch() {
    return ValidateOperation(kDispatchAspects);
}

MaybeError CommandBufferStateTracker::ValidateCanDraw() {
    return ValidateOperation(kDrawAspects);
}

MaybeError CommandBufferStateTracker::ValidateCanDrawIndexed() {
    return ValidateOperation(kDrawIndexedAspects);
}
MaybeError CommandBufferStateTracker::ValidateBufferInRangeForVertexBuffer(
|
||||
uint32_t vertexCount,
|
||||
MaybeError CommandBufferStateTracker::ValidateBufferInRangeForVertexBuffer(uint32_t vertexCount,
|
||||
uint32_t firstVertex) {
|
||||
RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
|
||||
|
||||
const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
|
||||
vertexBufferSlotsUsedAsVertexBuffer =
|
||||
const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>& vertexBufferSlotsUsedAsVertexBuffer =
|
||||
lastRenderPipeline->GetVertexBufferSlotsUsedAsVertexBuffer();
|
||||
|
||||
for (auto usedSlotVertex : IterateBitSet(vertexBufferSlotsUsedAsVertexBuffer)) {
|
||||
const VertexBufferInfo& vertexBuffer =
|
||||
lastRenderPipeline->GetVertexBuffer(usedSlotVertex);
|
||||
const VertexBufferInfo& vertexBuffer = lastRenderPipeline->GetVertexBuffer(usedSlotVertex);
|
||||
uint64_t arrayStride = vertexBuffer.arrayStride;
|
||||
uint64_t bufferSize = mVertexBufferSizes[usedSlotVertex];
|
||||
|
||||
|
@ -106,8 +103,7 @@ namespace dawn::native {
|
|||
} else {
|
||||
uint64_t strideCount = static_cast<uint64_t>(firstVertex) + vertexCount;
|
||||
if (strideCount != 0u) {
|
||||
uint64_t requiredSize =
|
||||
(strideCount - 1u) * arrayStride + vertexBuffer.lastStride;
|
||||
uint64_t requiredSize = (strideCount - 1u) * arrayStride + vertexBuffer.lastStride;
|
||||
// firstVertex and vertexCount are in uint32_t,
|
||||
// arrayStride must not be larger than kMaxVertexBufferArrayStride, which is
|
||||
// currently 2048, and vertexBuffer.lastStride = max(attribute.offset +
|
||||
|
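The overflow argument in the comment above is easy to check in isolation. A minimal standalone sketch of the same arithmetic (plain stdint C++, not Dawn's error machinery; the function name and the 2048 bound are taken from the comment, everything else is illustrative):

#include <cstdint>

// strideCount <= 2^33 - 2 because firstVertex and vertexCount are both uint32_t,
// and arrayStride <= 2048 (2^11), so (strideCount - 1) * arrayStride stays well
// below 2^44 and cannot overflow uint64_t.
uint64_t RequiredVertexBufferSize(uint32_t firstVertex, uint32_t vertexCount,
                                  uint64_t arrayStride, uint64_t lastStride) {
    uint64_t strideCount = static_cast<uint64_t>(firstVertex) + vertexCount;
    if (strideCount == 0u) {
        return 0u;  // drawing zero vertices needs no buffer space
    }
    return (strideCount - 1u) * arrayStride + lastStride;
}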
@@ -126,15 +122,14 @@ namespace dawn::native {
    }

    return {};
}

MaybeError CommandBufferStateTracker::ValidateBufferInRangeForInstanceBuffer(
    uint32_t instanceCount,
    uint32_t firstInstance) {
    RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();

    const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>& vertexBufferSlotsUsedAsInstanceBuffer =
        lastRenderPipeline->GetVertexBufferSlotsUsedAsInstanceBuffer();

    for (auto usedSlotInstance : IterateBitSet(vertexBufferSlotsUsedAsInstanceBuffer)) {

@@ -151,8 +146,7 @@ namespace dawn::native {
        } else {
            uint64_t strideCount = static_cast<uint64_t>(firstInstance) + instanceCount;
            if (strideCount != 0u) {
                uint64_t requiredSize = (strideCount - 1u) * arrayStride + vertexBuffer.lastStride;
                // firstInstance and instanceCount are in uint32_t,
                // arrayStride must not be larger than kMaxVertexBufferArrayStride, which is
                // currently 2048, and vertexBuffer.lastStride = max(attribute.offset +

@@ -171,9 +165,9 @@ namespace dawn::native {
    }

    return {};
}

MaybeError CommandBufferStateTracker::ValidateIndexBufferInRange(uint32_t indexCount,
                                                                 uint32_t firstIndex) {
    // Validate the range of index buffer
    // firstIndex and indexCount are in uint32_t, while IndexFormatSize is 2 (for

@@ -186,9 +180,9 @@ namespace dawn::native {
                    "(%u).",
                    firstIndex, indexCount, mIndexFormat, mIndexBufferSize);
    return {};
}

MaybeError CommandBufferStateTracker::ValidateOperation(ValidationAspects requiredAspects) {
    // Fast return-true path if everything is good
    ValidationAspects missingAspects = requiredAspects & ~mAspects;
    if (missingAspects.none()) {

@@ -204,9 +198,9 @@ namespace dawn::native {
    DAWN_TRY(CheckMissingAspects(requiredAspects & ~mAspects));

    return {};
}

void CommandBufferStateTracker::RecomputeLazyAspects(ValidationAspects aspects) {
    ASSERT(mAspects[VALIDATION_ASPECT_PIPELINE]);
    ASSERT((aspects & ~kLazyAspects).none());

@@ -245,9 +239,9 @@ namespace dawn::native {
            mAspects.set(VALIDATION_ASPECT_INDEX_BUFFER);
        }
    }
}

MaybeError CommandBufferStateTracker::CheckMissingAspects(ValidationAspects aspects) {
    if (!aspects.any()) {
        return {};
    }

@@ -310,8 +304,7 @@ namespace dawn::native {

            DAWN_INVALID_IF(
                requiredBGL->GetPipelineCompatibilityToken() == PipelineCompatibilityToken(0) &&
                    currentBGL->GetPipelineCompatibilityToken() != PipelineCompatibilityToken(0),
                "%s at index %u uses a %s which was created as part of the default layout for "
                "a different pipeline than the current one (%s), and as a result is not "
                "compatible. Use an explicit bind group layout when creating bind groups and "

@@ -342,37 +335,37 @@ namespace dawn::native {
    }

    UNREACHABLE();
}

void CommandBufferStateTracker::SetComputePipeline(ComputePipelineBase* pipeline) {
    SetPipelineCommon(pipeline);
}

void CommandBufferStateTracker::SetRenderPipeline(RenderPipelineBase* pipeline) {
    SetPipelineCommon(pipeline);
}

void CommandBufferStateTracker::SetBindGroup(BindGroupIndex index,
                                             BindGroupBase* bindgroup,
                                             uint32_t dynamicOffsetCount,
                                             const uint32_t* dynamicOffsets) {
    mBindgroups[index] = bindgroup;
    mDynamicOffsets[index].assign(dynamicOffsets, dynamicOffsets + dynamicOffsetCount);
    mAspects.reset(VALIDATION_ASPECT_BIND_GROUPS);
}

void CommandBufferStateTracker::SetIndexBuffer(wgpu::IndexFormat format, uint64_t size) {
    mIndexBufferSet = true;
    mIndexFormat = format;
    mIndexBufferSize = size;
}

void CommandBufferStateTracker::SetVertexBuffer(VertexBufferSlot slot, uint64_t size) {
    mVertexBufferSlotsUsed.set(slot);
    mVertexBufferSizes[slot] = size;
}

void CommandBufferStateTracker::SetPipelineCommon(PipelineBase* pipeline) {
    mLastPipeline = pipeline;
    mLastPipelineLayout = pipeline != nullptr ? pipeline->GetLayout() : nullptr;
    mMinBufferSizes = pipeline != nullptr ? &pipeline->GetMinBufferSizes() : nullptr;

@@ -381,41 +374,41 @@ namespace dawn::native {

    // Reset lazy aspects so they get recomputed on the next operation.
    mAspects &= ~kLazyAspects;
}

BindGroupBase* CommandBufferStateTracker::GetBindGroup(BindGroupIndex index) const {
    return mBindgroups[index];
}

const std::vector<uint32_t>& CommandBufferStateTracker::GetDynamicOffsets(
    BindGroupIndex index) const {
    return mDynamicOffsets[index];
}

bool CommandBufferStateTracker::HasPipeline() const {
    return mLastPipeline != nullptr;
}

RenderPipelineBase* CommandBufferStateTracker::GetRenderPipeline() const {
    ASSERT(HasPipeline() && mLastPipeline->GetType() == ObjectType::RenderPipeline);
    return static_cast<RenderPipelineBase*>(mLastPipeline);
}

ComputePipelineBase* CommandBufferStateTracker::GetComputePipeline() const {
    ASSERT(HasPipeline() && mLastPipeline->GetType() == ObjectType::ComputePipeline);
    return static_cast<ComputePipelineBase*>(mLastPipeline);
}

PipelineLayoutBase* CommandBufferStateTracker::GetPipelineLayout() const {
    return mLastPipelineLayout;
}

wgpu::IndexFormat CommandBufferStateTracker::GetIndexFormat() const {
    return mIndexFormat;
}

uint64_t CommandBufferStateTracker::GetIndexBufferSize() const {
    return mIndexBufferSize;
}

}  // namespace dawn::native
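The ValidateOperation fast path above is a common pattern: keep a bitset of aspects already known valid, and only recompute the lazily tracked ones on a miss. A minimal sketch of the idea with std::bitset (types and names invented for illustration, not Dawn API):

#include <bitset>

constexpr size_t kNumAspects = 4;
using Aspects = std::bitset<kNumAspects>;

struct TrackerSketch {
    Aspects mAspects;      // aspects currently known to be valid
    Aspects mLazyAspects;  // aspects that may be revalidated on demand

    bool ValidateOperation(Aspects required) {
        Aspects missing = required & ~mAspects;
        if (missing.none()) {
            return true;  // fast path: everything already validated
        }
        RecomputeLazyAspects(missing & mLazyAspects);
        return (required & ~mAspects).none();  // anything still missing is an error
    }

    void RecomputeLazyAspects(Aspects aspects) {
        // Re-run the per-aspect checks; this placeholder assumes they all pass.
        mAspects |= aspects;
    }
};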
@@ -26,7 +26,7 @@
namespace dawn::native {

class CommandBufferStateTracker {
  public:
    // Non-state-modifying validation functions
    MaybeError ValidateCanDispatch();

@@ -81,7 +81,7 @@ namespace dawn::native {
    PipelineBase* mLastPipeline = nullptr;

    const RequiredBufferSizes* mMinBufferSizes = nullptr;
};

}  // namespace dawn::native
@@ -41,37 +41,34 @@
namespace dawn::native {

namespace {

bool HasDeprecatedColor(const RenderPassColorAttachment& attachment) {
    return !std::isnan(attachment.clearColor.r) || !std::isnan(attachment.clearColor.g) ||
           !std::isnan(attachment.clearColor.b) || !std::isnan(attachment.clearColor.a);
}

MaybeError ValidateB2BCopyAlignment(uint64_t dataSize, uint64_t srcOffset, uint64_t dstOffset) {
    // Copy size must be a multiple of 4 bytes on macOS.
    DAWN_INVALID_IF(dataSize % 4 != 0, "Copy size (%u) is not a multiple of 4.", dataSize);

    // SourceOffset and destinationOffset must be multiples of 4 bytes on macOS.
    DAWN_INVALID_IF(srcOffset % 4 != 0 || dstOffset % 4 != 0,
                    "Source offset (%u) or destination offset (%u) is not a multiple of 4 bytes,",
                    srcOffset, dstOffset);

    return {};
}

MaybeError ValidateTextureSampleCountInBufferCopyCommands(const TextureBase* texture) {
    DAWN_INVALID_IF(texture->GetSampleCount() > 1,
                    "%s sample count (%u) is not 1 when copying to or from a buffer.", texture,
                    texture->GetSampleCount());

    return {};
}

MaybeError ValidateLinearTextureCopyOffset(const TextureDataLayout& layout,
                                           const TexelBlockInfo& blockInfo,
                                           const bool hasDepthOrStencil) {
    if (hasDepthOrStencil) {
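The alignment rule in ValidateB2BCopyAlignment above is purely arithmetic, so it is easy to exercise standalone. A hedged sketch (plain C++, invented function name, without Dawn's error plumbing):

#include <cassert>
#include <cstdint>

// True when a buffer-to-buffer copy satisfies the 4-byte alignment rules
// enforced above: size, source offset, and destination offset all aligned.
bool B2BCopyIsAligned(uint64_t dataSize, uint64_t srcOffset, uint64_t dstOffset) {
    return dataSize % 4 == 0 && srcOffset % 4 == 0 && dstOffset % 4 == 0;
}

int main() {
    assert(B2BCopyIsAligned(12, 4, 8));   // all multiples of 4: valid
    assert(!B2BCopyIsAligned(10, 4, 8));  // size not a multiple of 4: rejected
    assert(!B2BCopyIsAligned(12, 2, 8));  // misaligned source offset: rejected
}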
@@ -85,10 +82,9 @@ namespace dawn::native {
                        layout.offset, blockInfo.byteSize);
    }
    return {};
}

MaybeError ValidateTextureDepthStencilToBufferCopyRestrictions(const ImageCopyTexture& src) {
    Aspect aspectUsed;
    DAWN_TRY_ASSIGN(aspectUsed, SingleAspectUsedByImageCopyTexture(src));
    if (aspectUsed == Aspect::Depth) {

@@ -111,9 +107,9 @@ namespace dawn::native {
    }

    return {};
}

MaybeError ValidateAttachmentArrayLayersAndLevelCount(const TextureViewBase* attachment) {
    // Currently we do not support layered rendering.
    DAWN_INVALID_IF(attachment->GetLayerCount() > 1,
                    "The layer count (%u) of %s used as attachment is greater than 1.",

@@ -124,9 +120,9 @@ namespace dawn::native {
                    attachment->GetLevelCount(), attachment);

    return {};
}

MaybeError ValidateOrSetAttachmentSize(const TextureViewBase* attachment,
                                       uint32_t* width,
                                       uint32_t* height) {
    const Extent3D& attachmentSize =

@@ -138,17 +134,16 @@ namespace dawn::native {
        *height = attachmentSize.height;
        DAWN_ASSERT(*width != 0 && *height != 0);
    } else {
        DAWN_INVALID_IF(*width != attachmentSize.width || *height != attachmentSize.height,
                        "Attachment %s size (width: %u, height: %u) does not match the size of the "
                        "other attachments (width: %u, height: %u).",
                        attachment, attachmentSize.width, attachmentSize.height, *width, *height);
    }

    return {};
}

MaybeError ValidateOrSetColorAttachmentSampleCount(const TextureViewBase* colorAttachment,
                                                   uint32_t* sampleCount) {
    if (*sampleCount == 0) {
        *sampleCount = colorAttachment->GetTexture()->GetSampleCount();

@@ -162,9 +157,9 @@ namespace dawn::native {
    }

    return {};
}

MaybeError ValidateResolveTarget(const DeviceBase* device,
                                 const RenderPassColorAttachment& colorAttachment,
                                 UsageValidationMode usageValidationMode) {
    if (colorAttachment.resolveTarget == nullptr) {

@@ -177,8 +172,7 @@ namespace dawn::native {
    DAWN_TRY(ValidateCanUseAs(colorAttachment.resolveTarget->GetTexture(),
                              wgpu::TextureUsage::RenderAttachment, usageValidationMode));

    DAWN_INVALID_IF(!attachment->GetTexture()->IsMultisampledTexture(),
                    "Cannot set %s as a resolve target when the color attachment %s has a sample "
                    "count of 1.",
                    resolveTarget, attachment);

@@ -198,10 +192,8 @@ namespace dawn::native {
    const Extent3D& colorTextureSize =
        attachment->GetTexture()->GetMipLevelVirtualSize(attachment->GetBaseMipLevel());
    const Extent3D& resolveTextureSize =
        resolveTarget->GetTexture()->GetMipLevelVirtualSize(resolveTarget->GetBaseMipLevel());
    DAWN_INVALID_IF(colorTextureSize.width != resolveTextureSize.width ||
                        colorTextureSize.height != resolveTextureSize.height,
                    "The Resolve target %s size (width: %u, height: %u) does not match the color "
                    "attachment %s size (width: %u, height: %u).",

@@ -220,10 +212,9 @@ namespace dawn::native {
                    resolveTarget, resolveTargetFormat);

    return {};
}

MaybeError ValidateRenderPassColorAttachment(DeviceBase* device,
                                             const RenderPassColorAttachment& colorAttachment,
                                             uint32_t* width,
                                             uint32_t* height,
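The validate-or-set pattern in ValidateOrSetAttachmentSize above is worth seeing in isolation: the first attachment fixes the expected render-pass size, and every later attachment must match it. A minimal sketch with invented names:

#include <cstdint>

// The first caller with *width == 0 establishes the reference size; subsequent
// callers must match it exactly. Returns false on a mismatch.
bool ValidateOrSetSize(uint32_t attachmentWidth, uint32_t attachmentHeight,
                       uint32_t* width, uint32_t* height) {
    if (*width == 0) {
        // First attachment seen: record its size as the reference.
        *width = attachmentWidth;
        *height = attachmentHeight;
        return true;
    }
    return *width == attachmentWidth && *height == attachmentHeight;
}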
@@ -234,20 +225,18 @@ namespace dawn::native {
        return {};
    }
    DAWN_TRY(device->ValidateObject(attachment));
    DAWN_TRY(ValidateCanUseAs(attachment->GetTexture(), wgpu::TextureUsage::RenderAttachment,
                              usageValidationMode));

    DAWN_INVALID_IF(
        !(attachment->GetAspects() & Aspect::Color) || !attachment->GetFormat().isRenderable,
        "The color attachment %s format (%s) is not color renderable.", attachment,
        attachment->GetFormat().format);

    DAWN_TRY(ValidateLoadOp(colorAttachment.loadOp));
    DAWN_TRY(ValidateStoreOp(colorAttachment.storeOp));
    DAWN_INVALID_IF(colorAttachment.loadOp == wgpu::LoadOp::Undefined, "loadOp must be set.");
    DAWN_INVALID_IF(colorAttachment.storeOp == wgpu::StoreOp::Undefined, "storeOp must be set.");

    // TODO(dawn:1269): Remove after the deprecation period.
    bool useClearColor = HasDeprecatedColor(colorAttachment);

@@ -272,9 +261,9 @@ namespace dawn::native {
    DAWN_TRY(ValidateOrSetAttachmentSize(attachment, width, height));

    return {};
}

MaybeError ValidateRenderPassDepthStencilAttachment(
    DeviceBase* device,
    const RenderPassDepthStencilAttachment* depthStencilAttachment,
    uint32_t* width,

@@ -285,27 +274,24 @@ namespace dawn::native {

    TextureViewBase* attachment = depthStencilAttachment->view;
    DAWN_TRY(device->ValidateObject(attachment));
    DAWN_TRY(ValidateCanUseAs(attachment->GetTexture(), wgpu::TextureUsage::RenderAttachment,
                              usageValidationMode));

    const Format& format = attachment->GetFormat();
    DAWN_INVALID_IF(!format.HasDepthOrStencil(),
                    "The depth stencil attachment %s format (%s) is not a depth stencil format.",
                    attachment, format.format);

    DAWN_INVALID_IF(!format.isRenderable,
                    "The depth stencil attachment %s format (%s) is not renderable.", attachment,
                    format.format);

    DAWN_INVALID_IF(attachment->GetAspects() != format.aspects,
                    "The depth stencil attachment %s must encompass all aspects.", attachment);

    DAWN_INVALID_IF(
        attachment->GetAspects() == (Aspect::Depth | Aspect::Stencil) &&
            depthStencilAttachment->depthReadOnly != depthStencilAttachment->stencilReadOnly,
        "depthReadOnly (%u) and stencilReadOnly (%u) must be the same when texture aspect "
        "is 'all'.",
        depthStencilAttachment->depthReadOnly, depthStencilAttachment->stencilReadOnly);

@@ -326,8 +312,7 @@ namespace dawn::native {
                    "no depth aspect or depthReadOnly (%u) is true.",
                    depthStencilAttachment->depthLoadOp, attachment,
                    depthStencilAttachment->depthReadOnly);
    DAWN_INVALID_IF(depthStencilAttachment->depthStoreOp != wgpu::StoreOp::Undefined,
                    "depthStoreOp (%s) must not be set if the attachment (%s) has no depth "
                    "aspect or depthReadOnly (%u) is true.",
                    depthStencilAttachment->depthStoreOp, attachment,

@@ -372,15 +357,13 @@ namespace dawn::native {
        }
    } else {
        DAWN_TRY(ValidateLoadOp(depthStencilAttachment->stencilLoadOp));
        DAWN_INVALID_IF(depthStencilAttachment->stencilLoadOp == wgpu::LoadOp::Undefined,
                        "stencilLoadOp (%s) must be set if the attachment (%s) has a stencil "
                        "aspect and stencilReadOnly (%u) is false.",
                        depthStencilAttachment->stencilLoadOp, attachment,
                        depthStencilAttachment->stencilReadOnly);
        DAWN_TRY(ValidateStoreOp(depthStencilAttachment->stencilStoreOp));
        DAWN_INVALID_IF(depthStencilAttachment->stencilStoreOp == wgpu::StoreOp::Undefined,
                        "stencilStoreOp (%s) must be set if the attachment (%s) has a stencil "
                        "aspect and stencilReadOnly (%u) is false.",
                        depthStencilAttachment->stencilStoreOp, attachment,

@@ -389,8 +372,7 @@ namespace dawn::native {

    if (!std::isnan(depthStencilAttachment->clearDepth)) {
        // TODO(dawn:1269): Remove this branch after the deprecation period.
        device->EmitDeprecationWarning("clearDepth is deprecated, prefer depthClearValue instead.");
    } else {
        DAWN_INVALID_IF(depthStencilAttachment->depthLoadOp == wgpu::LoadOp::Clear &&
                            std::isnan(depthStencilAttachment->depthClearValue),

@@ -421,9 +403,9 @@ namespace dawn::native {
    DAWN_TRY(ValidateOrSetAttachmentSize(attachment, width, height));

    return {};
}

MaybeError ValidateRenderPassDescriptor(DeviceBase* device,
                                        const RenderPassDescriptor* descriptor,
                                        uint32_t* width,
                                        uint32_t* height,

@@ -436,9 +418,9 @@ namespace dawn::native {

    bool isAllColorAttachmentNull = true;
    for (uint32_t i = 0; i < descriptor->colorAttachmentCount; ++i) {
        DAWN_TRY_CONTEXT(
            ValidateRenderPassColorAttachment(device, descriptor->colorAttachments[i], width,
                                              height, sampleCount, usageValidationMode),
            "validating colorAttachments[%u].", i);
        if (descriptor->colorAttachments[i].view) {
            isAllColorAttachmentNull = false;

@@ -447,8 +429,8 @@ namespace dawn::native {

    if (descriptor->depthStencilAttachment != nullptr) {
        DAWN_TRY_CONTEXT(ValidateRenderPassDepthStencilAttachment(
                             device, descriptor->depthStencilAttachment, width, height, sampleCount,
                             usageValidationMode),
                         "validating depthStencilAttachment.");
    } else {
        DAWN_INVALID_IF(

@@ -459,9 +441,9 @@ namespace dawn::native {
    if (descriptor->occlusionQuerySet != nullptr) {
        DAWN_TRY(device->ValidateObject(descriptor->occlusionQuerySet));

        DAWN_INVALID_IF(descriptor->occlusionQuerySet->GetQueryType() != wgpu::QueryType::Occlusion,
                        "The occlusionQuerySet %s type (%s) is not %s.",
                        descriptor->occlusionQuerySet,
                        descriptor->occlusionQuerySet->GetQueryType(), wgpu::QueryType::Occlusion);
    }

@@ -479,10 +461,9 @@ namespace dawn::native {
        DAWN_ASSERT(querySet != nullptr);
        uint32_t queryIndex = descriptor->timestampWrites[i].queryIndex;
        DAWN_TRY_CONTEXT(ValidateTimestampQuery(device, querySet, queryIndex),
                         "validating querySet and queryIndex of timestampWrites[%u].", i);
        DAWN_TRY_CONTEXT(
            ValidateRenderPassTimestampLocation(descriptor->timestampWrites[i].location),
            "validating location of timestampWrites[%u].", i);

        auto checkIt = usedQueries.find(querySet);

@@ -497,14 +478,14 @@ namespace dawn::native {
        }
    }

    DAWN_INVALID_IF(
        descriptor->colorAttachmentCount == 0 && descriptor->depthStencilAttachment == nullptr,
        "Render pass has no attachments.");

    return {};
}

MaybeError ValidateComputePassDescriptor(const DeviceBase* device,
                                         const ComputePassDescriptor* descriptor) {
    if (descriptor == nullptr) {
        return {};
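The usedQueries lookup in the timestampWrites hunk above tracks which (querySet, queryIndex) pairs a pass has already written, so a duplicate write can be rejected. A sketch of that bookkeeping with invented types (the real code uses Dawn's QuerySetBase and error macros):

#include <cstdint>
#include <map>
#include <set>

struct QuerySet;  // opaque stand-in for Dawn's QuerySetBase

// Records a timestamp write; returns false when an earlier write in the same
// pass already used this (querySet, queryIndex) pair, i.e. a validation error.
bool TrackTimestampWrite(std::map<QuerySet*, std::set<uint32_t>>* usedQueries,
                         QuerySet* querySet, uint32_t queryIndex) {
    auto [it, inserted] = (*usedQueries)[querySet].insert(queryIndex);
    return inserted;
}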
@@ -515,27 +496,26 @@ namespace dawn::native {

    for (uint32_t i = 0; i < descriptor->timestampWriteCount; ++i) {
        DAWN_ASSERT(descriptor->timestampWrites[i].querySet != nullptr);
        DAWN_TRY_CONTEXT(ValidateTimestampQuery(device, descriptor->timestampWrites[i].querySet,
                                                descriptor->timestampWrites[i].queryIndex),
                         "validating querySet and queryIndex of timestampWrites[%u].", i);
        DAWN_TRY_CONTEXT(
            ValidateComputePassTimestampLocation(descriptor->timestampWrites[i].location),
            "validating location of timestampWrites[%u].", i);
    }

    return {};
}

MaybeError ValidateQuerySetResolve(const QuerySetBase* querySet,
                                   uint32_t firstQuery,
                                   uint32_t queryCount,
                                   const BufferBase* destination,
                                   uint64_t destinationOffset) {
    DAWN_INVALID_IF(firstQuery >= querySet->GetQueryCount(),
                    "First query (%u) exceeds the number of queries (%u) in %s.", firstQuery,
                    querySet->GetQueryCount(), querySet);

    DAWN_INVALID_IF(
        queryCount > querySet->GetQueryCount() - firstQuery,

@@ -544,25 +524,25 @@ namespace dawn::native {
        firstQuery, queryCount, querySet->GetQueryCount(), querySet);

    DAWN_INVALID_IF(destinationOffset % 256 != 0,
                    "The destination buffer %s offset (%u) is not a multiple of 256.", destination,
                    destinationOffset);

    uint64_t bufferSize = destination->GetSize();
    // The destination buffer must have enough storage, from destination offset, to contain
    // the result of resolved queries
    bool fitsInBuffer =
        destinationOffset <= bufferSize &&
        (static_cast<uint64_t>(queryCount) * sizeof(uint64_t) <= (bufferSize - destinationOffset));
    DAWN_INVALID_IF(
        !fitsInBuffer,
        "The resolved %s data size (%u) would not fit in %s with size %u at the offset %u.",
        querySet, static_cast<uint64_t>(queryCount) * sizeof(uint64_t), destination, bufferSize,
        destinationOffset);

    return {};
}

MaybeError EncodeTimestampsToNanosecondsConversion(CommandEncoder* encoder,
                                                   QuerySetBase* querySet,
                                                   uint32_t firstQuery,
                                                   uint32_t queryCount,
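The fitsInBuffer check in the hunk above is written to avoid unsigned overflow: it checks the offset first, then compares against the remaining space rather than computing offset + size. A standalone sketch of the same idiom (invented helper name):

#include <cassert>
#include <cstdint>

// Overflow-safe "does [offset, offset + size) fit in bufferSize bytes?".
// Computing offset + size directly could wrap in uint64_t; subtracting on the
// other side of the comparison cannot, because offset <= bufferSize holds first.
bool FitsInBuffer(uint64_t offset, uint64_t size, uint64_t bufferSize) {
    return offset <= bufferSize && size <= bufferSize - offset;
}

int main() {
    assert(FitsInBuffer(0, 16, 16));
    assert(!FitsInBuffer(8, 16, 16));
    // A naive offset + size <= bufferSize would wrap and wrongly accept this:
    assert(!FitsInBuffer(UINT64_MAX, 2, 16));
}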
@@ -583,8 +563,7 @@ namespace dawn::native {
    Ref<BufferBase> availabilityBuffer;
    DAWN_TRY_ASSIGN(availabilityBuffer, device->CreateBuffer(&availabilityDesc));

    DAWN_TRY(device->GetQueue()->WriteBuffer(availabilityBuffer.Get(), 0, availability.data(),
                                             availability.size() * sizeof(uint32_t)));

    // Timestamp params uniform buffer

@@ -597,14 +576,13 @@ namespace dawn::native {
    Ref<BufferBase> paramsBuffer;
    DAWN_TRY_ASSIGN(paramsBuffer, device->CreateBuffer(&parmsDesc));

    DAWN_TRY(device->GetQueue()->WriteBuffer(paramsBuffer.Get(), 0, &params, sizeof(params)));

    return EncodeConvertTimestampsToNanoseconds(encoder, destination, availabilityBuffer.Get(),
                                                paramsBuffer.Get());
}

bool IsReadOnlyDepthStencilAttachment(
    const RenderPassDepthStencilAttachment* depthStencilAttachment) {
    DAWN_ASSERT(depthStencilAttachment != nullptr);
    Aspect aspects = depthStencilAttachment->view->GetAspects();

@@ -617,11 +595,11 @@ namespace dawn::native {
            return false;
        }
    return true;
}

}  // namespace

MaybeError ValidateCommandEncoderDescriptor(const DeviceBase* device,
                                            const CommandEncoderDescriptor* descriptor) {
    DAWN_TRY(ValidateSingleSType(descriptor->nextInChain,
                                 wgpu::SType::DawnEncoderInternalUsageDescriptor));

@@ -633,20 +611,20 @@ namespace dawn::native {
                    !device->APIHasFeature(wgpu::FeatureName::DawnInternalUsages),
                    "%s is not available.", wgpu::FeatureName::DawnInternalUsages);
    return {};
}

// static
Ref<CommandEncoder> CommandEncoder::Create(DeviceBase* device,
                                           const CommandEncoderDescriptor* descriptor) {
    return AcquireRef(new CommandEncoder(device, descriptor));
}

// static
CommandEncoder* CommandEncoder::MakeError(DeviceBase* device) {
    return new CommandEncoder(device, ObjectBase::kError);
}

CommandEncoder::CommandEncoder(DeviceBase* device, const CommandEncoderDescriptor* descriptor)
    : ApiObjectBase(device, descriptor->label), mEncodingContext(device, this) {
    TrackInDevice();

@@ -658,38 +636,38 @@ namespace dawn::native {
    } else {
        mUsageValidationMode = UsageValidationMode::Default;
    }
}

CommandEncoder::CommandEncoder(DeviceBase* device, ObjectBase::ErrorTag tag)
    : ApiObjectBase(device, tag),
      mEncodingContext(device, this),
      mUsageValidationMode(UsageValidationMode::Default) {
    mEncodingContext.HandleError(DAWN_FORMAT_VALIDATION_ERROR("%s is invalid.", this));
}

ObjectType CommandEncoder::GetType() const {
    return ObjectType::CommandEncoder;
}

void CommandEncoder::DestroyImpl() {
    mEncodingContext.Destroy();
}

CommandBufferResourceUsage CommandEncoder::AcquireResourceUsages() {
    return CommandBufferResourceUsage{
        mEncodingContext.AcquireRenderPassUsages(), mEncodingContext.AcquireComputePassUsages(),
        std::move(mTopLevelBuffers), std::move(mTopLevelTextures), std::move(mUsedQuerySets)};
}

CommandIterator CommandEncoder::AcquireCommands() {
    return mEncodingContext.AcquireCommands();
}

void CommandEncoder::TrackUsedQuerySet(QuerySetBase* querySet) {
    mUsedQuerySets.insert(querySet);
}

void CommandEncoder::TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex) {
    DAWN_ASSERT(querySet != nullptr);

    if (GetDevice()->IsValidationEnabled()) {
@@ -698,17 +676,15 @@ namespace dawn::native {

    // Set the query at queryIndex to available for resolving in query set.
    querySet->SetQueryAvailability(queryIndex, true);
}

// Implementation of the API's command recording methods

ComputePassEncoder* CommandEncoder::APIBeginComputePass(const ComputePassDescriptor* descriptor) {
    return BeginComputePass(descriptor).Detach();
}

Ref<ComputePassEncoder> CommandEncoder::BeginComputePass(const ComputePassDescriptor* descriptor) {
    DeviceBase* device = GetDevice();

    std::vector<TimestampWrite> timestampWritesAtBeginning;

@@ -763,13 +739,13 @@ namespace dawn::native {
    }

    return ComputePassEncoder::MakeError(device, this, &mEncodingContext);
}

RenderPassEncoder* CommandEncoder::APIBeginRenderPass(const RenderPassDescriptor* descriptor) {
    return BeginRenderPass(descriptor).Detach();
}

Ref<RenderPassEncoder> CommandEncoder::BeginRenderPass(const RenderPassDescriptor* descriptor) {
    DeviceBase* device = GetDevice();

    RenderPassResourceUsageTracker usageTracker;

@@ -786,8 +762,8 @@ namespace dawn::native {
        [&](CommandAllocator* allocator) -> MaybeError {
            uint32_t sampleCount = 0;

            DAWN_TRY(ValidateRenderPassDescriptor(device, descriptor, &width, &height, &sampleCount,
                                                  mUsageValidationMode));

            ASSERT(width > 0 && height > 0 && sampleCount > 0);

@@ -928,9 +904,9 @@ namespace dawn::native {
    }

    return RenderPassEncoder::MakeError(device, this, &mEncodingContext);
}

void CommandEncoder::APICopyBufferToBuffer(BufferBase* source,
                                           uint64_t sourceOffset,
                                           BufferBase* destination,
                                           uint64_t destinationOffset,

@@ -947,8 +923,7 @@ namespace dawn::native {

            DAWN_TRY_CONTEXT(ValidateCopySizeFitsInBuffer(source, sourceOffset, size),
                             "validating source %s copy size.", source);
            DAWN_TRY_CONTEXT(ValidateCopySizeFitsInBuffer(destination, destinationOffset, size),
                             "validating destination %s copy size.", destination);
            DAWN_TRY(ValidateB2BCopyAlignment(size, sourceOffset, destinationOffset));

@@ -973,9 +948,9 @@ namespace dawn::native {
        },
        "encoding %s.CopyBufferToBuffer(%s, %u, %s, %u, %u).", this, source, sourceOffset,
        destination, destinationOffset, size);
}

void CommandEncoder::APICopyBufferToTexture(const ImageCopyBuffer* source,
                                            const ImageCopyTexture* destination,
                                            const Extent3D* copySize) {
    mEncodingContext.TryEncode(

@@ -987,8 +962,7 @@ namespace dawn::native {
                             "validating source %s usage.", source->buffer);

            DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *destination, *copySize));
            DAWN_TRY_CONTEXT(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst,
                                              mUsageValidationMode),
                             "validating destination %s usage.", destination->texture);
            DAWN_TRY(ValidateTextureSampleCountInBufferCopyCommands(destination->texture));

@@ -1031,11 +1005,11 @@ namespace dawn::native {

            return {};
        },
        "encoding %s.CopyBufferToTexture(%s, %s, %s).", this, source->buffer, destination->texture,
        copySize);
}

void CommandEncoder::APICopyTextureToBuffer(const ImageCopyTexture* source,
                                            const ImageCopyBuffer* destination,
                                            const Extent3D* copySize) {
    mEncodingContext.TryEncode(

@@ -1050,8 +1024,7 @@ namespace dawn::native {
            DAWN_TRY(ValidateTextureDepthStencilToBufferCopyRestrictions(*source));

            DAWN_TRY(ValidateImageCopyBuffer(GetDevice(), *destination));
            DAWN_TRY_CONTEXT(ValidateCanUseAs(destination->buffer, wgpu::BufferUsage::CopyDst),
                             "validating destination %s usage.", destination->buffer);

            // We validate texture copy range before validating linear texture data,

@@ -1090,24 +1063,24 @@ namespace dawn::native {

            return {};
        },
        "encoding %s.CopyTextureToBuffer(%s, %s, %s).", this, source->texture, destination->buffer,
        copySize);
}

void CommandEncoder::APICopyTextureToTexture(const ImageCopyTexture* source,
                                             const ImageCopyTexture* destination,
                                             const Extent3D* copySize) {
    APICopyTextureToTextureHelper<false>(source, destination, copySize);
}

void CommandEncoder::APICopyTextureToTextureInternal(const ImageCopyTexture* source,
                                                     const ImageCopyTexture* destination,
                                                     const Extent3D* copySize) {
    APICopyTextureToTextureHelper<true>(source, destination, copySize);
}

template <bool Internal>
void CommandEncoder::APICopyTextureToTextureHelper(const ImageCopyTexture* source,
                                                   const ImageCopyTexture* destination,
                                                   const Extent3D* copySize) {
    mEncodingContext.TryEncode(

@@ -1165,9 +1138,9 @@ namespace dawn::native {
        },
        "encoding %s.CopyTextureToTexture(%s, %s, %s).", this, source->texture,
        destination->texture, copySize);
}

void CommandEncoder::APIClearBuffer(BufferBase* buffer, uint64_t offset, uint64_t size) {
    mEncodingContext.TryEncode(
        this,
        [&](CommandAllocator* allocator) -> MaybeError {

@@ -1176,8 +1149,8 @@ namespace dawn::native {

            uint64_t bufferSize = buffer->GetSize();
            DAWN_INVALID_IF(offset > bufferSize,
                            "Buffer offset (%u) is larger than the size (%u) of %s.", offset,
                            bufferSize, buffer);

            uint64_t remainingSize = bufferSize - offset;
            if (size == wgpu::kWholeSize) {

@@ -1216,15 +1189,15 @@ namespace dawn::native {
            return {};
        },
        "encoding %s.ClearBuffer(%s, %u, %u).", this, buffer, offset, size);
}
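ClearBuffer above accepts wgpu::kWholeSize as a sentinel meaning "to the end of the buffer", defaulting the size to the space remaining after the offset check. A hedged sketch of that defaulting (invented helper, std::optional in place of Dawn's MaybeError):

#include <cstdint>
#include <limits>
#include <optional>

constexpr uint64_t kWholeSize = std::numeric_limits<uint64_t>::max();  // sentinel, as in WebGPU

// Resolves an (offset, size) clear request against a buffer of bufferSize bytes.
// Returns std::nullopt when the range is invalid.
std::optional<uint64_t> ResolveClearSize(uint64_t offset, uint64_t size, uint64_t bufferSize) {
    if (offset > bufferSize) {
        return std::nullopt;   // offset past the end: validation error
    }
    uint64_t remainingSize = bufferSize - offset;
    if (size == kWholeSize) {
        return remainingSize;  // default: clear everything after offset
    }
    return size <= remainingSize ? std::optional<uint64_t>(size) : std::nullopt;
}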
void CommandEncoder::APIInjectValidationError(const char* message) {
    if (mEncodingContext.CheckCurrentEncoder(this)) {
        mEncodingContext.HandleError(DAWN_VALIDATION_ERROR(message));
    }
}

void CommandEncoder::APIInsertDebugMarker(const char* groupLabel) {
    mEncodingContext.TryEncode(
        this,
        [&](CommandAllocator* allocator) -> MaybeError {

@@ -1238,15 +1211,14 @@ namespace dawn::native {
            return {};
        },
        "encoding %s.InsertDebugMarker(\"%s\").", this, groupLabel);
}

void CommandEncoder::APIPopDebugGroup() {
    mEncodingContext.TryEncode(
        this,
        [&](CommandAllocator* allocator) -> MaybeError {
            if (GetDevice()->IsValidationEnabled()) {
                DAWN_INVALID_IF(mDebugGroupStackSize == 0,
                                "PopDebugGroup called when no debug groups are currently pushed.");
            }
            allocator->Allocate<PopDebugGroupCmd>(Command::PopDebugGroup);

@@ -1256,9 +1228,9 @@ namespace dawn::native {
            return {};
        },
        "encoding %s.PopDebugGroup().", this);
}

void CommandEncoder::APIPushDebugGroup(const char* groupLabel) {
    mEncodingContext.TryEncode(
        this,
        [&](CommandAllocator* allocator) -> MaybeError {

@@ -1275,9 +1247,9 @@ namespace dawn::native {
            return {};
        },
        "encoding %s.PushDebugGroup(\"%s\").", this, groupLabel);
}

void CommandEncoder::APIResolveQuerySet(QuerySetBase* querySet,
                                        uint32_t firstQuery,
                                        uint32_t queryCount,
                                        BufferBase* destination,

@@ -1315,11 +1287,11 @@ namespace dawn::native {

            return {};
        },
        "encoding %s.ResolveQuerySet(%s, %u, %u, %s, %u).", this, querySet, firstQuery, queryCount,
        destination, destinationOffset);
}

void CommandEncoder::APIWriteBuffer(BufferBase* buffer,
                                    uint64_t bufferOffset,
                                    const uint8_t* data,
                                    uint64_t size) {

@@ -1343,9 +1315,9 @@ namespace dawn::native {
            return {};
        },
        "encoding %s.WriteBuffer(%s, %u, ..., %u).", this, buffer, bufferOffset, size);
}

void CommandEncoder::APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
    mEncodingContext.TryEncode(
        this,
        [&](CommandAllocator* allocator) -> MaybeError {

@@ -1363,18 +1335,18 @@ namespace dawn::native {
            return {};
        },
        "encoding %s.WriteTimestamp(%s, %u).", this, querySet, queryIndex);
}

CommandBufferBase* CommandEncoder::APIFinish(const CommandBufferDescriptor* descriptor) {
    Ref<CommandBufferBase> commandBuffer;
    if (GetDevice()->ConsumedError(Finish(descriptor), &commandBuffer)) {
        return CommandBufferBase::MakeError(GetDevice());
    }
    ASSERT(!IsError());
    return commandBuffer.Detach();
}

ResultOrError<Ref<CommandBufferBase>> CommandEncoder::Finish(
    const CommandBufferDescriptor* descriptor) {
    DeviceBase* device = GetDevice();

@@ -1394,10 +1366,10 @@ namespace dawn::native {
    }

    return device->CreateCommandBuffer(this, descriptor);
}

// Implementation of the command buffer validation that can be precomputed before submit
MaybeError CommandEncoder::ValidateFinish() const {
    TRACE_EVENT0(GetDevice()->GetPlatform(), Validation, "CommandEncoder::ValidateFinish");
    DAWN_TRY(GetDevice()->ValidateObject(this));

@@ -1420,6 +1392,6 @@ namespace dawn::native {
                    mDebugGroupStackSize);

    return {};
}

}  // namespace dawn::native
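PopDebugGroup above rejects a pop on an empty stack, and ValidateFinish later checks the counter is back at zero; together these guarantee balanced Push/Pop pairs per encoder, and a plain counter is enough state. A minimal sketch of that bookkeeping (invented struct, mirroring the checks above):

#include <cstdint>

struct DebugGroupTracker {
    uint64_t mDebugGroupStackSize = 0;

    void Push() { ++mDebugGroupStackSize; }

    bool Pop() {
        if (mDebugGroupStackSize == 0) {
            return false;  // "PopDebugGroup called when no debug groups are currently pushed."
        }
        --mDebugGroupStackSize;
        return true;
    }

    // Mirrors the ValidateFinish() check: every pushed group must have been popped.
    bool IsBalanced() const { return mDebugGroupStackSize == 0; }
};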
@@ -27,12 +27,12 @@
namespace dawn::native {

enum class UsageValidationMode;

MaybeError ValidateCommandEncoderDescriptor(const DeviceBase* device,
                                            const CommandEncoderDescriptor* descriptor);

class CommandEncoder final : public ApiObjectBase {
  public:
    static Ref<CommandEncoder> Create(DeviceBase* device,
                                      const CommandEncoderDescriptor* descriptor);

@@ -116,7 +116,7 @@ namespace dawn::native {
    uint64_t mDebugGroupStackSize = 0;

    UsageValidationMode mUsageValidationMode;
};

}  // namespace dawn::native
@@ -32,8 +32,8 @@
namespace dawn::native {

// Performs validation of the "synchronization scope" rules of WebGPU.
MaybeError ValidateSyncScopeResourceUsage(const SyncScopeResourceUsage& scope) {
    // Buffers can only be used as single-write or multiple read.
    for (size_t i = 0; i < scope.bufferUsages.size(); ++i) {
        const wgpu::BufferUsage usage = scope.bufferUsages[i];

@@ -64,9 +64,9 @@ namespace dawn::native {
        DAWN_TRY(std::move(error));
    }
    return {};
}

MaybeError ValidateTimestampQuery(const DeviceBase* device,
                                  const QuerySetBase* querySet,
                                  uint32_t queryIndex) {
    DAWN_TRY(device->ValidateObject(querySet));

@@ -79,9 +79,9 @@ namespace dawn::native {
                    querySet->GetQueryCount(), querySet);

    return {};
}

MaybeError ValidateWriteBuffer(const DeviceBase* device,
                               const BufferBase* buffer,
                               uint64_t bufferOffset,
                               uint64_t size) {

@@ -100,16 +100,16 @@ namespace dawn::native {
    DAWN_TRY(ValidateCanUseAs(buffer, wgpu::BufferUsage::CopyDst));

    return {};
}

bool IsRangeOverlapped(uint32_t startA, uint32_t startB, uint32_t length) {
    uint32_t maxStart = std::max(startA, startB);
    uint32_t minStart = std::min(startA, startB);
    return static_cast<uint64_t>(minStart) + static_cast<uint64_t>(length) >
           static_cast<uint64_t>(maxStart);
}

ResultOrError<uint64_t> ComputeRequiredBytesInCopy(const TexelBlockInfo& blockInfo,
                                                   const Extent3D& copySize,
                                                   uint32_t bytesPerRow,
                                                   uint32_t rowsPerImage) {
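IsRangeOverlapped above tests whether two equal-length ranges [startA, startA+length) and [startB, startB+length) intersect: they overlap exactly when the earlier start plus the length extends past the later start, computed in uint64_t so the sum cannot wrap. A quick check of the edge cases (the function body is copied from the hunk; the asserts are illustrative):

#include <algorithm>
#include <cassert>
#include <cstdint>

bool IsRangeOverlapped(uint32_t startA, uint32_t startB, uint32_t length) {
    uint32_t maxStart = std::max(startA, startB);
    uint32_t minStart = std::min(startA, startB);
    return static_cast<uint64_t>(minStart) + static_cast<uint64_t>(length) >
           static_cast<uint64_t>(maxStart);
}

int main() {
    assert(IsRangeOverlapped(0, 3, 4));   // [0,4) and [3,7) share element 3
    assert(!IsRangeOverlapped(0, 4, 4));  // [0,4) and [4,8) only touch: no overlap
    // Near UINT32_MAX a uint32_t sum would wrap to a small value and miss the overlap.
    assert(IsRangeOverlapped(4294967295u, 4294967294u, 2));
}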
|
@ -154,23 +154,23 @@ namespace dawn::native {
|
|||
requiredBytesInCopy += bytesInLastImage;
|
||||
}
|
||||
return requiredBytesInCopy;
|
||||
}
|
||||
}
|
||||
|
||||
MaybeError ValidateCopySizeFitsInBuffer(const Ref<BufferBase>& buffer,
|
||||
MaybeError ValidateCopySizeFitsInBuffer(const Ref<BufferBase>& buffer,
|
||||
uint64_t offset,
|
||||
uint64_t size) {
|
||||
uint64_t bufferSize = buffer->GetSize();
|
||||
bool fitsInBuffer = offset <= bufferSize && (size <= (bufferSize - offset));
|
||||
DAWN_INVALID_IF(!fitsInBuffer,
|
||||
"Copy range (offset: %u, size: %u) does not fit in %s size (%u).", offset,
|
||||
size, buffer.Get(), bufferSize);
|
||||
"Copy range (offset: %u, size: %u) does not fit in %s size (%u).", offset, size,
|
||||
buffer.Get(), bufferSize);
|
||||
|
||||
return {};
|
||||
}
|
||||
}
|
||||
|
||||
// Replace wgpu::kCopyStrideUndefined with real values, so backends don't have to think about
|
||||
// it.
|
||||
void ApplyDefaultTextureDataLayoutOptions(TextureDataLayout* layout,
|
||||
// Replace wgpu::kCopyStrideUndefined with real values, so backends don't have to think about
|
||||
// it.
|
||||
void ApplyDefaultTextureDataLayoutOptions(TextureDataLayout* layout,
|
||||
const TexelBlockInfo& blockInfo,
|
||||
const Extent3D& copyExtent) {
|
||||
ASSERT(layout != nullptr);
|
||||
|
@ -189,9 +189,9 @@ namespace dawn::native {
|
|||
ASSERT(copyExtent.depthOrArrayLayers <= 1);
|
||||
layout->rowsPerImage = heightInBlocks;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
MaybeError ValidateLinearTextureData(const TextureDataLayout& layout,
|
||||
MaybeError ValidateLinearTextureData(const TextureDataLayout& layout,
|
||||
uint64_t byteSize,
|
||||
const TexelBlockInfo& blockInfo,
|
||||
const Extent3D& copyExtent) {
|
||||
|
@ -201,8 +201,7 @@ namespace dawn::native {
|
|||
// TODO(dawn:563): Right now kCopyStrideUndefined will be formatted as a large value in the
|
||||
// validation message. Investigate ways to make it print as a more readable symbol.
|
||||
DAWN_INVALID_IF(
|
||||
copyExtent.depthOrArrayLayers > 1 &&
|
||||
(layout.bytesPerRow == wgpu::kCopyStrideUndefined ||
|
||||
copyExtent.depthOrArrayLayers > 1 && (layout.bytesPerRow == wgpu::kCopyStrideUndefined ||
|
||||
layout.rowsPerImage == wgpu::kCopyStrideUndefined),
|
||||
"Copy depth (%u) is > 1, but bytesPerRow (%u) or rowsPerImage (%u) are not specified.",
|
||||
copyExtent.depthOrArrayLayers, layout.bytesPerRow, layout.rowsPerImage);
|
||||
|
@ -214,8 +213,7 @@ namespace dawn::native {
|
|||
// Validation for other members in layout:
|
||||
ASSERT(copyExtent.width % blockInfo.width == 0);
|
||||
uint32_t widthInBlocks = copyExtent.width / blockInfo.width;
|
||||
ASSERT(Safe32x32(widthInBlocks, blockInfo.byteSize) <=
|
||||
std::numeric_limits<uint32_t>::max());
|
||||
ASSERT(Safe32x32(widthInBlocks, blockInfo.byteSize) <= std::numeric_limits<uint32_t>::max());
|
||||
uint32_t bytesInLastRow = widthInBlocks * blockInfo.byteSize;
|
||||
|
||||
// These != wgpu::kCopyStrideUndefined checks are technically redundant with the > checks,
|
||||
|
@ -225,18 +223,18 @@ namespace dawn::native {
|
|||
"The byte size of each row (%u) is > bytesPerRow (%u).", bytesInLastRow,
|
||||
layout.bytesPerRow);
|
||||
|
||||
DAWN_INVALID_IF(layout.rowsPerImage != wgpu::kCopyStrideUndefined &&
|
||||
heightInBlocks > layout.rowsPerImage,
|
||||
"The height of each image in blocks (%u) is > rowsPerImage (%u).",
|
||||
heightInBlocks, layout.rowsPerImage);
|
||||
DAWN_INVALID_IF(
|
||||
layout.rowsPerImage != wgpu::kCopyStrideUndefined && heightInBlocks > layout.rowsPerImage,
|
||||
"The height of each image in blocks (%u) is > rowsPerImage (%u).", heightInBlocks,
|
||||
layout.rowsPerImage);
|
||||
|
||||
// We compute required bytes in copy after validating texel block alignments
|
||||
// because the divisibility conditions are necessary for the algorithm to be valid,
|
||||
// also the bytesPerRow bound is necessary to avoid overflows.
|
||||
uint64_t requiredBytesInCopy;
|
||||
DAWN_TRY_ASSIGN(requiredBytesInCopy,
|
||||
ComputeRequiredBytesInCopy(blockInfo, copyExtent, layout.bytesPerRow,
|
||||
layout.rowsPerImage));
|
||||
DAWN_TRY_ASSIGN(
|
||||
requiredBytesInCopy,
|
||||
ComputeRequiredBytesInCopy(blockInfo, copyExtent, layout.bytesPerRow, layout.rowsPerImage));
|
||||
|
||||
bool fitsInData =
|
||||
layout.offset <= byteSize && (requiredBytesInCopy <= (byteSize - layout.offset));
|
||||
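ComputeRequiredBytesInCopy sums full images, full row strides in the last image, and a tightly sized final row: the last row only needs its actual byte width, not a whole bytesPerRow stride. A worked sketch of that arithmetic under the assumptions above (uint64_t throughout, per the overflow note; names invented):

#include <cstdint>

// Bytes a linear layout must provide for a copy of widthInBlocks x heightInBlocks
// blocks over `depth` images: every image but the last costs rowsPerImage full
// strides; the last image uses full strides for all rows except the final one.
uint64_t RequiredBytesInCopy(uint64_t bytesPerRow, uint64_t rowsPerImage,
                             uint64_t widthInBlocks, uint64_t heightInBlocks,
                             uint64_t depth, uint64_t bytesPerBlock) {
    if (widthInBlocks == 0 || heightInBlocks == 0 || depth == 0) {
        return 0;  // empty copies need no data
    }
    uint64_t bytesPerImage = bytesPerRow * rowsPerImage;
    uint64_t bytesInLastRow = widthInBlocks * bytesPerBlock;
    uint64_t requiredBytes = bytesPerImage * (depth - 1);
    requiredBytes += bytesPerRow * (heightInBlocks - 1) + bytesInLastRow;
    return requiredBytes;
}
// Example: bytesPerRow 256, rowsPerImage 4, 60x4 blocks of 4 bytes, depth 2
// -> 1024 + (256 * 3 + 240) = 2032 bytes, not a full 2048.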
|
@ -247,9 +245,9 @@ namespace dawn::native {
|
|||
requiredBytesInCopy, byteSize, layout.offset);
|
||||
|
||||
return {};
|
||||
}
|
||||
}
|
||||
|
||||
MaybeError ValidateImageCopyBuffer(DeviceBase const* device,
|
||||
MaybeError ValidateImageCopyBuffer(DeviceBase const* device,
|
||||
const ImageCopyBuffer& imageCopyBuffer) {
|
||||
DAWN_TRY(device->ValidateObject(imageCopyBuffer.buffer));
|
||||
if (imageCopyBuffer.layout.bytesPerRow != wgpu::kCopyStrideUndefined) {
|
||||
|
@ -259,9 +257,9 @@ namespace dawn::native {
|
|||
}
|
||||
|
||||
return {};
|
||||
}
|
||||
}
|
||||
|
||||
MaybeError ValidateImageCopyTexture(DeviceBase const* device,
|
||||
MaybeError ValidateImageCopyTexture(DeviceBase const* device,
|
||||
const ImageCopyTexture& textureCopy,
|
||||
const Extent3D& copySize) {
|
||||
const TextureBase* texture = textureCopy.texture;
|
||||
|
@@ -272,8 +270,7 @@ namespace dawn::native {
                     textureCopy.mipLevel, texture->GetNumMipLevels(), texture);

     DAWN_TRY(ValidateTextureAspect(textureCopy.aspect));
-        DAWN_INVALID_IF(
-            SelectFormatAspects(texture->GetFormat(), textureCopy.aspect) == Aspect::None,
+    DAWN_INVALID_IF(SelectFormatAspects(texture->GetFormat(), textureCopy.aspect) == Aspect::None,
                     "%s format (%s) does not have the selected aspect (%s).", texture,
                     texture->GetFormat().format, textureCopy.aspect);

@@ -287,14 +284,14 @@ namespace dawn::native {
             "Copy origin (%s) and size (%s) does not cover the entire subresource (origin: "
             "[x: 0, y: 0], size: %s) of %s. The entire subresource must be copied when the "
             "format (%s) is a depth/stencil format or the sample count (%u) is > 1.",
-            &textureCopy.origin, &copySize, &subresourceSize, texture,
-            texture->GetFormat().format, texture->GetSampleCount());
+            &textureCopy.origin, &copySize, &subresourceSize, texture, texture->GetFormat().format,
+            texture->GetSampleCount());
     }

     return {};
-    }
+}

-    MaybeError ValidateTextureCopyRange(DeviceBase const* device,
+MaybeError ValidateTextureCopyRange(DeviceBase const* device,
                                     const ImageCopyTexture& textureCopy,
                                     const Extent3D& copySize) {
     const TextureBase* texture = textureCopy.texture;
@@ -311,8 +308,7 @@ namespace dawn::native {
     DAWN_INVALID_IF(
         static_cast<uint64_t>(textureCopy.origin.x) + static_cast<uint64_t>(copySize.width) >
             static_cast<uint64_t>(mipSize.width) ||
-            static_cast<uint64_t>(textureCopy.origin.y) +
-                    static_cast<uint64_t>(copySize.height) >
+            static_cast<uint64_t>(textureCopy.origin.y) + static_cast<uint64_t>(copySize.height) >
                 static_cast<uint64_t>(mipSize.height) ||
             static_cast<uint64_t>(textureCopy.origin.z) +
                     static_cast<uint64_t>(copySize.depthOrArrayLayers) >
@@ -340,19 +336,18 @@ namespace dawn::native {
                         "copySize.width (%u) is not a multiple of compressed texture format block width "
                         "(%u).",
                         copySize.width, blockInfo.width);
-        DAWN_INVALID_IF(
-            copySize.height % blockInfo.height != 0,
+        DAWN_INVALID_IF(copySize.height % blockInfo.height != 0,
                         "copySize.height (%u) is not a multiple of compressed texture format block "
                         "height (%u).",
                         copySize.height, blockInfo.height);
     }

     return {};
-    }
+}

-    // Always returns a single aspect (color, stencil, depth, or ith plane for multi-planar
-    // formats).
-    ResultOrError<Aspect> SingleAspectUsedByImageCopyTexture(const ImageCopyTexture& view) {
+// Always returns a single aspect (color, stencil, depth, or ith plane for multi-planar
+// formats).
+ResultOrError<Aspect> SingleAspectUsedByImageCopyTexture(const ImageCopyTexture& view) {
     const Format& format = view.texture->GetFormat();
     switch (view.aspect) {
         case wgpu::TextureAspect::All: {
@@ -376,9 +371,9 @@ namespace dawn::native {
             break;
     }
     UNREACHABLE();
-    }
+}

-    MaybeError ValidateLinearToDepthStencilCopyRestrictions(const ImageCopyTexture& dst) {
+MaybeError ValidateLinearToDepthStencilCopyRestrictions(const ImageCopyTexture& dst) {
     Aspect aspectUsed;
     DAWN_TRY_ASSIGN(aspectUsed, SingleAspectUsedByImageCopyTexture(dst));
@@ -388,15 +383,15 @@ namespace dawn::native {
             return {};
         default:
             DAWN_INVALID_IF(aspectUsed == Aspect::Depth,
-                            "Cannot copy into the depth aspect of %s with format %s.",
-                            dst.texture, format.format);
+                            "Cannot copy into the depth aspect of %s with format %s.", dst.texture,
+                            format.format);
             break;
     }

     return {};
-    }
+}

-    MaybeError ValidateTextureToTextureCopyCommonRestrictions(const ImageCopyTexture& src,
+MaybeError ValidateTextureToTextureCopyCommonRestrictions(const ImageCopyTexture& src,
                                                            const ImageCopyTexture& dst,
                                                            const Extent3D& copySize) {
     const uint32_t srcSamples = src.texture->GetSampleCount();
@@ -427,14 +422,13 @@ namespace dawn::native {
             return DAWN_FORMAT_VALIDATION_ERROR("Copy is from %s to itself.", src.texture);

         case wgpu::TextureDimension::e2D:
-            DAWN_INVALID_IF(src.mipLevel == dst.mipLevel &&
-                                IsRangeOverlapped(src.origin.z, dst.origin.z,
-                                                  copySize.depthOrArrayLayers),
+            DAWN_INVALID_IF(
+                src.mipLevel == dst.mipLevel &&
+                    IsRangeOverlapped(src.origin.z, dst.origin.z, copySize.depthOrArrayLayers),
                 "Copy source and destination are overlapping layer ranges "
                 "([%u, %u) and [%u, %u)) of %s mip level %u",
-                src.origin.z, src.origin.z + copySize.depthOrArrayLayers,
-                dst.origin.z, dst.origin.z + copySize.depthOrArrayLayers,
-                src.texture, src.mipLevel);
+                src.origin.z, src.origin.z + copySize.depthOrArrayLayers, dst.origin.z,
+                dst.origin.z + copySize.depthOrArrayLayers, src.texture, src.mipLevel);
             break;

         case wgpu::TextureDimension::e3D:
@@ -446,23 +440,22 @@ namespace dawn::native {
     }

     return {};
-    }
+}

-    MaybeError ValidateTextureToTextureCopyRestrictions(const ImageCopyTexture& src,
+MaybeError ValidateTextureToTextureCopyRestrictions(const ImageCopyTexture& src,
                                                     const ImageCopyTexture& dst,
                                                     const Extent3D& copySize) {
     // Metal requires texture-to-texture copies happens between texture formats that equal to
     // each other or only have diff on srgb-ness.
-        DAWN_INVALID_IF(
-            !src.texture->GetFormat().CopyCompatibleWith(dst.texture->GetFormat()),
+    DAWN_INVALID_IF(!src.texture->GetFormat().CopyCompatibleWith(dst.texture->GetFormat()),
                     "Source %s format (%s) and destination %s format (%s) are not copy compatible.",
                     src.texture, src.texture->GetFormat().format, dst.texture,
                     dst.texture->GetFormat().format);

     return ValidateTextureToTextureCopyCommonRestrictions(src, dst, copySize);
-    }
+}

-    MaybeError ValidateCanUseAs(const TextureBase* texture,
+MaybeError ValidateCanUseAs(const TextureBase* texture,
                             wgpu::TextureUsage usage,
                             UsageValidationMode mode) {
     ASSERT(wgpu::HasZeroOrOneBits(usage));
@@ -479,14 +472,13 @@ namespace dawn::native {
     }

     return {};
-    }
+}

-    MaybeError ValidateCanUseAs(const BufferBase* buffer, wgpu::BufferUsage usage) {
+MaybeError ValidateCanUseAs(const BufferBase* buffer, wgpu::BufferUsage usage) {
     ASSERT(wgpu::HasZeroOrOneBits(usage));
-        DAWN_INVALID_IF(!(buffer->GetUsageExternalOnly() & usage),
-                        "%s usage (%s) doesn't include %s.", buffer, buffer->GetUsageExternalOnly(),
-                        usage);
+    DAWN_INVALID_IF(!(buffer->GetUsageExternalOnly() & usage), "%s usage (%s) doesn't include %s.",
+                    buffer, buffer->GetUsageExternalOnly(), usage);
     return {};
-    }
+}

 }  // namespace dawn::native
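IsRangeOverlapped, used by the 2D overlap check above and declared in the header below, takes two layer-range starts and a shared length. Its implementation is not part of this diff; a minimal sketch of such a test, assuming half-open ranges [start, start + length) as in the "[%u, %u)" error message above (illustrative, not necessarily Dawn's exact code):

#include <algorithm>
#include <cstdint>

// Two same-length half-open ranges overlap exactly when the earlier range
// extends past the start of the later one. The 64-bit sum avoids overflow when
// start + length exceeds UINT32_MAX.
bool IsRangeOverlappedSketch(uint32_t startA, uint32_t startB, uint32_t length) {
    uint32_t minStart = std::min(startA, startB);
    uint32_t maxStart = std::max(startA, startB);
    return uint64_t(minStart) + uint64_t(length) > uint64_t(maxStart);
}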
@@ -23,74 +23,74 @@

 namespace dawn::native {

-    class QuerySetBase;
-    struct SyncScopeResourceUsage;
-    struct TexelBlockInfo;
+class QuerySetBase;
+struct SyncScopeResourceUsage;
+struct TexelBlockInfo;

-    MaybeError ValidateSyncScopeResourceUsage(const SyncScopeResourceUsage& usage);
+MaybeError ValidateSyncScopeResourceUsage(const SyncScopeResourceUsage& usage);

-    MaybeError ValidateTimestampQuery(const DeviceBase* device,
+MaybeError ValidateTimestampQuery(const DeviceBase* device,
                                   const QuerySetBase* querySet,
                                   uint32_t queryIndex);

-    MaybeError ValidateWriteBuffer(const DeviceBase* device,
+MaybeError ValidateWriteBuffer(const DeviceBase* device,
                                const BufferBase* buffer,
                                uint64_t bufferOffset,
                                uint64_t size);

-    template <typename A, typename B>
-    DAWN_FORCE_INLINE uint64_t Safe32x32(A a, B b) {
+template <typename A, typename B>
+DAWN_FORCE_INLINE uint64_t Safe32x32(A a, B b) {
     static_assert(std::is_same<A, uint32_t>::value, "'a' must be uint32_t");
     static_assert(std::is_same<B, uint32_t>::value, "'b' must be uint32_t");
     return uint64_t(a) * uint64_t(b);
-    }
+}

-    ResultOrError<uint64_t> ComputeRequiredBytesInCopy(const TexelBlockInfo& blockInfo,
+ResultOrError<uint64_t> ComputeRequiredBytesInCopy(const TexelBlockInfo& blockInfo,
                                                    const Extent3D& copySize,
                                                    uint32_t bytesPerRow,
                                                    uint32_t rowsPerImage);

-    void ApplyDefaultTextureDataLayoutOptions(TextureDataLayout* layout,
+void ApplyDefaultTextureDataLayoutOptions(TextureDataLayout* layout,
                                           const TexelBlockInfo& blockInfo,
                                           const Extent3D& copyExtent);
-    MaybeError ValidateLinearTextureData(const TextureDataLayout& layout,
+MaybeError ValidateLinearTextureData(const TextureDataLayout& layout,
                                      uint64_t byteSize,
                                      const TexelBlockInfo& blockInfo,
                                      const Extent3D& copyExtent);
-    MaybeError ValidateTextureCopyRange(DeviceBase const* device,
+MaybeError ValidateTextureCopyRange(DeviceBase const* device,
                                     const ImageCopyTexture& imageCopyTexture,
                                     const Extent3D& copySize);
-    ResultOrError<Aspect> SingleAspectUsedByImageCopyTexture(const ImageCopyTexture& view);
-    MaybeError ValidateLinearToDepthStencilCopyRestrictions(const ImageCopyTexture& dst);
+ResultOrError<Aspect> SingleAspectUsedByImageCopyTexture(const ImageCopyTexture& view);
+MaybeError ValidateLinearToDepthStencilCopyRestrictions(const ImageCopyTexture& dst);

-    MaybeError ValidateImageCopyBuffer(DeviceBase const* device,
+MaybeError ValidateImageCopyBuffer(DeviceBase const* device,
                                    const ImageCopyBuffer& imageCopyBuffer);
-    MaybeError ValidateImageCopyTexture(DeviceBase const* device,
+MaybeError ValidateImageCopyTexture(DeviceBase const* device,
                                     const ImageCopyTexture& imageCopyTexture,
                                     const Extent3D& copySize);

-    MaybeError ValidateCopySizeFitsInBuffer(const Ref<BufferBase>& buffer,
+MaybeError ValidateCopySizeFitsInBuffer(const Ref<BufferBase>& buffer,
                                         uint64_t offset,
                                         uint64_t size);

-    bool IsRangeOverlapped(uint32_t startA, uint32_t startB, uint32_t length);
+bool IsRangeOverlapped(uint32_t startA, uint32_t startB, uint32_t length);

-    MaybeError ValidateTextureToTextureCopyCommonRestrictions(const ImageCopyTexture& src,
+MaybeError ValidateTextureToTextureCopyCommonRestrictions(const ImageCopyTexture& src,
                                                           const ImageCopyTexture& dst,
                                                           const Extent3D& copySize);
-    MaybeError ValidateTextureToTextureCopyRestrictions(const ImageCopyTexture& src,
+MaybeError ValidateTextureToTextureCopyRestrictions(const ImageCopyTexture& src,
                                                     const ImageCopyTexture& dst,
                                                     const Extent3D& copySize);

-    enum class UsageValidationMode {
+enum class UsageValidationMode {
     Default,
     Internal,
-    };
+};

-    MaybeError ValidateCanUseAs(const TextureBase* texture,
+MaybeError ValidateCanUseAs(const TextureBase* texture,
                             wgpu::TextureUsage usage,
                             UsageValidationMode mode);
-    MaybeError ValidateCanUseAs(const BufferBase* buffer, wgpu::BufferUsage usage);
+MaybeError ValidateCanUseAs(const BufferBase* buffer, wgpu::BufferUsage usage);

 }  // namespace dawn::native
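The Safe32x32 helper declared in the hunk above exists so that products of two uint32_t values are computed in 64-bit arithmetic instead of wrapping at 32 bits. A small self-contained usage sketch (DAWN_FORCE_INLINE dropped; the sample values are arbitrary):

#include <cstdint>
#include <type_traits>

// Same pattern as the header: widen to 64 bits *before* multiplying, so that
// e.g. 70000u * 70000u (which would wrap in 32-bit arithmetic) yields the exact
// 64-bit product. The static_asserts reject narrower or signed arguments, which
// could otherwise be promoted in surprising ways.
template <typename A, typename B>
uint64_t Safe32x32(A a, B b) {
    static_assert(std::is_same<A, uint32_t>::value, "'a' must be uint32_t");
    static_assert(std::is_same<B, uint32_t>::value, "'b' must be uint32_t");
    return uint64_t(a) * uint64_t(b);
}

int main() {
    uint32_t widthInBlocks = 70000u;
    uint32_t byteSize = 70000u;
    // 4'900'000'000 fits in uint64_t; a 32-bit product would have wrapped.
    return Safe32x32(widthInBlocks, byteSize) == 4900000000ull ? 0 : 1;
}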
@@ -25,7 +25,7 @@

 namespace dawn::native {

-    void FreeCommands(CommandIterator* commands) {
+void FreeCommands(CommandIterator* commands) {
     commands->Reset();

     Command type;
@@ -62,8 +62,7 @@ namespace dawn::native {
             break;
         }
         case Command::CopyTextureToTexture: {
-                CopyTextureToTextureCmd* copy =
-                    commands->NextCommand<CopyTextureToTextureCmd>();
+            CopyTextureToTextureCmd* copy = commands->NextCommand<CopyTextureToTextureCmd>();
             copy->~CopyTextureToTextureCmd();
             break;
         }
@@ -211,9 +210,9 @@ namespace dawn::native {
     }

     commands->MakeEmptyAsDataWasDestroyed();
-    }
+}

-    void SkipCommand(CommandIterator* commands, Command type) {
+void SkipCommand(CommandIterator* commands, Command type) {
     switch (type) {
         case Command::BeginComputePass:
             commands->NextCommand<BeginComputePassCmd>();
@@ -360,6 +359,6 @@ namespace dawn::native {
                 break;
             }
         }
-    }
+}

 }  // namespace dawn::native
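The per-command destructor calls in FreeCommands above matter for ownership: each Cmd struct holds Ref<> members that must be released before the iterator's backing storage goes away, which is what the comment in the header below requires. A hypothetical owner type illustrating only the required ordering (the class and member names are invented for this sketch):

#include "dawn/native/Commands.h"

namespace dawn::native {

// Invented example type, not part of this change: the point is the destruction
// order, not the type itself.
class RecordedCommandsSketch {
  public:
    ~RecordedCommandsSketch() {
        // Run each command's destructor (releasing its Ref<> members) before
        // mCommands itself is destroyed and its storage is freed.
        FreeCommands(&mCommands);
    }

  private:
    CommandIterator mCommands;
};

}  // namespace dawn::native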
@@ -29,11 +29,11 @@

 namespace dawn::native {

-    // Definition of the commands that are present in the CommandIterator given by the
-    // CommandBufferBuilder. There are not defined in CommandBuffer.h to break some header
-    // dependencies: Ref<Object> needs Object to be defined.
+// Definition of the commands that are present in the CommandIterator given by the
+// CommandBufferBuilder. There are not defined in CommandBuffer.h to break some header
+// dependencies: Ref<Object> needs Object to be defined.

-    enum class Command {
+enum class Command {
     BeginComputePass,
     BeginOcclusionQuery,
     BeginRenderPass,
@@ -67,31 +67,31 @@ namespace dawn::native {
     SetVertexBuffer,
     WriteBuffer,
     WriteTimestamp,
-    };
+};

-    struct TimestampWrite {
+struct TimestampWrite {
     Ref<QuerySetBase> querySet;
     uint32_t queryIndex;
-    };
+};

-    struct BeginComputePassCmd {
+struct BeginComputePassCmd {
     std::vector<TimestampWrite> timestampWrites;
-    };
+};

-    struct BeginOcclusionQueryCmd {
+struct BeginOcclusionQueryCmd {
     Ref<QuerySetBase> querySet;
     uint32_t queryIndex;
-    };
+};

-    struct RenderPassColorAttachmentInfo {
+struct RenderPassColorAttachmentInfo {
     Ref<TextureViewBase> view;
     Ref<TextureViewBase> resolveTarget;
     wgpu::LoadOp loadOp;
     wgpu::StoreOp storeOp;
     dawn::native::Color clearColor;
-    };
+};

-    struct RenderPassDepthStencilAttachmentInfo {
+struct RenderPassDepthStencilAttachmentInfo {
     Ref<TextureViewBase> view;
     wgpu::LoadOp depthLoadOp;
     wgpu::StoreOp depthStoreOp;
@@ -101,9 +101,9 @@ namespace dawn::native {
     uint32_t clearStencil;
     bool depthReadOnly;
     bool stencilReadOnly;
-    };
+};

-    struct BeginRenderPassCmd {
+struct BeginRenderPassCmd {
     Ref<AttachmentState> attachmentState;
     ityp::array<ColorAttachmentIndex, RenderPassColorAttachmentInfo, kMaxColorAttachments>
         colorAttachments;
@@ -115,185 +115,185 @@ namespace dawn::native {

     Ref<QuerySetBase> occlusionQuerySet;
     std::vector<TimestampWrite> timestampWrites;
-    };
+};

-    struct BufferCopy {
+struct BufferCopy {
     Ref<BufferBase> buffer;
     uint64_t offset;
     uint32_t bytesPerRow;
     uint32_t rowsPerImage;
-    };
+};

-    struct TextureCopy {
+struct TextureCopy {
     Ref<TextureBase> texture;
     uint32_t mipLevel;
     Origin3D origin;  // Texels / array layer
     Aspect aspect;
-    };
+};

-    struct CopyBufferToBufferCmd {
+struct CopyBufferToBufferCmd {
     Ref<BufferBase> source;
     uint64_t sourceOffset;
     Ref<BufferBase> destination;
     uint64_t destinationOffset;
     uint64_t size;
-    };
+};

-    struct CopyBufferToTextureCmd {
+struct CopyBufferToTextureCmd {
     BufferCopy source;
     TextureCopy destination;
     Extent3D copySize;  // Texels
-    };
+};

-    struct CopyTextureToBufferCmd {
+struct CopyTextureToBufferCmd {
     TextureCopy source;
     BufferCopy destination;
     Extent3D copySize;  // Texels
-    };
+};

-    struct CopyTextureToTextureCmd {
+struct CopyTextureToTextureCmd {
     TextureCopy source;
     TextureCopy destination;
     Extent3D copySize;  // Texels
-    };
+};

-    struct DispatchCmd {
+struct DispatchCmd {
     uint32_t x;
     uint32_t y;
     uint32_t z;
-    };
+};

-    struct DispatchIndirectCmd {
+struct DispatchIndirectCmd {
     Ref<BufferBase> indirectBuffer;
     uint64_t indirectOffset;
-    };
+};

-    struct DrawCmd {
+struct DrawCmd {
     uint32_t vertexCount;
     uint32_t instanceCount;
     uint32_t firstVertex;
     uint32_t firstInstance;
-    };
+};

-    struct DrawIndexedCmd {
+struct DrawIndexedCmd {
     uint32_t indexCount;
     uint32_t instanceCount;
     uint32_t firstIndex;
     int32_t baseVertex;
     uint32_t firstInstance;
-    };
+};

-    struct DrawIndirectCmd {
+struct DrawIndirectCmd {
     Ref<BufferBase> indirectBuffer;
     uint64_t indirectOffset;
-    };
+};

-    struct DrawIndexedIndirectCmd : DrawIndirectCmd {};
+struct DrawIndexedIndirectCmd : DrawIndirectCmd {};

-    struct EndComputePassCmd {
+struct EndComputePassCmd {
     std::vector<TimestampWrite> timestampWrites;
-    };
+};

-    struct EndOcclusionQueryCmd {
+struct EndOcclusionQueryCmd {
     Ref<QuerySetBase> querySet;
     uint32_t queryIndex;
-    };
+};

-    struct EndRenderPassCmd {
+struct EndRenderPassCmd {
     std::vector<TimestampWrite> timestampWrites;
-    };
+};

-    struct ExecuteBundlesCmd {
+struct ExecuteBundlesCmd {
     uint32_t count;
-    };
+};

-    struct ClearBufferCmd {
+struct ClearBufferCmd {
     Ref<BufferBase> buffer;
     uint64_t offset;
     uint64_t size;
-    };
+};

-    struct InsertDebugMarkerCmd {
+struct InsertDebugMarkerCmd {
     uint32_t length;
-    };
+};

-    struct PopDebugGroupCmd {};
+struct PopDebugGroupCmd {};

-    struct PushDebugGroupCmd {
+struct PushDebugGroupCmd {
     uint32_t length;
-    };
+};

-    struct ResolveQuerySetCmd {
+struct ResolveQuerySetCmd {
     Ref<QuerySetBase> querySet;
     uint32_t firstQuery;
     uint32_t queryCount;
     Ref<BufferBase> destination;
     uint64_t destinationOffset;
-    };
+};

-    struct SetComputePipelineCmd {
+struct SetComputePipelineCmd {
     Ref<ComputePipelineBase> pipeline;
-    };
+};

-    struct SetRenderPipelineCmd {
+struct SetRenderPipelineCmd {
     Ref<RenderPipelineBase> pipeline;
-    };
+};

-    struct SetStencilReferenceCmd {
+struct SetStencilReferenceCmd {
     uint32_t reference;
-    };
+};

-    struct SetViewportCmd {
+struct SetViewportCmd {
     float x, y, width, height, minDepth, maxDepth;
-    };
+};

-    struct SetScissorRectCmd {
+struct SetScissorRectCmd {
     uint32_t x, y, width, height;
-    };
+};

-    struct SetBlendConstantCmd {
+struct SetBlendConstantCmd {
     Color color;
-    };
+};

-    struct SetBindGroupCmd {
+struct SetBindGroupCmd {
     BindGroupIndex index;
     Ref<BindGroupBase> group;
     uint32_t dynamicOffsetCount;
-    };
+};

-    struct SetIndexBufferCmd {
+struct SetIndexBufferCmd {
     Ref<BufferBase> buffer;
     wgpu::IndexFormat format;
     uint64_t offset;
     uint64_t size;
-    };
+};

-    struct SetVertexBufferCmd {
+struct SetVertexBufferCmd {
     VertexBufferSlot slot;
     Ref<BufferBase> buffer;
     uint64_t offset;
     uint64_t size;
-    };
+};

-    struct WriteBufferCmd {
+struct WriteBufferCmd {
     Ref<BufferBase> buffer;
     uint64_t offset;
     uint64_t size;
-    };
+};

-    struct WriteTimestampCmd {
+struct WriteTimestampCmd {
     Ref<QuerySetBase> querySet;
     uint32_t queryIndex;
-    };
+};

-    // This needs to be called before the CommandIterator is freed so that the Ref<> present in
-    // the commands have a chance to run their destructor and remove internal references.
-    class CommandIterator;
-    void FreeCommands(CommandIterator* commands);
+// This needs to be called before the CommandIterator is freed so that the Ref<> present in
+// the commands have a chance to run their destructor and remove internal references.
+class CommandIterator;
+void FreeCommands(CommandIterator* commands);

-    // Helper function to allow skipping over a command when it is unimplemented, while still
-    // consuming the correct amount of data from the command iterator.
-    void SkipCommand(CommandIterator* commands, Command type);
+// Helper function to allow skipping over a command when it is unimplemented, while still
+// consuming the correct amount of data from the command iterator.
+void SkipCommand(CommandIterator* commands, Command type);

 }  // namespace dawn::native
@@ -21,9 +21,9 @@

 namespace dawn::native {

-    namespace {
+namespace {

-        WGPUCompilationMessageType tintSeverityToMessageType(tint::diag::Severity severity) {
+WGPUCompilationMessageType tintSeverityToMessageType(tint::diag::Severity severity) {
     switch (severity) {
         case tint::diag::Severity::Note:
             return WGPUCompilationMessageType_Info;
@@ -32,17 +32,17 @@ namespace dawn::native {
         default:
             return WGPUCompilationMessageType_Error;
     }
-        }
+}

-    }  // anonymous namespace
+}  // anonymous namespace

-    OwnedCompilationMessages::OwnedCompilationMessages() {
+OwnedCompilationMessages::OwnedCompilationMessages() {
     mCompilationInfo.nextInChain = 0;
     mCompilationInfo.messageCount = 0;
     mCompilationInfo.messages = nullptr;
-    }
+}

-    void OwnedCompilationMessages::AddMessageForTesting(std::string message,
+void OwnedCompilationMessages::AddMessageForTesting(std::string message,
                                                     wgpu::CompilationMessageType type,
                                                     uint64_t lineNum,
                                                     uint64_t linePos,
@@ -52,11 +52,11 @@ namespace dawn::native {
     ASSERT(mCompilationInfo.messages == nullptr);

     mMessageStrings.push_back(message);
-        mMessages.push_back({nullptr, nullptr, static_cast<WGPUCompilationMessageType>(type),
-                             lineNum, linePos, offset, length});
-    }
+    mMessages.push_back({nullptr, nullptr, static_cast<WGPUCompilationMessageType>(type), lineNum,
+                         linePos, offset, length});
+}

-    void OwnedCompilationMessages::AddMessage(const tint::diag::Diagnostic& diagnostic) {
+void OwnedCompilationMessages::AddMessage(const tint::diag::Diagnostic& diagnostic) {
     // Cannot add messages after GetCompilationInfo has been called.
     ASSERT(mCompilationInfo.messages == nullptr);

@@ -114,11 +114,11 @@ namespace dawn::native {
         mMessageStrings.push_back(diagnostic.message);
     }

-        mMessages.push_back({nullptr, nullptr, tintSeverityToMessageType(diagnostic.severity),
-                             lineNum, linePos, offset, length});
-    }
+    mMessages.push_back({nullptr, nullptr, tintSeverityToMessageType(diagnostic.severity), lineNum,
+                         linePos, offset, length});
+}

-    void OwnedCompilationMessages::AddMessages(const tint::diag::List& diagnostics) {
+void OwnedCompilationMessages::AddMessages(const tint::diag::List& diagnostics) {
     // Cannot add messages after GetCompilationInfo has been called.
     ASSERT(mCompilationInfo.messages == nullptr);

@@ -127,17 +127,17 @@ namespace dawn::native {
     }

     AddFormattedTintMessages(diagnostics);
-    }
+}

-    void OwnedCompilationMessages::ClearMessages() {
+void OwnedCompilationMessages::ClearMessages() {
     // Cannot clear messages after GetCompilationInfo has been called.
     ASSERT(mCompilationInfo.messages == nullptr);

     mMessageStrings.clear();
     mMessages.clear();
-    }
+}

-    const WGPUCompilationInfo* OwnedCompilationMessages::GetCompilationInfo() {
+const WGPUCompilationInfo* OwnedCompilationMessages::GetCompilationInfo() {
     mCompilationInfo.messageCount = mMessages.size();
     mCompilationInfo.messages = mMessages.data();

@@ -150,13 +150,13 @@ namespace dawn::native {
     }

     return &mCompilationInfo;
-    }
+}

-    const std::vector<std::string>& OwnedCompilationMessages::GetFormattedTintMessages() {
+const std::vector<std::string>& OwnedCompilationMessages::GetFormattedTintMessages() {
     return mFormattedTintMessages;
-    }
+}

-    void OwnedCompilationMessages::AddFormattedTintMessages(const tint::diag::List& diagnostics) {
+void OwnedCompilationMessages::AddFormattedTintMessages(const tint::diag::List& diagnostics) {
     tint::diag::List messageList;
     size_t warningCount = 0;
     size_t errorCount = 0;
@@ -196,6 +196,6 @@ namespace dawn::native {
     t << "generated while compiling the shader:" << std::endl
       << tint::diag::Formatter{style}.format(messageList);
     mFormattedTintMessages.push_back(t.str());
-    }
+}

 }  // namespace dawn::native
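The repeated ASSERT(mCompilationInfo.messages == nullptr) checks above enforce a strict call order: all additions happen before GetCompilationInfo(), presumably because GetCompilationInfo() publishes pointers into mMessages, and a later push_back could reallocate the vector and invalidate them. A sketch of the intended ordering (the helper and its diagnostics parameter are hypothetical, for illustration only):

#include "dawn/native/CompilationMessages.h"

// Sketch: `diagnostics` stands in for a tint::diag::List produced by a shader
// compile (hypothetical caller).
const WGPUCompilationInfo* CollectInfoSketch(dawn::native::OwnedCompilationMessages* messages,
                                             const tint::diag::List& diagnostics) {
    // All additions must happen first; the ASSERTs in the implementation fire
    // if AddMessages()/ClearMessages() is called after GetCompilationInfo().
    messages->AddMessages(diagnostics);

    // GetCompilationInfo() points mCompilationInfo.messages at the internal
    // vector, so the message list must be frozen from this point on.
    return messages->GetCompilationInfo();
}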
@@ -23,13 +23,13 @@
 #include "dawn/common/NonCopyable.h"

 namespace tint::diag {
-    class Diagnostic;
-    class List;
+class Diagnostic;
+class List;
 }  // namespace tint::diag

 namespace dawn::native {

-    class OwnedCompilationMessages : public NonCopyable {
+class OwnedCompilationMessages : public NonCopyable {
   public:
     OwnedCompilationMessages();
     ~OwnedCompilationMessages() = default;
@@ -55,7 +55,7 @@ namespace dawn::native {
     std::vector<std::string> mMessageStrings;
     std::vector<WGPUCompilationMessage> mMessages;
     std::vector<std::string> mFormattedTintMessages;
-    };
+};

 }  // namespace dawn::native
Some files were not shown because too many files have changed in this diff.