Use C++17 message-less static_assert where applicable.
Bug: dawn:824
Change-Id: I01dda88caaf613092541b62ea1b8d92768d405e9
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/78724
Auto-Submit: Corentin Wallez <cwallez@chromium.org>
Reviewed-by: Austin Eng <enga@chromium.org>
Commit-Queue: Austin Eng <enga@chromium.org>
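For context on the change this commit applies everywhere: before C++17, static_assert required a message operand, so Dawn passed an empty string literal; C++17 added a single-argument form that omits the message. A minimal standalone sketch of the two forms (the CmdHeader struct here is a hypothetical example, not Dawn's actual type; compile with -std=c++17):

#include <cstdint>

struct CmdHeader {
    uint64_t commandSize;  // hypothetical field for illustration only
};

// Pre-C++17: the message operand is mandatory, so code passed "".
static_assert(sizeof(CmdHeader) == sizeof(uint64_t), "");

// C++17: the message can be omitted entirely.
static_assert(sizeof(CmdHeader) == sizeof(uint64_t));

int main() {
    return 0;
}

The diff below mechanically drops the `, ""` from every such assertion.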
This commit is contained in:
parent f9d8f2c0cb
commit 9c2e07cff9
@@ -138,12 +138,12 @@
 };

 {% if is_cmd %}
-static_assert(offsetof({{Return}}{{name}}Transfer, commandSize) == 0, "");
-static_assert(offsetof({{Return}}{{name}}Transfer, commandId) == sizeof(CmdHeader), "");
+static_assert(offsetof({{Return}}{{name}}Transfer, commandSize) == 0);
+static_assert(offsetof({{Return}}{{name}}Transfer, commandId) == sizeof(CmdHeader));
 {% endif %}

 {% if record.chained %}
-static_assert(offsetof({{Return}}{{name}}Transfer, chain) == 0, "");
+static_assert(offsetof({{Return}}{{name}}Transfer, chain) == 0);
 {% endif %}

 //* Returns the required transfer size for `record` in addition to the transfer structure.
@@ -78,7 +78,7 @@ extern void __cdecl __debugbreak(void);
 // It seems that (void) EXPR works on all compilers to silence the unused variable warning.
 #define DAWN_UNUSED(EXPR) (void)EXPR
 // Likewise using static asserting on sizeof(&FUNC) seems to make it tagged as used
-#define DAWN_UNUSED_FUNC(FUNC) static_assert(sizeof(&FUNC) == sizeof(void (*)()), "")
+#define DAWN_UNUSED_FUNC(FUNC) static_assert(sizeof(&FUNC) == sizeof(void (*)()))

 // Add noop replacements for macros for features that aren't supported by the compiler.
 #if !defined(DAWN_LIKELY)
@@ -41,7 +41,7 @@ class DynamicLib {
 template <typename T>
 bool GetProc(T** proc, const std::string& procName, std::string* error = nullptr) const {
 ASSERT(proc != nullptr);
-static_assert(std::is_function<T>::value, "");
+static_assert(std::is_function<T>::value);

 *proc = reinterpret_cast<T*>(GetProc(procName, error));
 return *proc != nullptr;
@@ -310,14 +310,14 @@ template <typename T, typename E>
 template <typename TChild>
 Result<T*, E>::Result(Result<TChild*, E>&& other) : mPayload(other.mPayload) {
 other.mPayload = detail::kEmptyPayload;
-static_assert(std::is_same<T, TChild>::value || std::is_base_of<T, TChild>::value, "");
+static_assert(std::is_same<T, TChild>::value || std::is_base_of<T, TChild>::value);
 }

 template <typename T, typename E>
 template <typename TChild>
 Result<T*, E>& Result<T*, E>::operator=(Result<TChild*, E>&& other) {
 ASSERT(mPayload == detail::kEmptyPayload);
-static_assert(std::is_same<T, TChild>::value || std::is_base_of<T, TChild>::value, "");
+static_assert(std::is_same<T, TChild>::value || std::is_base_of<T, TChild>::value);
 mPayload = other.mPayload;
 other.mPayload = detail::kEmptyPayload;
 return *this;
@@ -410,7 +410,7 @@ template <typename T, typename E>
 template <typename U>
 Result<Ref<T>, E>::Result(Ref<U>&& success)
 : mPayload(detail::MakePayload(success.Detach(), detail::Success)) {
-static_assert(std::is_convertible<U*, T*>::value, "");
+static_assert(std::is_convertible<U*, T*>::value);
 }

 template <typename T, typename E>
@@ -426,14 +426,14 @@ Result<Ref<T>, E>::Result(std::unique_ptr<E> error)
 template <typename T, typename E>
 template <typename U>
 Result<Ref<T>, E>::Result(Result<Ref<U>, E>&& other) : mPayload(other.mPayload) {
-static_assert(std::is_convertible<U*, T*>::value, "");
+static_assert(std::is_convertible<U*, T*>::value);
 other.mPayload = detail::kEmptyPayload;
 }

 template <typename T, typename E>
 template <typename U>
 Result<Ref<U>, E>& Result<Ref<T>, E>::operator=(Result<Ref<U>, E>&& other) {
-static_assert(std::is_convertible<U*, T*>::value, "");
+static_assert(std::is_convertible<U*, T*>::value);
 ASSERT(mPayload == detail::kEmptyPayload);
 mPayload = other.mPayload;
 other.mPayload = detail::kEmptyPayload;
@@ -69,8 +69,8 @@ namespace detail {

 public:
 constexpr TypedIntegerImpl() : mValue(0) {
-static_assert(alignof(TypedIntegerImpl) == alignof(T), "");
-static_assert(sizeof(TypedIntegerImpl) == sizeof(T), "");
+static_assert(alignof(TypedIntegerImpl) == alignof(T));
+static_assert(sizeof(TypedIntegerImpl) == sizeof(T));
 }

 // Construction from non-narrowing integral types.
@@ -134,7 +134,7 @@ namespace detail {
 template <typename T2 = T>
 static constexpr std::enable_if_t<std::is_unsigned<T2>::value, decltype(T(0) + T2(0))>
 AddImpl(TypedIntegerImpl<Tag, T> lhs, TypedIntegerImpl<Tag, T2> rhs) {
-static_assert(std::is_same<T, T2>::value, "");
+static_assert(std::is_same<T, T2>::value);

 // Overflow would wrap around
 ASSERT(lhs.mValue + rhs.mValue >= lhs.mValue);
@@ -144,7 +144,7 @@ namespace detail {
 template <typename T2 = T>
 static constexpr std::enable_if_t<std::is_signed<T2>::value, decltype(T(0) + T2(0))>
 AddImpl(TypedIntegerImpl<Tag, T> lhs, TypedIntegerImpl<Tag, T2> rhs) {
-static_assert(std::is_same<T, T2>::value, "");
+static_assert(std::is_same<T, T2>::value);

 if (lhs.mValue > 0) {
 // rhs is positive: |rhs| is at most the distance between max and |lhs|.
@@ -162,7 +162,7 @@ namespace detail {
 template <typename T2 = T>
 static constexpr std::enable_if_t<std::is_unsigned<T>::value, decltype(T(0) - T2(0))>
 SubImpl(TypedIntegerImpl<Tag, T> lhs, TypedIntegerImpl<Tag, T2> rhs) {
-static_assert(std::is_same<T, T2>::value, "");
+static_assert(std::is_same<T, T2>::value);

 // Overflow would wrap around
 ASSERT(lhs.mValue - rhs.mValue <= lhs.mValue);
@@ -173,7 +173,7 @@ namespace detail {
 static constexpr std::enable_if_t<std::is_signed<T>::value, decltype(T(0) - T2(0))> SubImpl(
 TypedIntegerImpl<Tag, T> lhs,
 TypedIntegerImpl<Tag, T2> rhs) {
-static_assert(std::is_same<T, T2>::value, "");
+static_assert(std::is_same<T, T2>::value);

 if (lhs.mValue > 0) {
 // rhs is positive: positive minus positive won't overflow
@@ -190,7 +190,7 @@ namespace detail {

 template <typename T2 = T>
 constexpr std::enable_if_t<std::is_signed<T2>::value, TypedIntegerImpl> operator-() const {
-static_assert(std::is_same<T, T2>::value, "");
+static_assert(std::is_same<T, T2>::value);
 // The negation of the most negative value cannot be represented.
 ASSERT(this->mValue != std::numeric_limits<T>::min());
 return TypedIntegerImpl(-this->mValue);
@@ -33,7 +33,7 @@ namespace ityp {
 using I = UnderlyingType<Index>;
 using Base = std::array<Value, Size>;

-static_assert(Size <= std::numeric_limits<I>::max(), "");
+static_assert(Size <= std::numeric_limits<I>::max());

 public:
 constexpr array() = default;
@@ -28,7 +28,7 @@ namespace ityp {
 using I = UnderlyingType<Index>;
 using Base = std::bitset<N>;

-static_assert(sizeof(I) <= sizeof(size_t), "");
+static_assert(sizeof(I) <= sizeof(size_t));

 constexpr bitset(const Base& rhs) : Base(rhs) {
 }
@@ -26,7 +26,7 @@ namespace ityp {
 using I = UnderlyingType<Index>;
 using Base = StackVector<Value, StaticCapacity>;
 using VectorBase = std::vector<Value, StackAllocator<Value, StaticCapacity>>;
-static_assert(StaticCapacity <= std::numeric_limits<I>::max(), "");
+static_assert(StaticCapacity <= std::numeric_limits<I>::max());

 public:
 stack_vec() : Base() {
@@ -142,14 +142,14 @@ namespace dawn::native::vulkan {

 } // namespace dawn::native::vulkan

-#define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) \
-DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) \
-namespace dawn::native::vulkan { \
-using object = detail::VkHandle<struct VkTag##object, ::object>; \
-static_assert(sizeof(object) == sizeof(uint64_t), ""); \
-static_assert(alignof(object) == detail::kUint64Alignment, ""); \
-static_assert(sizeof(object) == sizeof(::object), ""); \
-static_assert(alignof(object) == detail::kNativeVkHandleAlignment, ""); \
+#define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) \
+DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) \
+namespace dawn::native::vulkan { \
+using object = detail::VkHandle<struct VkTag##object, ::object>; \
+static_assert(sizeof(object) == sizeof(uint64_t)); \
+static_assert(alignof(object) == detail::kUint64Alignment); \
+static_assert(sizeof(object) == sizeof(::object)); \
+static_assert(alignof(object) == detail::kNativeVkHandleAlignment); \
 } // namespace dawn::native::vulkan

 // Import additional parts of Vulkan that are supported on our architecture and preemptively include
@@ -70,7 +70,7 @@ namespace dawn::native {
 descriptor,
 AlignPtr(reinterpret_cast<char*>(derived) + sizeof(Derived),
 descriptor->layout->GetBindingDataAlignment())) {
-static_assert(std::is_base_of<BindGroupBase, Derived>::value, "");
+static_assert(std::is_base_of<BindGroupBase, Derived>::value);
 }

 // Constructor used only for mocking and testing.
@@ -106,7 +106,7 @@ namespace dawn::native {
 // this layout.
 size_t GetBindingDataSize() const;
 static constexpr size_t GetBindingDataAlignment() {
-static_assert(alignof(Ref<ObjectBase>) <= alignof(BufferBindingData), "");
+static_assert(alignof(Ref<ObjectBase>) <= alignof(BufferBindingData));
 return alignof(BufferBindingData);
 }

@@ -162,9 +162,9 @@ namespace dawn::native {

 template <typename T, typename E>
 T* Allocate(E commandId) {
-static_assert(sizeof(E) == sizeof(uint32_t), "");
-static_assert(alignof(E) == alignof(uint32_t), "");
-static_assert(alignof(T) <= kMaxSupportedAlignment, "");
+static_assert(sizeof(E) == sizeof(uint32_t));
+static_assert(alignof(E) == alignof(uint32_t));
+static_assert(alignof(T) <= kMaxSupportedAlignment);
 T* result = reinterpret_cast<T*>(
 Allocate(static_cast<uint32_t>(commandId), sizeof(T), alignof(T)));
 if (!result) {
@@ -176,7 +176,7 @@ namespace dawn::native {

 template <typename T>
 T* AllocateData(size_t count) {
-static_assert(alignof(T) <= kMaxSupportedAlignment, "");
+static_assert(alignof(T) <= kMaxSupportedAlignment);
 T* result = reinterpret_cast<T*>(AllocateData(sizeof(T) * count, alignof(T)));
 if (!result) {
 return nullptr;
@@ -53,7 +53,7 @@ namespace dawn::native {

 VALIDATION_ASPECT_COUNT
 };
-static_assert(VALIDATION_ASPECT_COUNT == CommandBufferStateTracker::kNumAspects, "");
+static_assert(VALIDATION_ASPECT_COUNT == CommandBufferStateTracker::kNumAspects);

 static constexpr CommandBufferStateTracker::ValidationAspects kDispatchAspects =
 1 << VALIDATION_ASPECT_PIPELINE | 1 << VALIDATION_ASPECT_BIND_GROUPS;
@@ -219,7 +219,7 @@ namespace dawn::native {
 GammaTransferParams gammaEncodingParams = {};
 GammaTransferParams gammaDecodingForDstSrgbParams = {};
 };
-static_assert(sizeof(Uniform) == 176, "");
+static_assert(sizeof(Uniform) == 176);

 // TODO(crbug.com/dawn/856): Expand copyTextureForBrowser to support any
 // non-depth, non-stencil, non-compressed texture format pair copy. Now this API
@@ -23,7 +23,7 @@ namespace dawn::native {
 template <typename T>
 class EnumMaskIterator final {
 static constexpr size_t N = EnumBitmaskSize<T>::value;
-static_assert(N > 0, "");
+static_assert(N > 0);

 using U = std::underlying_type_t<T>;

@@ -53,7 +53,7 @@ namespace dawn::native {
 break;
 }

-static_assert(static_cast<uint32_t>(wgpu::TextureSampleType::Undefined) == 0, "");
+static_assert(static_cast<uint32_t>(wgpu::TextureSampleType::Undefined) == 0);
 if (sampleType == wgpu::TextureSampleType::Undefined) {
 return SampleTypeBit::None;
 }
@@ -61,26 +61,21 @@ namespace dawn::native {
 // Check that SampleTypeBit bits are in the same position / order as the respective
 // wgpu::TextureSampleType value.
 static_assert(SampleTypeBit::Float ==
-static_cast<SampleTypeBit>(
-1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Float) - 1)),
-"");
+static_cast<SampleTypeBit>(
+1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Float) - 1)));
 static_assert(
 SampleTypeBit::UnfilterableFloat ==
-static_cast<SampleTypeBit>(
-1 << (static_cast<uint32_t>(wgpu::TextureSampleType::UnfilterableFloat) - 1)),
-"");
+static_cast<SampleTypeBit>(
+1 << (static_cast<uint32_t>(wgpu::TextureSampleType::UnfilterableFloat) - 1)));
 static_assert(SampleTypeBit::Uint ==
-static_cast<SampleTypeBit>(
-1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Uint) - 1)),
-"");
+static_cast<SampleTypeBit>(
+1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Uint) - 1)));
 static_assert(SampleTypeBit::Sint ==
-static_cast<SampleTypeBit>(
-1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Sint) - 1)),
-"");
+static_cast<SampleTypeBit>(
+1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Sint) - 1)));
 static_assert(SampleTypeBit::Depth ==
-static_cast<SampleTypeBit>(
-1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Depth) - 1)),
-"");
+static_cast<SampleTypeBit>(
+1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Depth) - 1)));
 return static_cast<SampleTypeBit>(1 << (static_cast<uint32_t>(sampleType) - 1));
 }

@@ -131,8 +126,8 @@ namespace dawn::native {
 size_t ComputeFormatIndex(wgpu::TextureFormat format) {
 // This takes advantage of overflows to make the index of TextureFormat::Undefined outside
 // of the range of the FormatTable.
-static_assert(static_cast<uint32_t>(wgpu::TextureFormat::Undefined) - 1 > kKnownFormatCount,
-"");
+static_assert(static_cast<uint32_t>(wgpu::TextureFormat::Undefined) - 1 >
+kKnownFormatCount);
 return static_cast<size_t>(static_cast<uint32_t>(format) - 1);
 }

@@ -49,7 +49,7 @@ namespace dawn::native {
 static constexpr void Call(ObjectContentHasher* recorder, T* obj) {
 // Calling Record(objPtr) is not allowed. This check exists to only prevent such
 // mistakes.
-static_assert(obj == nullptr, "");
+static_assert(obj == nullptr);
 }
 };

@@ -33,19 +33,16 @@ namespace dawn::native {
 const absl::FormatConversionSpec& spec,
 absl::FormatSink* s);

-static_assert(static_cast<uint32_t>(SingleShaderStage::Vertex) < kNumStages, "");
-static_assert(static_cast<uint32_t>(SingleShaderStage::Fragment) < kNumStages, "");
-static_assert(static_cast<uint32_t>(SingleShaderStage::Compute) < kNumStages, "");
+static_assert(static_cast<uint32_t>(SingleShaderStage::Vertex) < kNumStages);
+static_assert(static_cast<uint32_t>(SingleShaderStage::Fragment) < kNumStages);
+static_assert(static_cast<uint32_t>(SingleShaderStage::Compute) < kNumStages);

 static_assert(static_cast<uint32_t>(wgpu::ShaderStage::Vertex) ==
-(1 << static_cast<uint32_t>(SingleShaderStage::Vertex)),
-"");
+(1 << static_cast<uint32_t>(SingleShaderStage::Vertex)));
 static_assert(static_cast<uint32_t>(wgpu::ShaderStage::Fragment) ==
-(1 << static_cast<uint32_t>(SingleShaderStage::Fragment)),
-"");
+(1 << static_cast<uint32_t>(SingleShaderStage::Fragment)));
 static_assert(static_cast<uint32_t>(wgpu::ShaderStage::Compute) ==
-(1 << static_cast<uint32_t>(SingleShaderStage::Compute)),
-"");
+(1 << static_cast<uint32_t>(SingleShaderStage::Compute)));

 BitSetIterator<kNumStages, SingleShaderStage> IterateStages(wgpu::ShaderStage stages);
 wgpu::ShaderStage StageBit(SingleShaderStage stage);
@@ -29,10 +29,10 @@ namespace dawn::native {
 namespace {

 // Assert the offsets in dawn::native::TimestampParams are same with the ones in the shader
-static_assert(offsetof(dawn::native::TimestampParams, first) == 0, "");
-static_assert(offsetof(dawn::native::TimestampParams, count) == 4, "");
-static_assert(offsetof(dawn::native::TimestampParams, offset) == 8, "");
-static_assert(offsetof(dawn::native::TimestampParams, period) == 12, "");
+static_assert(offsetof(dawn::native::TimestampParams, first) == 0);
+static_assert(offsetof(dawn::native::TimestampParams, count) == 4);
+static_assert(offsetof(dawn::native::TimestampParams, offset) == 8);
+static_assert(offsetof(dawn::native::TimestampParams, period) == 12);

 static const char sConvertTimestampsToNanoseconds[] = R"(
 struct Timestamp {
@@ -228,7 +228,7 @@ namespace dawn::native::null {
 }

 MaybeError Device::IncrementMemoryUsage(uint64_t bytes) {
-static_assert(kMaxMemoryUsage <= std::numeric_limits<size_t>::max(), "");
+static_assert(kMaxMemoryUsage <= std::numeric_limits<size_t>::max());
 if (bytes > kMaxMemoryUsage || mMemoryUsage > kMaxMemoryUsage - bytes) {
 return DAWN_OUT_OF_MEMORY_ERROR("Out of memory.");
 }

@@ -56,7 +56,7 @@ namespace dawn::native::vulkan {
 mMaxSets = kMaxDescriptorsPerPool;
 } else {
 ASSERT(totalDescriptorCount <= kMaxBindingsPerPipelineLayout);
-static_assert(kMaxBindingsPerPipelineLayout <= kMaxDescriptorsPerPool, "");
+static_assert(kMaxBindingsPerPipelineLayout <= kMaxDescriptorsPerPool);

 // Compute the total number of descriptors sets that fits given the max.
 mMaxSets = kMaxDescriptorsPerPool / totalDescriptorCount;
@@ -210,17 +210,13 @@ namespace dawn::native::vulkan {
 bool isDeclaredInFragmentShader) {
 // Vulkan and Dawn color write masks match, static assert it and return the mask
 static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Red) ==
-VK_COLOR_COMPONENT_R_BIT,
-"");
+VK_COLOR_COMPONENT_R_BIT);
 static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Green) ==
-VK_COLOR_COMPONENT_G_BIT,
-"");
+VK_COLOR_COMPONENT_G_BIT);
 static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Blue) ==
-VK_COLOR_COMPONENT_B_BIT,
-"");
+VK_COLOR_COMPONENT_B_BIT);
 static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Alpha) ==
-VK_COLOR_COMPONENT_A_BIT,
-"");
+VK_COLOR_COMPONENT_A_BIT);

 // According to Vulkan SPEC (Chapter 14.3): "The input values to blending or color
 // attachment writes are undefined for components which do not correspond to a fragment
@@ -71,8 +71,8 @@ namespace dawn::wire::server {
 // an std::unordered_set. This lets us avoid providing our own hash and
 // equality comparison operators.
 inline uint64_t PackObjectTypeAndId(ObjectType type, ObjectId id) {
-static_assert(sizeof(ObjectType) * 8 <= 32, "");
-static_assert(sizeof(ObjectId) * 8 <= 32, "");
+static_assert(sizeof(ObjectType) * 8 <= 32);
+static_assert(sizeof(ObjectId) * 8 <= 32);
 return (static_cast<uint64_t>(type) << 32) + id;
 }

@@ -350,7 +350,7 @@ class TextureFormatTest : public DawnTest {

 template <typename T>
 void DoUnormTest(FormatTestInfo formatInfo) {
-static_assert(!std::is_signed<T>::value && std::is_integral<T>::value, "");
+static_assert(!std::is_signed<T>::value && std::is_integral<T>::value);
 ASSERT(sizeof(T) * formatInfo.componentCount == formatInfo.texelByteSize);
 ASSERT(formatInfo.type == wgpu::TextureComponentType::Float);
@@ -364,7 +364,7 @@ class TextureFormatTest : public DawnTest {

 template <typename T>
 void DoSnormTest(FormatTestInfo formatInfo) {
-static_assert(std::is_signed<T>::value && std::is_integral<T>::value, "");
+static_assert(std::is_signed<T>::value && std::is_integral<T>::value);
 ASSERT(sizeof(T) * formatInfo.componentCount == formatInfo.texelByteSize);
 ASSERT(formatInfo.type == wgpu::TextureComponentType::Float);
@@ -380,7 +380,7 @@ class TextureFormatTest : public DawnTest {

 template <typename T>
 void DoUintTest(FormatTestInfo formatInfo) {
-static_assert(!std::is_signed<T>::value && std::is_integral<T>::value, "");
+static_assert(!std::is_signed<T>::value && std::is_integral<T>::value);
 ASSERT(sizeof(T) * formatInfo.componentCount == formatInfo.texelByteSize);
 ASSERT(formatInfo.type == wgpu::TextureComponentType::Uint);
@@ -394,7 +394,7 @@ class TextureFormatTest : public DawnTest {

 template <typename T>
 void DoSintTest(FormatTestInfo formatInfo) {
-static_assert(std::is_signed<T>::value && std::is_integral<T>::value, "");
+static_assert(std::is_signed<T>::value && std::is_integral<T>::value);
 ASSERT(sizeof(T) * formatInfo.componentCount == formatInfo.texelByteSize);
 ASSERT(formatInfo.type == wgpu::TextureComponentType::Sint);
@@ -577,7 +577,7 @@ TEST_P(VertexStateTest, OverlappingVertexAttributes) {
 uint32_t uints[2];
 uint16_t halfs[2];
 };
-static_assert(sizeof(Data) == 16, "");
+static_assert(sizeof(Data) == 16);
 Data data{1.f, {2u, 3u}, {Float32ToFloat16(4.f), Float32ToFloat16(5.f)}};

 wgpu::Buffer vertexBuffer =
@@ -35,10 +35,10 @@ namespace {
 {1, dawn::platform::TraceCategory::GPUWork},
 };

-static_assert(static_cast<uint32_t>(dawn::platform::TraceCategory::General) == 0, "");
-static_assert(static_cast<uint32_t>(dawn::platform::TraceCategory::Validation) == 1, "");
-static_assert(static_cast<uint32_t>(dawn::platform::TraceCategory::Recording) == 2, "");
-static_assert(static_cast<uint32_t>(dawn::platform::TraceCategory::GPUWork) == 3, "");
+static_assert(static_cast<uint32_t>(dawn::platform::TraceCategory::General) == 0);
+static_assert(static_cast<uint32_t>(dawn::platform::TraceCategory::Validation) == 1);
+static_assert(static_cast<uint32_t>(dawn::platform::TraceCategory::Recording) == 2);
+static_assert(static_cast<uint32_t>(dawn::platform::TraceCategory::GPUWork) == 3);

 } // anonymous namespace

@@ -42,7 +42,7 @@ namespace dawn {

 namespace dawn::native {

-static_assert(EnumBitmaskSize<TestAspect>::value == 3, "");
+static_assert(EnumBitmaskSize<TestAspect>::value == 3);

 TEST(EnumMaskIteratorTests, None) {
 for (TestAspect aspect : IterateEnumMask(static_cast<TestAspect>(0))) {
@@ -28,9 +28,9 @@ class ITypArrayTest : public testing::Test {
 static constexpr Array kArr = {Val(0), Val(1), Val(2), Val(3), Val(4),
 Val(5), Val(6), Val(7), Val(8), Val(9)};

-static_assert(kArr[Key(3)] == Val(3), "");
-static_assert(kArr.at(Key(7)) == Val(7), "");
-static_assert(kArr.size() == Key(10), "");
+static_assert(kArr[Key(3)] == Val(3));
+static_assert(kArr.at(Key(7)) == Val(7));
+static_assert(kArr.size() == Key(10));
 };
 };

@@ -28,17 +28,17 @@ class ITypBitsetTest : public testing::Test {
 struct ConstexprTest {
 static constexpr Bitset kBitset = {1 << 0 | 1 << 3 | 1 << 7 | 1 << 8};

-static_assert(kBitset[Key(0)] == true, "");
-static_assert(kBitset[Key(1)] == false, "");
-static_assert(kBitset[Key(2)] == false, "");
-static_assert(kBitset[Key(3)] == true, "");
-static_assert(kBitset[Key(4)] == false, "");
-static_assert(kBitset[Key(5)] == false, "");
-static_assert(kBitset[Key(6)] == false, "");
-static_assert(kBitset[Key(7)] == true, "");
-static_assert(kBitset[Key(8)] == true, "");
+static_assert(kBitset[Key(0)] == true);
+static_assert(kBitset[Key(1)] == false);
+static_assert(kBitset[Key(2)] == false);
+static_assert(kBitset[Key(3)] == true);
+static_assert(kBitset[Key(4)] == false);
+static_assert(kBitset[Key(5)] == false);
+static_assert(kBitset[Key(6)] == false);
+static_assert(kBitset[Key(7)] == true);
+static_assert(kBitset[Key(8)] == true);

-static_assert(kBitset.size() == 9, "");
+static_assert(kBitset.size() == 9);
 };

 void ExpectBits(const Bitset& bits, std::set<size_t> indices) {
@@ -54,9 +54,9 @@ TEST(Math, Log2) {
 ASSERT_EQ(Log2(0xFFFFFFFFu), 31u);
 ASSERT_EQ(Log2(static_cast<uint64_t>(0xFFFFFFFFFFFFFFFF)), 63u);

-static_assert(ConstexprLog2(1u) == 0u, "");
-static_assert(ConstexprLog2(0xFFFFFFFFu) == 31u, "");
-static_assert(ConstexprLog2(static_cast<uint64_t>(0xFFFFFFFFFFFFFFFF)) == 63u, "");
+static_assert(ConstexprLog2(1u) == 0u);
+static_assert(ConstexprLog2(0xFFFFFFFFu) == 31u);
+static_assert(ConstexprLog2(static_cast<uint64_t>(0xFFFFFFFFFFFFFFFF)) == 63u);

 // Test boundary between two logs
 ASSERT_EQ(Log2(0x80000000u), 31u);
@@ -64,16 +64,16 @@ TEST(Math, Log2) {
 ASSERT_EQ(Log2(static_cast<uint64_t>(0x8000000000000000)), 63u);
 ASSERT_EQ(Log2(static_cast<uint64_t>(0x7FFFFFFFFFFFFFFF)), 62u);

-static_assert(ConstexprLog2(0x80000000u) == 31u, "");
-static_assert(ConstexprLog2(0x7FFFFFFFu) == 30u, "");
-static_assert(ConstexprLog2(static_cast<uint64_t>(0x8000000000000000)) == 63u, "");
-static_assert(ConstexprLog2(static_cast<uint64_t>(0x7FFFFFFFFFFFFFFF)) == 62u, "");
+static_assert(ConstexprLog2(0x80000000u) == 31u);
+static_assert(ConstexprLog2(0x7FFFFFFFu) == 30u);
+static_assert(ConstexprLog2(static_cast<uint64_t>(0x8000000000000000)) == 63u);
+static_assert(ConstexprLog2(static_cast<uint64_t>(0x7FFFFFFFFFFFFFFF)) == 62u);

 ASSERT_EQ(Log2(16u), 4u);
 ASSERT_EQ(Log2(15u), 3u);

-static_assert(ConstexprLog2(16u) == 4u, "");
-static_assert(ConstexprLog2(15u) == 3u, "");
+static_assert(ConstexprLog2(16u) == 4u);
+static_assert(ConstexprLog2(15u) == 3u);
 }

 // Tests for Log2Ceil
@@ -83,9 +83,9 @@ TEST(Math, Log2Ceil) {
 ASSERT_EQ(Log2Ceil(0xFFFFFFFFu), 32u);
 ASSERT_EQ(Log2Ceil(static_cast<uint64_t>(0xFFFFFFFFFFFFFFFF)), 64u);

-static_assert(ConstexprLog2Ceil(1u) == 0u, "");
-static_assert(ConstexprLog2Ceil(0xFFFFFFFFu) == 32u, "");
-static_assert(ConstexprLog2Ceil(static_cast<uint64_t>(0xFFFFFFFFFFFFFFFF)) == 64u, "");
+static_assert(ConstexprLog2Ceil(1u) == 0u);
+static_assert(ConstexprLog2Ceil(0xFFFFFFFFu) == 32u);
+static_assert(ConstexprLog2Ceil(static_cast<uint64_t>(0xFFFFFFFFFFFFFFFF)) == 64u);

 // Test boundary between two logs
 ASSERT_EQ(Log2Ceil(0x80000001u), 32u);
@@ -95,20 +95,20 @@ TEST(Math, Log2Ceil) {
 ASSERT_EQ(Log2Ceil(static_cast<uint64_t>(0x8000000000000000)), 63u);
 ASSERT_EQ(Log2Ceil(static_cast<uint64_t>(0x7FFFFFFFFFFFFFFF)), 63u);

-static_assert(ConstexprLog2Ceil(0x80000001u) == 32u, "");
-static_assert(ConstexprLog2Ceil(0x80000000u) == 31u, "");
-static_assert(ConstexprLog2Ceil(0x7FFFFFFFu) == 31u, "");
-static_assert(ConstexprLog2Ceil(static_cast<uint64_t>(0x8000000000000001)) == 64u, "");
-static_assert(ConstexprLog2Ceil(static_cast<uint64_t>(0x8000000000000000)) == 63u, "");
-static_assert(ConstexprLog2Ceil(static_cast<uint64_t>(0x7FFFFFFFFFFFFFFF)) == 63u, "");
+static_assert(ConstexprLog2Ceil(0x80000001u) == 32u);
+static_assert(ConstexprLog2Ceil(0x80000000u) == 31u);
+static_assert(ConstexprLog2Ceil(0x7FFFFFFFu) == 31u);
+static_assert(ConstexprLog2Ceil(static_cast<uint64_t>(0x8000000000000001)) == 64u);
+static_assert(ConstexprLog2Ceil(static_cast<uint64_t>(0x8000000000000000)) == 63u);
+static_assert(ConstexprLog2Ceil(static_cast<uint64_t>(0x7FFFFFFFFFFFFFFF)) == 63u);

 ASSERT_EQ(Log2Ceil(17u), 5u);
 ASSERT_EQ(Log2Ceil(16u), 4u);
 ASSERT_EQ(Log2Ceil(15u), 4u);

-static_assert(ConstexprLog2Ceil(17u) == 5u, "");
-static_assert(ConstexprLog2Ceil(16u) == 4u, "");
-static_assert(ConstexprLog2Ceil(15u) == 4u, "");
+static_assert(ConstexprLog2Ceil(17u) == 5u);
+static_assert(ConstexprLog2Ceil(16u) == 4u);
+static_assert(ConstexprLog2Ceil(15u) == 4u);
 }

 // Tests for IsPowerOfTwo
@@ -66,7 +66,7 @@ TEST_F(PlacementAllocatedTests, DeletionDoesNotFreeMemory) {
 delete foo;

 // Touch the memory, this shouldn't crash.
-static_assert(sizeof(Foo) >= sizeof(uint32_t), "");
+static_assert(sizeof(Foo) >= sizeof(uint32_t));
 *reinterpret_cast<uint32_t*>(foo) = 42;

 free(ptr);
@@ -87,7 +87,7 @@ TEST_F(PlacementAllocatedTests, DeletingDerivedClassCallsBaseDestructor) {
 }

 // Touch the memory, this shouldn't crash.
-static_assert(sizeof(Bar) >= sizeof(uint32_t), "");
+static_assert(sizeof(Bar) >= sizeof(uint32_t));
 *reinterpret_cast<uint32_t*>(bar) = 42;

 free(ptr);
@@ -108,7 +108,7 @@ TEST_F(PlacementAllocatedTests, DeletingBaseClassCallsDerivedDestructor) {
 }

 // Touch the memory, this shouldn't crash.
-static_assert(sizeof(Bar) >= sizeof(uint32_t), "");
+static_assert(sizeof(Bar) >= sizeof(uint32_t));
 *reinterpret_cast<uint32_t*>(foo) = 42;

 free(ptr);
@@ -45,7 +45,7 @@ TEST(ToBackend, Pointers) {
 const AdapterBase* base = adapter;

 auto backendAdapter = ToBackend(base);
-static_assert(std::is_same<decltype(backendAdapter), const MyAdapter*>::value, "");
+static_assert(std::is_same<decltype(backendAdapter), const MyAdapter*>::value);
 ASSERT_EQ(adapter, backendAdapter);

 adapter->Release();
@@ -55,7 +55,7 @@ TEST(ToBackend, Pointers) {
 AdapterBase* base = adapter;

 auto backendAdapter = ToBackend(base);
-static_assert(std::is_same<decltype(backendAdapter), MyAdapter*>::value, "");
+static_assert(std::is_same<decltype(backendAdapter), MyAdapter*>::value);
 ASSERT_EQ(adapter, backendAdapter);

 adapter->Release();
@@ -69,7 +69,7 @@ TEST(ToBackend, Ref) {
 const Ref<AdapterBase> base(adapter);

 const auto& backendAdapter = ToBackend(base);
-static_assert(std::is_same<decltype(ToBackend(base)), const Ref<MyAdapter>&>::value, "");
+static_assert(std::is_same<decltype(ToBackend(base)), const Ref<MyAdapter>&>::value);
 ASSERT_EQ(adapter, backendAdapter.Get());

 adapter->Release();
@@ -79,7 +79,7 @@ TEST(ToBackend, Ref) {
 Ref<AdapterBase> base(adapter);

 auto backendAdapter = ToBackend(base);
-static_assert(std::is_same<decltype(ToBackend(base)), Ref<MyAdapter>&>::value, "");
+static_assert(std::is_same<decltype(ToBackend(base)), Ref<MyAdapter>&>::value);
 ASSERT_EQ(adapter, backendAdapter.Get());

 adapter->Release();
@@ -31,8 +31,8 @@ TEST_F(TypedIntegerTest, ConstructionAndCast) {
 Unsigned uvalue(7);
 EXPECT_EQ(static_cast<uint32_t>(uvalue), 7u);

-static_assert(static_cast<int32_t>(Signed(3)) == 3, "");
-static_assert(static_cast<uint32_t>(Unsigned(28)) == 28, "");
+static_assert(static_cast<int32_t>(Signed(3)) == 3);
+static_assert(static_cast<uint32_t>(Unsigned(28)) == 28);
 }

 // Test typed integer comparison operators
@@ -146,8 +146,8 @@ TEST_F(TypedIntegerTest, NumericLimits) {
 }

 TEST_F(TypedIntegerTest, UnderlyingType) {
-static_assert(std::is_same<UnderlyingType<Unsigned>, uint32_t>::value, "");
-static_assert(std::is_same<UnderlyingType<Signed>, int32_t>::value, "");
+static_assert(std::is_same<UnderlyingType<Unsigned>, uint32_t>::value);
+static_assert(std::is_same<UnderlyingType<Signed>, int32_t>::value);
 }

 // Tests for bounds assertions on arithmetic overflow and underflow.
@@ -1190,10 +1190,8 @@ TEST_F(BindGroupLayoutValidationTest, DynamicBufferNumberLimit) {

 // In this test, we use all the same shader stage. Ensure that this does not exceed the
 // per-stage limit.
-static_assert(kMaxDynamicUniformBuffersPerPipelineLayout <= kMaxUniformBuffersPerShaderStage,
-"");
-static_assert(kMaxDynamicStorageBuffersPerPipelineLayout <= kMaxStorageBuffersPerShaderStage,
-"");
+static_assert(kMaxDynamicUniformBuffersPerPipelineLayout <= kMaxUniformBuffersPerShaderStage);
+static_assert(kMaxDynamicStorageBuffersPerPipelineLayout <= kMaxStorageBuffersPerShaderStage);

 for (uint32_t i = 0; i < kMaxDynamicUniformBuffersPerPipelineLayout; ++i) {
 maxUniformDB.push_back(utils::BindingLayoutEntryInitializationHelper(