Replace reinterpret_cast with FromAPI/ToAPI where possible
This brings more type safety to the code and is marginally more readable.

Bug: None
Change-Id: I0330a8a8e95cd9b8b531af266acd8fdc50c50460
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/71606
Reviewed-by: Brandon Jones <bajones@chromium.org>
Reviewed-by: Austin Eng <enga@chromium.org>
Commit-Queue: Austin Eng <enga@chromium.org>
Auto-Submit: Corentin Wallez <cwallez@chromium.org>
commit c6d3a840da (parent 9dc3c0bc86)
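For context, FromAPI and ToAPI are small typed conversion helpers between the webgpu.h C handles (WGPUDevice, WGPUBuffer, ...) and the dawn_native frontend types (DeviceBase, BufferBase, ...). Their exact declarations are not part of this diff, so the snippet below is only a minimal sketch of the pattern for the device pair, with assumed names and signatures rather than the actual Dawn headers.

    // Illustrative sketch only: assumed declarations, not the real Dawn code.
    typedef struct WGPUDeviceImpl* WGPUDevice;  // opaque C API handle

    namespace dawn_native {

        class DeviceBase;  // frontend type backing the handle

        // C handle -> frontend pointer. One overload exists per object type, so
        // passing the wrong kind of handle is a compile error instead of a silent
        // bit reinterpretation; that is the type-safety gain over spelling out
        // reinterpret_cast at every call site.
        inline DeviceBase* FromAPI(WGPUDevice device) {
            return reinterpret_cast<DeviceBase*>(device);
        }

        // Frontend pointer -> C handle, the inverse conversion.
        inline WGPUDevice ToAPI(DeviceBase* device) {
            return reinterpret_cast<WGPUDevice>(device);
        }

    }  // namespace dawn_native

Call sites then read FromAPI(device)->APITick() rather than reinterpret_cast<dawn_native::DeviceBase*>(device)->APITick(), and backend code composes it with the existing ToBackend() helper, e.g. ToBackend(FromAPI(device)), as the hunks below show.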
@@ -39,7 +39,7 @@ namespace dawn_native {
     {%- endfor -%}
 ) {
     //* Perform conversion between C types and frontend types
-    auto self = reinterpret_cast<{{as_frontendType(type)}}>(cSelf);
+    auto self = FromAPI(cSelf);

     {% for arg in method.arguments %}
         {% set varName = as_varName(arg.name) %}
@@ -63,7 +63,7 @@ namespace dawn_native {
     );
     {% if method.return_type.name.canonical_case() != "void" %}
         {% if method.return_type.category == "object" %}
-            return reinterpret_cast<{{as_cType(method.return_type.name)}}>(result);
+            return ToAPI(result);
         {% else %}
             return result;
         {% endif %}
@@ -84,10 +84,8 @@ namespace dawn_native {
     static constexpr size_t sProcMapSize = sizeof(sProcMap) / sizeof(sProcMap[0]);
 }

-WGPUInstance NativeCreateInstance(WGPUInstanceDescriptor const* cDescriptor) {
-    const dawn_native::InstanceDescriptor* descriptor =
-        reinterpret_cast<const dawn_native::InstanceDescriptor*>(cDescriptor);
-    return reinterpret_cast<WGPUInstance>(InstanceBase::Create(descriptor));
+WGPUInstance NativeCreateInstance(WGPUInstanceDescriptor const* descriptor) {
+    return ToAPI(InstanceBase::Create(FromAPI(descriptor)));
 }

 WGPUProc NativeGetProcAddress(WGPUDevice, const char* procName) {
@@ -119,7 +119,7 @@ namespace dawn_native {
     // to store them (ex. by calling GetLimits directly instead). Currently,
     // we keep this function as it's only used internally in Chromium to
     // send the adapter properties across the wire.
-    GetLimits(reinterpret_cast<SupportedLimits*>(&adapterProperties.limits));
+    GetLimits(FromAPI(&adapterProperties.limits));
     return adapterProperties;
 }

@@ -149,19 +149,18 @@ namespace dawn_native {
 void AdapterBase::RequestDevice(const DawnDeviceDescriptor* descriptor,
                                 WGPURequestDeviceCallback callback,
                                 void* userdata) {
-    DeviceBase* result = nullptr;
-    MaybeError err = CreateDeviceInternal(&result, descriptor);
-    WGPUDevice device = reinterpret_cast<WGPUDevice>(result);
+    DeviceBase* device = nullptr;
+    MaybeError err = CreateDeviceInternal(&device, descriptor);

     if (err.IsError()) {
         std::unique_ptr<ErrorData> errorData = err.AcquireError();
-        callback(WGPURequestDeviceStatus_Error, device,
+        callback(WGPURequestDeviceStatus_Error, ToAPI(device),
                  errorData->GetFormattedMessage().c_str(), userdata);
         return;
     }
     WGPURequestDeviceStatus status =
         device == nullptr ? WGPURequestDeviceStatus_Unknown : WGPURequestDeviceStatus_Success;
-    callback(status, device, nullptr, userdata);
+    callback(status, ToAPI(device), nullptr, userdata);
 }

 MaybeError AdapterBase::CreateDeviceInternal(DeviceBase** result,
@ -178,9 +177,8 @@ namespace dawn_native {
|
||||||
|
|
||||||
if (descriptor != nullptr && descriptor->requiredLimits != nullptr) {
|
if (descriptor != nullptr && descriptor->requiredLimits != nullptr) {
|
||||||
DAWN_TRY_CONTEXT(
|
DAWN_TRY_CONTEXT(
|
||||||
ValidateLimits(
|
ValidateLimits(mUseTieredLimits ? ApplyLimitTiers(mLimits.v1) : mLimits.v1,
|
||||||
mUseTieredLimits ? ApplyLimitTiers(mLimits.v1) : mLimits.v1,
|
FromAPI(descriptor->requiredLimits)->limits),
|
||||||
reinterpret_cast<const RequiredLimits*>(descriptor->requiredLimits)->limits),
|
|
||||||
"validating required limits");
|
"validating required limits");
|
||||||
|
|
||||||
DAWN_INVALID_IF(descriptor->requiredLimits->nextInChain != nullptr,
|
DAWN_INVALID_IF(descriptor->requiredLimits->nextInChain != nullptr,
|
||||||
|
|
|
@@ -44,9 +44,8 @@ namespace dawn_native {
     ASSERT(mCreateComputePipelineAsyncCallback != nullptr);

     if (mPipeline.Get() != nullptr) {
-        mCreateComputePipelineAsyncCallback(
-            WGPUCreatePipelineAsyncStatus_Success,
-            reinterpret_cast<WGPUComputePipeline>(mPipeline.Detach()), "", mUserData);
+        mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Success,
+                                            ToAPI(mPipeline.Detach()), "", mUserData);
     } else {
         mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Error, nullptr,
                                             mErrorMessage.c_str(), mUserData);
@@ -81,9 +80,8 @@ namespace dawn_native {
     ASSERT(mCreateRenderPipelineAsyncCallback != nullptr);

     if (mPipeline.Get() != nullptr) {
-        mCreateRenderPipelineAsyncCallback(
-            WGPUCreatePipelineAsyncStatus_Success,
-            reinterpret_cast<WGPURenderPipeline>(mPipeline.Detach()), "", mUserData);
+        mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Success,
+                                           ToAPI(mPipeline.Detach()), "", mUserData);
     } else {
         mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Error, nullptr,
                                            mErrorMessage.c_str(), mUserData);
@@ -32,9 +32,7 @@ namespace dawn_native {
 }

 std::vector<const char*> GetTogglesUsed(WGPUDevice device) {
-    const dawn_native::DeviceBase* deviceBase =
-        reinterpret_cast<const dawn_native::DeviceBase*>(device);
-    return deviceBase->GetTogglesUsed();
+    return FromAPI(device)->GetTogglesUsed();
 }

 // Adapter
@@ -110,7 +108,7 @@ namespace dawn_native {
 }

 bool Adapter::GetLimits(WGPUSupportedLimits* limits) const {
-    return mImpl->GetLimits(reinterpret_cast<SupportedLimits*>(limits));
+    return mImpl->GetLimits(FromAPI(limits));
 }

 void Adapter::SetUseTieredLimits(bool useTieredLimits) {
@@ -126,7 +124,7 @@ namespace dawn_native {
 }

 WGPUDevice Adapter::CreateDevice(const DawnDeviceDescriptor* deviceDescriptor) {
-    return reinterpret_cast<WGPUDevice>(mImpl->CreateDevice(deviceDescriptor));
+    return ToAPI(mImpl->CreateDevice(deviceDescriptor));
 }

 void Adapter::RequestDevice(const DawnDeviceDescriptor* descriptor,
@@ -197,31 +195,29 @@ namespace dawn_native {
 }

 WGPUInstance Instance::Get() const {
-    return reinterpret_cast<WGPUInstance>(mImpl);
+    return ToAPI(mImpl);
 }

 size_t GetLazyClearCountForTesting(WGPUDevice device) {
-    dawn_native::DeviceBase* deviceBase = reinterpret_cast<dawn_native::DeviceBase*>(device);
-    return deviceBase->GetLazyClearCountForTesting();
+    return FromAPI(device)->GetLazyClearCountForTesting();
 }

 size_t GetDeprecationWarningCountForTesting(WGPUDevice device) {
-    dawn_native::DeviceBase* deviceBase = reinterpret_cast<dawn_native::DeviceBase*>(device);
-    return deviceBase->GetDeprecationWarningCountForTesting();
+    return FromAPI(device)->GetDeprecationWarningCountForTesting();
 }

-bool IsTextureSubresourceInitialized(WGPUTexture cTexture,
+bool IsTextureSubresourceInitialized(WGPUTexture texture,
                                      uint32_t baseMipLevel,
                                      uint32_t levelCount,
                                      uint32_t baseArrayLayer,
                                      uint32_t layerCount,
                                      WGPUTextureAspect cAspect) {
-    dawn_native::TextureBase* texture = reinterpret_cast<dawn_native::TextureBase*>(cTexture);
+    TextureBase* textureBase = FromAPI(texture);

     Aspect aspect =
-        ConvertAspect(texture->GetFormat(), static_cast<wgpu::TextureAspect>(cAspect));
+        ConvertAspect(textureBase->GetFormat(), static_cast<wgpu::TextureAspect>(cAspect));
     SubresourceRange range(aspect, {baseArrayLayer, layerCount}, {baseMipLevel, levelCount});
-    return texture->IsSubresourceContentInitialized(range);
+    return textureBase->IsSubresourceContentInitialized(range);
 }

 std::vector<const char*> GetProcMapNamesForTestingInternal();
@@ -231,8 +227,7 @@ namespace dawn_native {
 }

 DAWN_NATIVE_EXPORT bool DeviceTick(WGPUDevice device) {
-    dawn_native::DeviceBase* deviceBase = reinterpret_cast<dawn_native::DeviceBase*>(device);
-    return deviceBase->APITick();
+    return FromAPI(device)->APITick();
 }

 // ExternalImageDescriptor
@@ -251,14 +246,12 @@ namespace dawn_native {
 }

 uint64_t GetAllocatedSizeForTesting(WGPUBuffer buffer) {
-    return reinterpret_cast<const BufferBase*>(buffer)->GetAllocatedSize();
+    return FromAPI(buffer)->GetAllocatedSize();
 }

 bool BindGroupLayoutBindingsEqualForTesting(WGPUBindGroupLayout a, WGPUBindGroupLayout b) {
-    BindGroupLayoutBase* aBase = reinterpret_cast<BindGroupLayoutBase*>(a);
-    BindGroupLayoutBase* bBase = reinterpret_cast<BindGroupLayoutBase*>(b);
     bool excludePipelineCompatibiltyToken = true;
-    return aBase->IsLayoutEqual(bBase, excludePipelineCompatibiltyToken);
+    return FromAPI(a)->IsLayoutEqual(FromAPI(b), excludePipelineCompatibiltyToken);
 }

 }  // namespace dawn_native
@@ -180,8 +180,7 @@ namespace dawn_native {
     }

     if (descriptor != nullptr && descriptor->requiredLimits != nullptr) {
-        mLimits.v1 = ReifyDefaultLimits(
-            reinterpret_cast<const RequiredLimits*>(descriptor->requiredLimits)->limits);
+        mLimits.v1 = ReifyDefaultLimits(FromAPI(descriptor->requiredLimits)->limits);
     } else {
         GetDefaultLimits(&mLimits.v1);
     }
@@ -252,7 +251,7 @@ namespace dawn_native {
     ShaderModuleDescriptor descriptor;
     ShaderModuleWGSLDescriptor wgslDesc;
     wgslDesc.source = kEmptyFragmentShader;
-    descriptor.nextInChain = reinterpret_cast<ChainedStruct*>(&wgslDesc);
+    descriptor.nextInChain = &wgslDesc;

     DAWN_TRY_ASSIGN(mInternalPipelineStore->dummyFragmentShader,
                     CreateShaderModule(&descriptor));
@@ -1315,9 +1314,8 @@ namespace dawn_native {
     Ref<ComputePipelineBase> cachedComputePipeline =
         GetCachedComputePipeline(uninitializedComputePipeline.Get());
     if (cachedComputePipeline.Get() != nullptr) {
-        callback(WGPUCreatePipelineAsyncStatus_Success,
-                 reinterpret_cast<WGPUComputePipeline>(cachedComputePipeline.Detach()), "",
-                 userdata);
+        callback(WGPUCreatePipelineAsyncStatus_Success, ToAPI(cachedComputePipeline.Detach()),
+                 "", userdata);
     } else {
         // Otherwise we will create the pipeline object in InitializeComputePipelineAsyncImpl(),
         // where the pipeline object may be initialized asynchronously and the result will be
@@ -1462,9 +1460,8 @@ namespace dawn_native {
     Ref<RenderPipelineBase> cachedRenderPipeline =
         GetCachedRenderPipeline(uninitializedRenderPipeline.Get());
     if (cachedRenderPipeline != nullptr) {
-        callback(WGPUCreatePipelineAsyncStatus_Success,
-                 reinterpret_cast<WGPURenderPipeline>(cachedRenderPipeline.Detach()), "",
-                 userdata);
+        callback(WGPUCreatePipelineAsyncStatus_Success, ToAPI(cachedRenderPipeline.Detach()),
+                 "", userdata);
     } else {
         // Otherwise we will create the pipeline object in InitializeRenderPipelineAsyncImpl(),
         // where the pipeline object may be initialized asynchronously and the result will be
@@ -30,13 +30,11 @@ namespace dawn_native {
         return blob;
     }
     std::lock_guard<std::mutex> lock(mMutex);
-    blob.bufferSize = mCache->LoadData(reinterpret_cast<WGPUDevice>(mDevice), key.data(),
-                                       key.size(), nullptr, 0);
+    blob.bufferSize = mCache->LoadData(ToAPI(mDevice), key.data(), key.size(), nullptr, 0);
     if (blob.bufferSize > 0) {
         blob.buffer.reset(new uint8_t[blob.bufferSize]);
-        const size_t bufferSize =
-            mCache->LoadData(reinterpret_cast<WGPUDevice>(mDevice), key.data(), key.size(),
-                             blob.buffer.get(), blob.bufferSize);
+        const size_t bufferSize = mCache->LoadData(ToAPI(mDevice), key.data(), key.size(),
+                                                   blob.buffer.get(), blob.bufferSize);
         ASSERT(bufferSize == blob.bufferSize);
         return blob;
     }
@@ -50,8 +48,7 @@ namespace dawn_native {
     ASSERT(value != nullptr);
     ASSERT(size > 0);
     std::lock_guard<std::mutex> lock(mMutex);
-    mCache->StoreData(reinterpret_cast<WGPUDevice>(mDevice), key.data(), key.size(), value,
-                      size);
+    mCache->StoreData(ToAPI(mDevice), key.data(), key.size(), value, size);
 }

 dawn_platform::CachingInterface* PersistentCache::GetPlatformCache() {
@@ -29,13 +29,11 @@
 namespace dawn_native { namespace d3d12 {

     ComPtr<ID3D12Device> GetD3D12Device(WGPUDevice device) {
-        Device* backendDevice = reinterpret_cast<Device*>(device);
-
-        return backendDevice->GetD3D12Device();
+        return ToBackend(FromAPI(device))->GetD3D12Device();
     }

     DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device, HWND window) {
-        Device* backendDevice = reinterpret_cast<Device*>(device);
+        Device* backendDevice = ToBackend(FromAPI(device));

         DawnSwapChainImplementation impl;
         impl = CreateSwapChainImplementation(new NativeSwapChainImpl(backendDevice, window));
@@ -78,7 +76,7 @@ namespace dawn_native { namespace d3d12 {
     WGPUTexture ExternalImageDXGI::ProduceTexture(
         WGPUDevice device,
         const ExternalImageAccessDescriptorDXGIKeyedMutex* descriptor) {
-        Device* backendDevice = reinterpret_cast<Device*>(device);
+        Device* backendDevice = ToBackend(FromAPI(device));

         // Ensure the texture usage is allowed
         if (!IsSubset(descriptor->usage, mUsage)) {
@@ -114,14 +112,14 @@ namespace dawn_native { namespace d3d12 {
             ExternalMutexSerial(descriptor->releaseMutexKey), descriptor->isSwapChainTexture,
             descriptor->isInitialized);

-        return reinterpret_cast<WGPUTexture>(texture.Detach());
+        return ToAPI(texture.Detach());
     }

     // static
     std::unique_ptr<ExternalImageDXGI> ExternalImageDXGI::Create(
         WGPUDevice device,
         const ExternalImageDescriptorDXGISharedHandle* descriptor) {
-        Device* backendDevice = reinterpret_cast<Device*>(device);
+        Device* backendDevice = ToBackend(FromAPI(device));

         Microsoft::WRL::ComPtr<ID3D12Resource> d3d12Resource;
         if (FAILED(backendDevice->GetD3D12Device()->OpenSharedHandle(
@@ -129,8 +127,7 @@ namespace dawn_native { namespace d3d12 {
             return nullptr;
         }

-        const TextureDescriptor* textureDescriptor =
-            reinterpret_cast<const TextureDescriptor*>(descriptor->cTextureDescriptor);
+        const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);

         if (backendDevice->ConsumedError(
                 ValidateTextureDescriptor(backendDevice, textureDescriptor))) {
@@ -168,7 +165,7 @@ namespace dawn_native { namespace d3d12 {
     uint64_t SetExternalMemoryReservation(WGPUDevice device,
                                           uint64_t requestedReservationSize,
                                           MemorySegment memorySegment) {
-        Device* backendDevice = reinterpret_cast<Device*>(device);
+        Device* backendDevice = ToBackend(FromAPI(device));

         return backendDevice->GetResidencyManager()->SetExternalMemoryReservation(
             memorySegment, requestedReservationSize);
@@ -83,7 +83,7 @@ namespace dawn_native { namespace d3d12 {
         : OldSwapChainBase(device, descriptor) {
         const auto& im = GetImplementation();
         DawnWSIContextD3D12 wsiContext = {};
-        wsiContext.device = reinterpret_cast<WGPUDevice>(GetDevice());
+        wsiContext.device = ToAPI(GetDevice());
         im.Init(im.userData, &wsiContext);

         ASSERT(im.textureUsage != WGPUTextureUsage_None);
@@ -433,8 +433,7 @@ namespace dawn_native { namespace metal {
     Ref<Texture> Device::CreateTextureWrappingIOSurface(const ExternalImageDescriptor* descriptor,
                                                         IOSurfaceRef ioSurface,
                                                         uint32_t plane) {
-        const TextureDescriptor* textureDescriptor =
-            reinterpret_cast<const TextureDescriptor*>(descriptor->cTextureDescriptor);
+        const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);

         if (ConsumedError(ValidateTextureDescriptor(this, textureDescriptor))) {
             return nullptr;
@@ -22,26 +22,24 @@

 namespace dawn_native { namespace metal {

-    id<MTLDevice> GetMetalDevice(WGPUDevice cDevice) {
-        Device* device = reinterpret_cast<Device*>(cDevice);
-        return device->GetMTLDevice();
+    id<MTLDevice> GetMetalDevice(WGPUDevice device) {
+        return ToBackend(FromAPI(device))->GetMTLDevice();
     }

     ExternalImageDescriptorIOSurface::ExternalImageDescriptorIOSurface()
         : ExternalImageDescriptor(ExternalImageType::IOSurface) {
     }

-    WGPUTexture WrapIOSurface(WGPUDevice cDevice,
+    WGPUTexture WrapIOSurface(WGPUDevice device,
                               const ExternalImageDescriptorIOSurface* cDescriptor) {
-        Device* device = reinterpret_cast<Device*>(cDevice);
-        Ref<TextureBase> texture = device->CreateTextureWrappingIOSurface(
+        Device* backendDevice = ToBackend(FromAPI(device));
+        Ref<TextureBase> texture = backendDevice->CreateTextureWrappingIOSurface(
             cDescriptor, cDescriptor->ioSurface, cDescriptor->plane);
-        return reinterpret_cast<WGPUTexture>(texture.Detach());
+        return ToAPI(texture.Detach());
     }

-    void WaitForCommandsToBeScheduled(WGPUDevice cDevice) {
-        Device* device = reinterpret_cast<Device*>(cDevice);
-        device->WaitForCommandsToBeScheduled();
+    void WaitForCommandsToBeScheduled(WGPUDevice device) {
+        ToBackend(FromAPI(device))->WaitForCommandsToBeScheduled();
     }

 }}  // namespace dawn_native::metal
@@ -431,8 +431,7 @@ namespace dawn_native { namespace metal {
         const ExternalImageDescriptor* descriptor,
         IOSurfaceRef ioSurface,
         uint32_t plane) {
-        const TextureDescriptor* textureDescriptor =
-            reinterpret_cast<const TextureDescriptor*>(descriptor->cTextureDescriptor);
+        const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);

         Ref<Texture> texture =
             AcquireRef(new Texture(device, textureDescriptor, TextureState::OwnedInternal));
@@ -203,8 +203,7 @@ namespace dawn_native { namespace opengl {
     }
     TextureBase* Device::CreateTextureWrappingEGLImage(const ExternalImageDescriptor* descriptor,
                                                        ::EGLImage image) {
-        const TextureDescriptor* textureDescriptor =
-            reinterpret_cast<const TextureDescriptor*>(descriptor->cTextureDescriptor);
+        const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);

         if (ConsumedError(ValidateTextureDescriptor(this, textureDescriptor))) {
             return nullptr;
@@ -34,7 +34,7 @@ namespace dawn_native { namespace opengl {
     DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
                                                           PresentCallback present,
                                                           void* presentUserdata) {
-        Device* backendDevice = reinterpret_cast<Device*>(device);
+        Device* backendDevice = ToBackend(FromAPI(device));

         DawnSwapChainImplementation impl;
         impl = CreateSwapChainImplementation(
@@ -54,11 +54,12 @@ namespace dawn_native { namespace opengl {
         : ExternalImageDescriptor(ExternalImageType::EGLImage) {
     }

-    WGPUTexture WrapExternalEGLImage(WGPUDevice cDevice,
+    WGPUTexture WrapExternalEGLImage(WGPUDevice device,
                                      const ExternalImageDescriptorEGLImage* descriptor) {
-        Device* device = reinterpret_cast<Device*>(cDevice);
-        TextureBase* texture = device->CreateTextureWrappingEGLImage(descriptor, descriptor->image);
-        return reinterpret_cast<WGPUTexture>(texture);
+        Device* backendDevice = ToBackend(FromAPI(device));
+        TextureBase* texture =
+            backendDevice->CreateTextureWrappingEGLImage(descriptor, descriptor->image);
+        return ToAPI(texture);
     }

 }}  // namespace dawn_native::opengl
@@ -719,8 +719,7 @@ namespace dawn_native { namespace vulkan {
         VkSemaphore* outSignalSemaphore,
         VkDeviceMemory* outAllocation,
         std::vector<VkSemaphore>* outWaitSemaphores) {
-        const TextureDescriptor* textureDescriptor =
-            reinterpret_cast<const TextureDescriptor*>(descriptor->cTextureDescriptor);
+        const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);

         const DawnTextureInternalUsageDescriptor* internalUsageDesc = nullptr;
         FindInChain(textureDescriptor->nextInChain, &internalUsageDesc);
@@ -794,8 +793,7 @@ namespace dawn_native { namespace vulkan {
         const ExternalImageDescriptorVk* descriptor,
         ExternalMemoryHandle memoryHandle,
         const std::vector<ExternalSemaphoreHandle>& waitHandles) {
-        const TextureDescriptor* textureDescriptor =
-            reinterpret_cast<const TextureDescriptor*>(descriptor->cTextureDescriptor);
+        const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);

         // Initial validation
         if (ConsumedError(ValidateTextureDescriptor(this, textureDescriptor))) {
@@ -29,13 +29,13 @@
 namespace dawn_native { namespace vulkan {

     VkInstance GetInstance(WGPUDevice device) {
-        Device* backendDevice = reinterpret_cast<Device*>(device);
+        Device* backendDevice = ToBackend(FromAPI(device));
         return backendDevice->GetVkInstance();
     }

     DAWN_NATIVE_EXPORT PFN_vkVoidFunction GetInstanceProcAddr(WGPUDevice device,
                                                               const char* pName) {
-        Device* backendDevice = reinterpret_cast<Device*>(device);
+        Device* backendDevice = ToBackend(FromAPI(device));
         return (*backendDevice->fn.GetInstanceProcAddr)(backendDevice->GetVkInstance(), pName);
     }

@@ -43,7 +43,7 @@ namespace dawn_native { namespace vulkan {
     // header as seen in this file uses the wrapped type.
     DAWN_NATIVE_EXPORT DawnSwapChainImplementation
     CreateNativeSwapChainImpl(WGPUDevice device, ::VkSurfaceKHR surfaceNative) {
-        Device* backendDevice = reinterpret_cast<Device*>(device);
+        Device* backendDevice = ToBackend(FromAPI(device));
         VkSurfaceKHR surface = VkSurfaceKHR::CreateFromHandle(surfaceNative);

         DawnSwapChainImplementation impl;
@@ -77,17 +77,17 @@ namespace dawn_native { namespace vulkan {
     }
 #endif  // DAWN_PLATFORM_LINUX

-    WGPUTexture WrapVulkanImage(WGPUDevice cDevice, const ExternalImageDescriptorVk* descriptor) {
+    WGPUTexture WrapVulkanImage(WGPUDevice device, const ExternalImageDescriptorVk* descriptor) {
 #if defined(DAWN_PLATFORM_LINUX)
         switch (descriptor->type) {
             case ExternalImageType::OpaqueFD:
             case ExternalImageType::DmaBuf: {
+                Device* backendDevice = ToBackend(FromAPI(device));
                 const ExternalImageDescriptorFD* fdDescriptor =
                     static_cast<const ExternalImageDescriptorFD*>(descriptor);
-                Device* device = reinterpret_cast<Device*>(cDevice);
-                TextureBase* texture = device->CreateTextureWrappingVulkanImage(
-                    fdDescriptor, fdDescriptor->memoryFD, fdDescriptor->waitFDs);
-                return reinterpret_cast<WGPUTexture>(texture);
+
+                return ToAPI(backendDevice->CreateTextureWrappingVulkanImage(
+                    fdDescriptor, fdDescriptor->memoryFD, fdDescriptor->waitFDs));
             }
             default:
                 return nullptr;
@@ -97,20 +97,21 @@ namespace dawn_native { namespace vulkan {
 #endif  // DAWN_PLATFORM_LINUX
     }

-    bool ExportVulkanImage(WGPUTexture cTexture,
+    bool ExportVulkanImage(WGPUTexture texture,
                            VkImageLayout desiredLayout,
                            ExternalImageExportInfoVk* info) {
-        if (cTexture == nullptr) {
+        if (texture == nullptr) {
             return false;
         }
 #if defined(DAWN_PLATFORM_LINUX)
         switch (info->type) {
             case ExternalImageType::OpaqueFD:
             case ExternalImageType::DmaBuf: {
-                Texture* texture = reinterpret_cast<Texture*>(cTexture);
-                Device* device = ToBackend(texture->GetDevice());
+                Texture* backendTexture = ToBackend(FromAPI(texture));
+                Device* device = ToBackend(backendTexture->GetDevice());
                 ExternalImageExportInfoFD* fdInfo = static_cast<ExternalImageExportInfoFD*>(info);
-                return device->SignalAndExportExternalTexture(texture, desiredLayout, fdInfo,
+
+                return device->SignalAndExportExternalTexture(backendTexture, desiredLayout, fdInfo,
                                                               &fdInfo->semaphoreHandles);
             }
             default:
@@ -118,14 +118,14 @@ namespace dawn_native { namespace vulkan {
     // semaphore extensions to import the image and wait on the provided synchronizaton
     // primitives before the texture can be used.
     // On failure, returns a nullptr.
-    DAWN_NATIVE_EXPORT WGPUTexture WrapVulkanImage(WGPUDevice cDevice,
+    DAWN_NATIVE_EXPORT WGPUTexture WrapVulkanImage(WGPUDevice device,
                                                    const ExternalImageDescriptorVk* descriptor);

     // Exports external memory from a Vulkan image. This must be called on wrapped textures
     // before they are destroyed. It writes the semaphore to wait on and the old/new image
     // layouts to |info|. Pass VK_IMAGE_LAYOUT_UNDEFINED as |desiredLayout| if you don't want to
     // perform a layout transition.
-    DAWN_NATIVE_EXPORT bool ExportVulkanImage(WGPUTexture cTexture,
+    DAWN_NATIVE_EXPORT bool ExportVulkanImage(WGPUTexture texture,
                                               VkImageLayout desiredLayout,
                                               ExternalImageExportInfoVk* info);

@@ -73,7 +73,7 @@ TEST_F(FeatureTests, GetEnabledFeatures) {
     dawn_native::DawnDeviceDescriptor deviceDescriptor;
     deviceDescriptor.requiredFeatures = {featureName};
     dawn_native::DeviceBase* deviceBase =
-        reinterpret_cast<dawn_native::DeviceBase*>(adapter.CreateDevice(&deviceDescriptor));
+        dawn_native::FromAPI(adapter.CreateDevice(&deviceDescriptor));
     std::vector<const char*> enabledFeatures = deviceBase->GetEnabledFeatures();
     ASSERT_EQ(1u, enabledFeatures.size());
     ASSERT_EQ(0, std::strcmp(featureName, enabledFeatures[0]));
@@ -318,7 +318,7 @@ TEST_F(CommandBufferValidationTest, DestroyEncoder) {
     wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
     wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
     pass.EndPass();
-    reinterpret_cast<dawn_native::CommandEncoder*>(encoder.Get())->Destroy();
+    dawn_native::FromAPI(encoder.Get())->Destroy();
     ASSERT_DEVICE_ERROR(encoder.Finish(), HasSubstr("Destroyed encoder cannot be finished."));
 }

@@ -327,13 +327,13 @@ TEST_F(CommandBufferValidationTest, DestroyEncoder) {
         wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
         wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
         pass.EndPass();
-        reinterpret_cast<dawn_native::CommandEncoder*>(encoder.Get())->Destroy();
+        dawn_native::FromAPI(encoder.Get())->Destroy();
     }

     // Destroyed encoder should allow encoding, and emit error on finish.
     {
         wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-        reinterpret_cast<dawn_native::CommandEncoder*>(encoder.Get())->Destroy();
+        dawn_native::FromAPI(encoder.Get())->Destroy();
         wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
         pass.EndPass();
         ASSERT_DEVICE_ERROR(encoder.Finish(), HasSubstr("Destroyed encoder cannot be finished."));
@@ -342,7 +342,7 @@ TEST_F(CommandBufferValidationTest, DestroyEncoder) {
     // Destroyed encoder should allow encoding and shouldn't emit an error if never finished.
     {
         wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-        reinterpret_cast<dawn_native::CommandEncoder*>(encoder.Get())->Destroy();
+        dawn_native::FromAPI(encoder.Get())->Destroy();
         wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
         pass.EndPass();
     }
@@ -353,21 +353,21 @@ TEST_F(CommandBufferValidationTest, DestroyEncoder) {
         wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
         pass.EndPass();
         encoder.Finish();
-        reinterpret_cast<dawn_native::CommandEncoder*>(encoder.Get())->Destroy();
+        dawn_native::FromAPI(encoder.Get())->Destroy();
     }

     // Destroying an encoder twice should not emit any errors.
     {
         wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-        reinterpret_cast<dawn_native::CommandEncoder*>(encoder.Get())->Destroy();
-        reinterpret_cast<dawn_native::CommandEncoder*>(encoder.Get())->Destroy();
+        dawn_native::FromAPI(encoder.Get())->Destroy();
+        dawn_native::FromAPI(encoder.Get())->Destroy();
     }

     // Destroying an encoder twice and then calling finish should fail.
     {
         wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-        reinterpret_cast<dawn_native::CommandEncoder*>(encoder.Get())->Destroy();
-        reinterpret_cast<dawn_native::CommandEncoder*>(encoder.Get())->Destroy();
+        dawn_native::FromAPI(encoder.Get())->Destroy();
+        dawn_native::FromAPI(encoder.Get())->Destroy();
         ASSERT_DEVICE_ERROR(encoder.Finish(), HasSubstr("Destroyed encoder cannot be finished."));
     }
 }
@@ -167,8 +167,7 @@ TEST_F(ShaderModuleValidationTest, GetCompilationMessages) {
         return vec4<f32>(0.0, 1.0, 0.0, 1.0);
     })");

-    dawn_native::ShaderModuleBase* shaderModuleBase =
-        reinterpret_cast<dawn_native::ShaderModuleBase*>(shaderModule.Get());
+    dawn_native::ShaderModuleBase* shaderModuleBase = dawn_native::FromAPI(shaderModule.Get());
     dawn_native::OwnedCompilationMessages* messages = shaderModuleBase->GetCompilationMessages();
     messages->ClearMessages();
     messages->AddMessageForTesting("Info Message");
@@ -42,7 +42,7 @@ class D3D12ResidencyTestBase : public DawnTest {

         // Restrict Dawn's budget to create an artificial budget.
         dawn_native::d3d12::Device* d3dDevice =
-            reinterpret_cast<dawn_native::d3d12::Device*>(device.Get());
+            dawn_native::d3d12::ToBackend(dawn_native::FromAPI((device.Get())));
         d3dDevice->GetResidencyManager()->RestrictBudgetForTesting(kRestrictedBudgetSize);

         // Initialize a source buffer on the GPU to serve as a source to quickly copy data to other
@@ -94,18 +94,20 @@ class D3D12ResourceResidencyTests : public D3D12ResidencyTestBase {
     bool CheckAllocationMethod(wgpu::Buffer buffer,
                                dawn_native::AllocationMethod allocationMethod) const {
         dawn_native::d3d12::Buffer* d3dBuffer =
-            reinterpret_cast<dawn_native::d3d12::Buffer*>(buffer.Get());
+            dawn_native::d3d12::ToBackend(dawn_native::FromAPI((buffer.Get())));
         return d3dBuffer->CheckAllocationMethodForTesting(allocationMethod);
     }

     bool CheckIfBufferIsResident(wgpu::Buffer buffer) const {
         dawn_native::d3d12::Buffer* d3dBuffer =
-            reinterpret_cast<dawn_native::d3d12::Buffer*>(buffer.Get());
+            dawn_native::d3d12::ToBackend(dawn_native::FromAPI((buffer.Get())));
         return d3dBuffer->CheckIsResidentForTesting();
     }

     bool IsUMA() const {
-        return reinterpret_cast<dawn_native::d3d12::Device*>(device.Get())->GetDeviceInfo().isUMA;
+        return dawn_native::d3d12::ToBackend(dawn_native::FromAPI(device.Get()))
+            ->GetDeviceInfo()
+            .isUMA;
     }
 };

@@ -367,7 +369,7 @@ TEST_P(D3D12DescriptorResidencyTests, SwitchedViewHeapResidency) {
     wgpu::Sampler sampler = device.CreateSampler();

     dawn_native::d3d12::Device* d3dDevice =
-        reinterpret_cast<dawn_native::d3d12::Device*>(device.Get());
+        dawn_native::d3d12::ToBackend(dawn_native::FromAPI(device.Get()));

     dawn_native::d3d12::ShaderVisibleDescriptorAllocator* allocator =
         d3dDevice->GetViewShaderVisibleDescriptorAllocator();
@@ -128,7 +128,7 @@ class EGLImageTestBase : public DawnTest {
                             void* data,
                             size_t size) {
         dawn_native::opengl::Device* openglDevice =
-            reinterpret_cast<dawn_native::opengl::Device*>(device.Get());
+            dawn_native::opengl::ToBackend(dawn_native::FromAPI(device.Get()));
         const dawn_native::opengl::OpenGLFunctions& gl = openglDevice->gl;
         GLuint tex;
         gl.GenTextures(1, &tex);
@@ -300,7 +300,7 @@ class EGLImageUsageTests : public EGLImageTestBase {
                             void* data,
                             size_t dataSize) {
         dawn_native::opengl::Device* openglDevice =
-            reinterpret_cast<dawn_native::opengl::Device*>(device.Get());
+            dawn_native::opengl::ToBackend(dawn_native::FromAPI(device.Get()));
         const dawn_native::opengl::OpenGLFunctions& gl = openglDevice->gl;

         // Get a texture view for the eglImage
@@ -53,14 +53,13 @@ class InternalStorageBufferBindingTests : public DawnTest {
         bglDesc.entryCount = 1;
         bglDesc.entries = &bglEntry;

-        dawn_native::DeviceBase* nativeDevice =
-            reinterpret_cast<dawn_native::DeviceBase*>(device.Get());
+        dawn_native::DeviceBase* nativeDevice = dawn_native::FromAPI(device.Get());

         Ref<dawn_native::BindGroupLayoutBase> bglRef =
             nativeDevice->CreateBindGroupLayout(&bglDesc, true).AcquireSuccess();

         wgpu::BindGroupLayout bgl =
-            wgpu::BindGroupLayout::Acquire(reinterpret_cast<WGPUBindGroupLayout>(bglRef.Detach()));
+            wgpu::BindGroupLayout::Acquire(dawn_native::ToAPI(bglRef.Detach()));

         // Create pipeline layout
         wgpu::PipelineLayoutDescriptor plDesc;
@@ -26,10 +26,10 @@ namespace {
                               wgpu::Buffer availability,
                               wgpu::Buffer params) {
     ASSERT_TRUE(dawn_native::EncodeConvertTimestampsToNanoseconds(
-                    reinterpret_cast<dawn_native::CommandEncoder*>(encoder.Get()),
-                    reinterpret_cast<dawn_native::BufferBase*>(timestamps.Get()),
-                    reinterpret_cast<dawn_native::BufferBase*>(availability.Get()),
-                    reinterpret_cast<dawn_native::BufferBase*>(params.Get())).IsSuccess());
+                    dawn_native::FromAPI(encoder.Get()), dawn_native::FromAPI(timestamps.Get()),
+                    dawn_native::FromAPI(availability.Get()),
+                    dawn_native::FromAPI(params.Get()))
+                    .IsSuccess());
 }

 class InternalShaderExpectation : public detail::Expectation {
@@ -29,7 +29,7 @@ namespace {
     DawnTest::SetUp();
     DAWN_TEST_UNSUPPORTED_IF(UsesWire());

-    mDeviceVk = reinterpret_cast<dawn_native::vulkan::Device*>(device.Get());
+    mDeviceVk = dawn_native::vulkan::ToBackend(dawn_native::FromAPI(device.Get()));
 }

 protected:
@@ -44,7 +44,7 @@ namespace dawn_native { namespace vulkan {
     DAWN_TEST_UNSUPPORTED_IF(UsesWire());

     gbmDevice = CreateGbmDevice();
-    deviceVk = reinterpret_cast<dawn_native::vulkan::Device*>(device.Get());
+    deviceVk = dawn_native::vulkan::ToBackend(dawn_native::FromAPI(device.Get()));

     defaultGbmBo = CreateGbmBo(1, 1, true /* linear */);
     defaultStride = gbm_bo_get_stride_for_plane(defaultGbmBo, 0);
@@ -311,14 +311,13 @@ namespace dawn_native { namespace vulkan {
     }

     // Create another device based on the original
-    backendAdapter =
-        reinterpret_cast<dawn_native::vulkan::Adapter*>(deviceVk->GetAdapter());
+    backendAdapter = dawn_native::vulkan::ToBackend(deviceVk->GetAdapter());
     deviceDescriptor.forceEnabledToggles = GetParam().forceEnabledWorkarounds;
     deviceDescriptor.forceDisabledToggles = GetParam().forceDisabledWorkarounds;

-    secondDeviceVk = reinterpret_cast<dawn_native::vulkan::Device*>(
-        backendAdapter->CreateDevice(&deviceDescriptor));
-    secondDevice = wgpu::Device::Acquire(reinterpret_cast<WGPUDevice>(secondDeviceVk));
+    secondDeviceVk =
+        dawn_native::vulkan::ToBackend(backendAdapter->CreateDevice(&deviceDescriptor));
+    secondDevice = wgpu::Device::Acquire(dawn_native::ToAPI(secondDeviceVk));
 }

 protected:
@@ -691,10 +690,9 @@ namespace dawn_native { namespace vulkan {
     // device 1 = |device|
    // device 2 = |secondDevice|
     // Create device 3
-    dawn_native::vulkan::Device* thirdDeviceVk = reinterpret_cast<dawn_native::vulkan::Device*>(
-        backendAdapter->CreateDevice(&deviceDescriptor));
-    wgpu::Device thirdDevice =
-        wgpu::Device::Acquire(reinterpret_cast<WGPUDevice>(thirdDeviceVk));
+    dawn_native::vulkan::Device* thirdDeviceVk =
+        dawn_native::vulkan::ToBackend(backendAdapter->CreateDevice(&deviceDescriptor));
+    wgpu::Device thirdDevice = wgpu::Device::Acquire(dawn_native::ToAPI(thirdDeviceVk));

     // Make queue for device 2 and 3
     wgpu::Queue secondDeviceQueue = secondDevice.GetQueue();
@@ -40,7 +40,7 @@ namespace dawn_native { namespace vulkan {
     DawnTest::SetUp();
     DAWN_TEST_UNSUPPORTED_IF(UsesWire());

-    deviceVk = reinterpret_cast<dawn_native::vulkan::Device*>(device.Get());
+    deviceVk = dawn_native::vulkan::ToBackend(dawn_native::FromAPI(device.Get()));
 }

 // Creates a VkImage with external memory
@@ -382,14 +382,13 @@ namespace dawn_native { namespace vulkan {
     DAWN_TEST_UNSUPPORTED_IF(UsesWire());

     // Create another device based on the original
-    backendAdapter =
-        reinterpret_cast<dawn_native::vulkan::Adapter*>(deviceVk->GetAdapter());
+    backendAdapter = dawn_native::vulkan::ToBackend(deviceVk->GetAdapter());
     deviceDescriptor.forceEnabledToggles = GetParam().forceEnabledWorkarounds;
     deviceDescriptor.forceDisabledToggles = GetParam().forceDisabledWorkarounds;

-    secondDeviceVk = reinterpret_cast<dawn_native::vulkan::Device*>(
-        backendAdapter->CreateDevice(&deviceDescriptor));
-    secondDevice = wgpu::Device::Acquire(reinterpret_cast<WGPUDevice>(secondDeviceVk));
+    secondDeviceVk =
+        dawn_native::vulkan::ToBackend(backendAdapter->CreateDevice(&deviceDescriptor));
+    secondDevice = wgpu::Device::Acquire(dawn_native::ToAPI(secondDeviceVk));

     CreateBindExportImage(deviceVk, 1, 1, VK_FORMAT_R8G8B8A8_UNORM, &defaultImage,
                           &defaultAllocation, &defaultAllocationSize,
@@ -797,10 +796,9 @@ namespace dawn_native { namespace vulkan {
     // device 1 = |device|
     // device 2 = |secondDevice|
     // Create device 3
-    dawn_native::vulkan::Device* thirdDeviceVk = reinterpret_cast<dawn_native::vulkan::Device*>(
-        backendAdapter->CreateDevice(&deviceDescriptor));
-    wgpu::Device thirdDevice =
-        wgpu::Device::Acquire(reinterpret_cast<WGPUDevice>(thirdDeviceVk));
+    dawn_native::vulkan::Device* thirdDeviceVk =
+        dawn_native::vulkan::ToBackend(backendAdapter->CreateDevice(&deviceDescriptor));
+    wgpu::Device thirdDevice = wgpu::Device::Acquire(dawn_native::ToAPI(thirdDeviceVk));

     // Make queue for device 2 and 3
     wgpu::Queue secondDeviceQueue = secondDevice.GetQueue();