Replace reinterpret_cast with FromAPI/ToAPI where possible

This brings more type safety to the code and is marginally more
readable.

Bug: None
Change-Id: I0330a8a8e95cd9b8b531af266acd8fdc50c50460
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/71606
Reviewed-by: Brandon Jones <bajones@chromium.org>
Reviewed-by: Austin Eng <enga@chromium.org>
Commit-Queue: Austin Eng <enga@chromium.org>
Auto-Submit: Corentin Wallez <cwallez@chromium.org>
This commit is contained in:
Corentin Wallez 2021-12-03 16:47:18 +00:00 committed by Dawn LUCI CQ
parent 9dc3c0bc86
commit c6d3a840da
26 changed files with 124 additions and 155 deletions

View File

@ -39,7 +39,7 @@ namespace dawn_native {
{%- endfor -%}
) {
//* Perform conversion between C types and frontend types
auto self = reinterpret_cast<{{as_frontendType(type)}}>(cSelf);
auto self = FromAPI(cSelf);
{% for arg in method.arguments %}
{% set varName = as_varName(arg.name) %}
@ -63,7 +63,7 @@ namespace dawn_native {
);
{% if method.return_type.name.canonical_case() != "void" %}
{% if method.return_type.category == "object" %}
return reinterpret_cast<{{as_cType(method.return_type.name)}}>(result);
return ToAPI(result);
{% else %}
return result;
{% endif %}
@ -84,10 +84,8 @@ namespace dawn_native {
static constexpr size_t sProcMapSize = sizeof(sProcMap) / sizeof(sProcMap[0]);
}
WGPUInstance NativeCreateInstance(WGPUInstanceDescriptor const* cDescriptor) {
const dawn_native::InstanceDescriptor* descriptor =
reinterpret_cast<const dawn_native::InstanceDescriptor*>(cDescriptor);
return reinterpret_cast<WGPUInstance>(InstanceBase::Create(descriptor));
WGPUInstance NativeCreateInstance(WGPUInstanceDescriptor const* descriptor) {
return ToAPI(InstanceBase::Create(FromAPI(descriptor)));
}
WGPUProc NativeGetProcAddress(WGPUDevice, const char* procName) {

View File

@ -119,7 +119,7 @@ namespace dawn_native {
// to store them (ex. by calling GetLimits directly instead). Currently,
// we keep this function as it's only used internally in Chromium to
// send the adapter properties across the wire.
GetLimits(reinterpret_cast<SupportedLimits*>(&adapterProperties.limits));
GetLimits(FromAPI(&adapterProperties.limits));
return adapterProperties;
}
@ -149,19 +149,18 @@ namespace dawn_native {
void AdapterBase::RequestDevice(const DawnDeviceDescriptor* descriptor,
WGPURequestDeviceCallback callback,
void* userdata) {
DeviceBase* result = nullptr;
MaybeError err = CreateDeviceInternal(&result, descriptor);
WGPUDevice device = reinterpret_cast<WGPUDevice>(result);
DeviceBase* device = nullptr;
MaybeError err = CreateDeviceInternal(&device, descriptor);
if (err.IsError()) {
std::unique_ptr<ErrorData> errorData = err.AcquireError();
callback(WGPURequestDeviceStatus_Error, device,
callback(WGPURequestDeviceStatus_Error, ToAPI(device),
errorData->GetFormattedMessage().c_str(), userdata);
return;
}
WGPURequestDeviceStatus status =
device == nullptr ? WGPURequestDeviceStatus_Unknown : WGPURequestDeviceStatus_Success;
callback(status, device, nullptr, userdata);
callback(status, ToAPI(device), nullptr, userdata);
}
MaybeError AdapterBase::CreateDeviceInternal(DeviceBase** result,
@ -178,9 +177,8 @@ namespace dawn_native {
if (descriptor != nullptr && descriptor->requiredLimits != nullptr) {
DAWN_TRY_CONTEXT(
ValidateLimits(
mUseTieredLimits ? ApplyLimitTiers(mLimits.v1) : mLimits.v1,
reinterpret_cast<const RequiredLimits*>(descriptor->requiredLimits)->limits),
ValidateLimits(mUseTieredLimits ? ApplyLimitTiers(mLimits.v1) : mLimits.v1,
FromAPI(descriptor->requiredLimits)->limits),
"validating required limits");
DAWN_INVALID_IF(descriptor->requiredLimits->nextInChain != nullptr,

View File

@ -44,9 +44,8 @@ namespace dawn_native {
ASSERT(mCreateComputePipelineAsyncCallback != nullptr);
if (mPipeline.Get() != nullptr) {
mCreateComputePipelineAsyncCallback(
WGPUCreatePipelineAsyncStatus_Success,
reinterpret_cast<WGPUComputePipeline>(mPipeline.Detach()), "", mUserData);
mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Success,
ToAPI(mPipeline.Detach()), "", mUserData);
} else {
mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Error, nullptr,
mErrorMessage.c_str(), mUserData);
@ -81,9 +80,8 @@ namespace dawn_native {
ASSERT(mCreateRenderPipelineAsyncCallback != nullptr);
if (mPipeline.Get() != nullptr) {
mCreateRenderPipelineAsyncCallback(
WGPUCreatePipelineAsyncStatus_Success,
reinterpret_cast<WGPURenderPipeline>(mPipeline.Detach()), "", mUserData);
mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Success,
ToAPI(mPipeline.Detach()), "", mUserData);
} else {
mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Error, nullptr,
mErrorMessage.c_str(), mUserData);

View File

@ -32,9 +32,7 @@ namespace dawn_native {
}
std::vector<const char*> GetTogglesUsed(WGPUDevice device) {
const dawn_native::DeviceBase* deviceBase =
reinterpret_cast<const dawn_native::DeviceBase*>(device);
return deviceBase->GetTogglesUsed();
return FromAPI(device)->GetTogglesUsed();
}
// Adapter
@ -110,7 +108,7 @@ namespace dawn_native {
}
bool Adapter::GetLimits(WGPUSupportedLimits* limits) const {
return mImpl->GetLimits(reinterpret_cast<SupportedLimits*>(limits));
return mImpl->GetLimits(FromAPI(limits));
}
void Adapter::SetUseTieredLimits(bool useTieredLimits) {
@ -126,7 +124,7 @@ namespace dawn_native {
}
WGPUDevice Adapter::CreateDevice(const DawnDeviceDescriptor* deviceDescriptor) {
return reinterpret_cast<WGPUDevice>(mImpl->CreateDevice(deviceDescriptor));
return ToAPI(mImpl->CreateDevice(deviceDescriptor));
}
void Adapter::RequestDevice(const DawnDeviceDescriptor* descriptor,
@ -197,31 +195,29 @@ namespace dawn_native {
}
WGPUInstance Instance::Get() const {
return reinterpret_cast<WGPUInstance>(mImpl);
return ToAPI(mImpl);
}
size_t GetLazyClearCountForTesting(WGPUDevice device) {
dawn_native::DeviceBase* deviceBase = reinterpret_cast<dawn_native::DeviceBase*>(device);
return deviceBase->GetLazyClearCountForTesting();
return FromAPI(device)->GetLazyClearCountForTesting();
}
size_t GetDeprecationWarningCountForTesting(WGPUDevice device) {
dawn_native::DeviceBase* deviceBase = reinterpret_cast<dawn_native::DeviceBase*>(device);
return deviceBase->GetDeprecationWarningCountForTesting();
return FromAPI(device)->GetDeprecationWarningCountForTesting();
}
bool IsTextureSubresourceInitialized(WGPUTexture cTexture,
bool IsTextureSubresourceInitialized(WGPUTexture texture,
uint32_t baseMipLevel,
uint32_t levelCount,
uint32_t baseArrayLayer,
uint32_t layerCount,
WGPUTextureAspect cAspect) {
dawn_native::TextureBase* texture = reinterpret_cast<dawn_native::TextureBase*>(cTexture);
TextureBase* textureBase = FromAPI(texture);
Aspect aspect =
ConvertAspect(texture->GetFormat(), static_cast<wgpu::TextureAspect>(cAspect));
ConvertAspect(textureBase->GetFormat(), static_cast<wgpu::TextureAspect>(cAspect));
SubresourceRange range(aspect, {baseArrayLayer, layerCount}, {baseMipLevel, levelCount});
return texture->IsSubresourceContentInitialized(range);
return textureBase->IsSubresourceContentInitialized(range);
}
std::vector<const char*> GetProcMapNamesForTestingInternal();
@ -231,8 +227,7 @@ namespace dawn_native {
}
DAWN_NATIVE_EXPORT bool DeviceTick(WGPUDevice device) {
dawn_native::DeviceBase* deviceBase = reinterpret_cast<dawn_native::DeviceBase*>(device);
return deviceBase->APITick();
return FromAPI(device)->APITick();
}
// ExternalImageDescriptor
@ -251,14 +246,12 @@ namespace dawn_native {
}
uint64_t GetAllocatedSizeForTesting(WGPUBuffer buffer) {
return reinterpret_cast<const BufferBase*>(buffer)->GetAllocatedSize();
return FromAPI(buffer)->GetAllocatedSize();
}
bool BindGroupLayoutBindingsEqualForTesting(WGPUBindGroupLayout a, WGPUBindGroupLayout b) {
BindGroupLayoutBase* aBase = reinterpret_cast<BindGroupLayoutBase*>(a);
BindGroupLayoutBase* bBase = reinterpret_cast<BindGroupLayoutBase*>(b);
bool excludePipelineCompatibiltyToken = true;
return aBase->IsLayoutEqual(bBase, excludePipelineCompatibiltyToken);
return FromAPI(a)->IsLayoutEqual(FromAPI(b), excludePipelineCompatibiltyToken);
}
} // namespace dawn_native

View File

@ -180,8 +180,7 @@ namespace dawn_native {
}
if (descriptor != nullptr && descriptor->requiredLimits != nullptr) {
mLimits.v1 = ReifyDefaultLimits(
reinterpret_cast<const RequiredLimits*>(descriptor->requiredLimits)->limits);
mLimits.v1 = ReifyDefaultLimits(FromAPI(descriptor->requiredLimits)->limits);
} else {
GetDefaultLimits(&mLimits.v1);
}
@ -252,7 +251,7 @@ namespace dawn_native {
ShaderModuleDescriptor descriptor;
ShaderModuleWGSLDescriptor wgslDesc;
wgslDesc.source = kEmptyFragmentShader;
descriptor.nextInChain = reinterpret_cast<ChainedStruct*>(&wgslDesc);
descriptor.nextInChain = &wgslDesc;
DAWN_TRY_ASSIGN(mInternalPipelineStore->dummyFragmentShader,
CreateShaderModule(&descriptor));
@ -1315,9 +1314,8 @@ namespace dawn_native {
Ref<ComputePipelineBase> cachedComputePipeline =
GetCachedComputePipeline(uninitializedComputePipeline.Get());
if (cachedComputePipeline.Get() != nullptr) {
callback(WGPUCreatePipelineAsyncStatus_Success,
reinterpret_cast<WGPUComputePipeline>(cachedComputePipeline.Detach()), "",
userdata);
callback(WGPUCreatePipelineAsyncStatus_Success, ToAPI(cachedComputePipeline.Detach()),
"", userdata);
} else {
// Otherwise we will create the pipeline object in InitializeComputePipelineAsyncImpl(),
// where the pipeline object may be initialized asynchronously and the result will be
@ -1462,9 +1460,8 @@ namespace dawn_native {
Ref<RenderPipelineBase> cachedRenderPipeline =
GetCachedRenderPipeline(uninitializedRenderPipeline.Get());
if (cachedRenderPipeline != nullptr) {
callback(WGPUCreatePipelineAsyncStatus_Success,
reinterpret_cast<WGPURenderPipeline>(cachedRenderPipeline.Detach()), "",
userdata);
callback(WGPUCreatePipelineAsyncStatus_Success, ToAPI(cachedRenderPipeline.Detach()),
"", userdata);
} else {
// Otherwise we will create the pipeline object in InitializeRenderPipelineAsyncImpl(),
// where the pipeline object may be initialized asynchronously and the result will be

View File

@ -30,12 +30,10 @@ namespace dawn_native {
return blob;
}
std::lock_guard<std::mutex> lock(mMutex);
blob.bufferSize = mCache->LoadData(reinterpret_cast<WGPUDevice>(mDevice), key.data(),
key.size(), nullptr, 0);
blob.bufferSize = mCache->LoadData(ToAPI(mDevice), key.data(), key.size(), nullptr, 0);
if (blob.bufferSize > 0) {
blob.buffer.reset(new uint8_t[blob.bufferSize]);
const size_t bufferSize =
mCache->LoadData(reinterpret_cast<WGPUDevice>(mDevice), key.data(), key.size(),
const size_t bufferSize = mCache->LoadData(ToAPI(mDevice), key.data(), key.size(),
blob.buffer.get(), blob.bufferSize);
ASSERT(bufferSize == blob.bufferSize);
return blob;
@ -50,8 +48,7 @@ namespace dawn_native {
ASSERT(value != nullptr);
ASSERT(size > 0);
std::lock_guard<std::mutex> lock(mMutex);
mCache->StoreData(reinterpret_cast<WGPUDevice>(mDevice), key.data(), key.size(), value,
size);
mCache->StoreData(ToAPI(mDevice), key.data(), key.size(), value, size);
}
dawn_platform::CachingInterface* PersistentCache::GetPlatformCache() {

View File

@ -29,13 +29,11 @@
namespace dawn_native { namespace d3d12 {
ComPtr<ID3D12Device> GetD3D12Device(WGPUDevice device) {
Device* backendDevice = reinterpret_cast<Device*>(device);
return backendDevice->GetD3D12Device();
return ToBackend(FromAPI(device))->GetD3D12Device();
}
DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device, HWND window) {
Device* backendDevice = reinterpret_cast<Device*>(device);
Device* backendDevice = ToBackend(FromAPI(device));
DawnSwapChainImplementation impl;
impl = CreateSwapChainImplementation(new NativeSwapChainImpl(backendDevice, window));
@ -78,7 +76,7 @@ namespace dawn_native { namespace d3d12 {
WGPUTexture ExternalImageDXGI::ProduceTexture(
WGPUDevice device,
const ExternalImageAccessDescriptorDXGIKeyedMutex* descriptor) {
Device* backendDevice = reinterpret_cast<Device*>(device);
Device* backendDevice = ToBackend(FromAPI(device));
// Ensure the texture usage is allowed
if (!IsSubset(descriptor->usage, mUsage)) {
@ -114,14 +112,14 @@ namespace dawn_native { namespace d3d12 {
ExternalMutexSerial(descriptor->releaseMutexKey), descriptor->isSwapChainTexture,
descriptor->isInitialized);
return reinterpret_cast<WGPUTexture>(texture.Detach());
return ToAPI(texture.Detach());
}
// static
std::unique_ptr<ExternalImageDXGI> ExternalImageDXGI::Create(
WGPUDevice device,
const ExternalImageDescriptorDXGISharedHandle* descriptor) {
Device* backendDevice = reinterpret_cast<Device*>(device);
Device* backendDevice = ToBackend(FromAPI(device));
Microsoft::WRL::ComPtr<ID3D12Resource> d3d12Resource;
if (FAILED(backendDevice->GetD3D12Device()->OpenSharedHandle(
@ -129,8 +127,7 @@ namespace dawn_native { namespace d3d12 {
return nullptr;
}
const TextureDescriptor* textureDescriptor =
reinterpret_cast<const TextureDescriptor*>(descriptor->cTextureDescriptor);
const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
if (backendDevice->ConsumedError(
ValidateTextureDescriptor(backendDevice, textureDescriptor))) {
@ -168,7 +165,7 @@ namespace dawn_native { namespace d3d12 {
uint64_t SetExternalMemoryReservation(WGPUDevice device,
uint64_t requestedReservationSize,
MemorySegment memorySegment) {
Device* backendDevice = reinterpret_cast<Device*>(device);
Device* backendDevice = ToBackend(FromAPI(device));
return backendDevice->GetResidencyManager()->SetExternalMemoryReservation(
memorySegment, requestedReservationSize);

View File

@ -83,7 +83,7 @@ namespace dawn_native { namespace d3d12 {
: OldSwapChainBase(device, descriptor) {
const auto& im = GetImplementation();
DawnWSIContextD3D12 wsiContext = {};
wsiContext.device = reinterpret_cast<WGPUDevice>(GetDevice());
wsiContext.device = ToAPI(GetDevice());
im.Init(im.userData, &wsiContext);
ASSERT(im.textureUsage != WGPUTextureUsage_None);

View File

@ -433,8 +433,7 @@ namespace dawn_native { namespace metal {
Ref<Texture> Device::CreateTextureWrappingIOSurface(const ExternalImageDescriptor* descriptor,
IOSurfaceRef ioSurface,
uint32_t plane) {
const TextureDescriptor* textureDescriptor =
reinterpret_cast<const TextureDescriptor*>(descriptor->cTextureDescriptor);
const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
if (ConsumedError(ValidateTextureDescriptor(this, textureDescriptor))) {
return nullptr;

View File

@ -22,26 +22,24 @@
namespace dawn_native { namespace metal {
id<MTLDevice> GetMetalDevice(WGPUDevice cDevice) {
Device* device = reinterpret_cast<Device*>(cDevice);
return device->GetMTLDevice();
id<MTLDevice> GetMetalDevice(WGPUDevice device) {
return ToBackend(FromAPI(device))->GetMTLDevice();
}
ExternalImageDescriptorIOSurface::ExternalImageDescriptorIOSurface()
: ExternalImageDescriptor(ExternalImageType::IOSurface) {
}
WGPUTexture WrapIOSurface(WGPUDevice cDevice,
WGPUTexture WrapIOSurface(WGPUDevice device,
const ExternalImageDescriptorIOSurface* cDescriptor) {
Device* device = reinterpret_cast<Device*>(cDevice);
Ref<TextureBase> texture = device->CreateTextureWrappingIOSurface(
Device* backendDevice = ToBackend(FromAPI(device));
Ref<TextureBase> texture = backendDevice->CreateTextureWrappingIOSurface(
cDescriptor, cDescriptor->ioSurface, cDescriptor->plane);
return reinterpret_cast<WGPUTexture>(texture.Detach());
return ToAPI(texture.Detach());
}
void WaitForCommandsToBeScheduled(WGPUDevice cDevice) {
Device* device = reinterpret_cast<Device*>(cDevice);
device->WaitForCommandsToBeScheduled();
void WaitForCommandsToBeScheduled(WGPUDevice device) {
ToBackend(FromAPI(device))->WaitForCommandsToBeScheduled();
}
}} // namespace dawn_native::metal

View File

@ -431,8 +431,7 @@ namespace dawn_native { namespace metal {
const ExternalImageDescriptor* descriptor,
IOSurfaceRef ioSurface,
uint32_t plane) {
const TextureDescriptor* textureDescriptor =
reinterpret_cast<const TextureDescriptor*>(descriptor->cTextureDescriptor);
const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
Ref<Texture> texture =
AcquireRef(new Texture(device, textureDescriptor, TextureState::OwnedInternal));

View File

@ -203,8 +203,7 @@ namespace dawn_native { namespace opengl {
}
TextureBase* Device::CreateTextureWrappingEGLImage(const ExternalImageDescriptor* descriptor,
::EGLImage image) {
const TextureDescriptor* textureDescriptor =
reinterpret_cast<const TextureDescriptor*>(descriptor->cTextureDescriptor);
const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
if (ConsumedError(ValidateTextureDescriptor(this, textureDescriptor))) {
return nullptr;

View File

@ -34,7 +34,7 @@ namespace dawn_native { namespace opengl {
DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
PresentCallback present,
void* presentUserdata) {
Device* backendDevice = reinterpret_cast<Device*>(device);
Device* backendDevice = ToBackend(FromAPI(device));
DawnSwapChainImplementation impl;
impl = CreateSwapChainImplementation(
@ -54,11 +54,12 @@ namespace dawn_native { namespace opengl {
: ExternalImageDescriptor(ExternalImageType::EGLImage) {
}
WGPUTexture WrapExternalEGLImage(WGPUDevice cDevice,
WGPUTexture WrapExternalEGLImage(WGPUDevice device,
const ExternalImageDescriptorEGLImage* descriptor) {
Device* device = reinterpret_cast<Device*>(cDevice);
TextureBase* texture = device->CreateTextureWrappingEGLImage(descriptor, descriptor->image);
return reinterpret_cast<WGPUTexture>(texture);
Device* backendDevice = ToBackend(FromAPI(device));
TextureBase* texture =
backendDevice->CreateTextureWrappingEGLImage(descriptor, descriptor->image);
return ToAPI(texture);
}
}} // namespace dawn_native::opengl

View File

@ -719,8 +719,7 @@ namespace dawn_native { namespace vulkan {
VkSemaphore* outSignalSemaphore,
VkDeviceMemory* outAllocation,
std::vector<VkSemaphore>* outWaitSemaphores) {
const TextureDescriptor* textureDescriptor =
reinterpret_cast<const TextureDescriptor*>(descriptor->cTextureDescriptor);
const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
const DawnTextureInternalUsageDescriptor* internalUsageDesc = nullptr;
FindInChain(textureDescriptor->nextInChain, &internalUsageDesc);
@ -794,8 +793,7 @@ namespace dawn_native { namespace vulkan {
const ExternalImageDescriptorVk* descriptor,
ExternalMemoryHandle memoryHandle,
const std::vector<ExternalSemaphoreHandle>& waitHandles) {
const TextureDescriptor* textureDescriptor =
reinterpret_cast<const TextureDescriptor*>(descriptor->cTextureDescriptor);
const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
// Initial validation
if (ConsumedError(ValidateTextureDescriptor(this, textureDescriptor))) {

View File

@ -29,13 +29,13 @@
namespace dawn_native { namespace vulkan {
VkInstance GetInstance(WGPUDevice device) {
Device* backendDevice = reinterpret_cast<Device*>(device);
Device* backendDevice = ToBackend(FromAPI(device));
return backendDevice->GetVkInstance();
}
DAWN_NATIVE_EXPORT PFN_vkVoidFunction GetInstanceProcAddr(WGPUDevice device,
const char* pName) {
Device* backendDevice = reinterpret_cast<Device*>(device);
Device* backendDevice = ToBackend(FromAPI(device));
return (*backendDevice->fn.GetInstanceProcAddr)(backendDevice->GetVkInstance(), pName);
}
@ -43,7 +43,7 @@ namespace dawn_native { namespace vulkan {
// header as seen in this file uses the wrapped type.
DAWN_NATIVE_EXPORT DawnSwapChainImplementation
CreateNativeSwapChainImpl(WGPUDevice device, ::VkSurfaceKHR surfaceNative) {
Device* backendDevice = reinterpret_cast<Device*>(device);
Device* backendDevice = ToBackend(FromAPI(device));
VkSurfaceKHR surface = VkSurfaceKHR::CreateFromHandle(surfaceNative);
DawnSwapChainImplementation impl;
@ -77,17 +77,17 @@ namespace dawn_native { namespace vulkan {
}
#endif // DAWN_PLATFORM_LINUX
WGPUTexture WrapVulkanImage(WGPUDevice cDevice, const ExternalImageDescriptorVk* descriptor) {
WGPUTexture WrapVulkanImage(WGPUDevice device, const ExternalImageDescriptorVk* descriptor) {
#if defined(DAWN_PLATFORM_LINUX)
switch (descriptor->type) {
case ExternalImageType::OpaqueFD:
case ExternalImageType::DmaBuf: {
Device* backendDevice = ToBackend(FromAPI(device));
const ExternalImageDescriptorFD* fdDescriptor =
static_cast<const ExternalImageDescriptorFD*>(descriptor);
Device* device = reinterpret_cast<Device*>(cDevice);
TextureBase* texture = device->CreateTextureWrappingVulkanImage(
fdDescriptor, fdDescriptor->memoryFD, fdDescriptor->waitFDs);
return reinterpret_cast<WGPUTexture>(texture);
return ToAPI(backendDevice->CreateTextureWrappingVulkanImage(
fdDescriptor, fdDescriptor->memoryFD, fdDescriptor->waitFDs));
}
default:
return nullptr;
@ -97,20 +97,21 @@ namespace dawn_native { namespace vulkan {
#endif // DAWN_PLATFORM_LINUX
}
bool ExportVulkanImage(WGPUTexture cTexture,
bool ExportVulkanImage(WGPUTexture texture,
VkImageLayout desiredLayout,
ExternalImageExportInfoVk* info) {
if (cTexture == nullptr) {
if (texture == nullptr) {
return false;
}
#if defined(DAWN_PLATFORM_LINUX)
switch (info->type) {
case ExternalImageType::OpaqueFD:
case ExternalImageType::DmaBuf: {
Texture* texture = reinterpret_cast<Texture*>(cTexture);
Device* device = ToBackend(texture->GetDevice());
Texture* backendTexture = ToBackend(FromAPI(texture));
Device* device = ToBackend(backendTexture->GetDevice());
ExternalImageExportInfoFD* fdInfo = static_cast<ExternalImageExportInfoFD*>(info);
return device->SignalAndExportExternalTexture(texture, desiredLayout, fdInfo,
return device->SignalAndExportExternalTexture(backendTexture, desiredLayout, fdInfo,
&fdInfo->semaphoreHandles);
}
default:

View File

@ -118,14 +118,14 @@ namespace dawn_native { namespace vulkan {
// semaphore extensions to import the image and wait on the provided synchronizaton
// primitives before the texture can be used.
// On failure, returns a nullptr.
DAWN_NATIVE_EXPORT WGPUTexture WrapVulkanImage(WGPUDevice cDevice,
DAWN_NATIVE_EXPORT WGPUTexture WrapVulkanImage(WGPUDevice device,
const ExternalImageDescriptorVk* descriptor);
// Exports external memory from a Vulkan image. This must be called on wrapped textures
// before they are destroyed. It writes the semaphore to wait on and the old/new image
// layouts to |info|. Pass VK_IMAGE_LAYOUT_UNDEFINED as |desiredLayout| if you don't want to
// perform a layout transition.
DAWN_NATIVE_EXPORT bool ExportVulkanImage(WGPUTexture cTexture,
DAWN_NATIVE_EXPORT bool ExportVulkanImage(WGPUTexture texture,
VkImageLayout desiredLayout,
ExternalImageExportInfoVk* info);

View File

@ -73,7 +73,7 @@ TEST_F(FeatureTests, GetEnabledFeatures) {
dawn_native::DawnDeviceDescriptor deviceDescriptor;
deviceDescriptor.requiredFeatures = {featureName};
dawn_native::DeviceBase* deviceBase =
reinterpret_cast<dawn_native::DeviceBase*>(adapter.CreateDevice(&deviceDescriptor));
dawn_native::FromAPI(adapter.CreateDevice(&deviceDescriptor));
std::vector<const char*> enabledFeatures = deviceBase->GetEnabledFeatures();
ASSERT_EQ(1u, enabledFeatures.size());
ASSERT_EQ(0, std::strcmp(featureName, enabledFeatures[0]));

View File

@ -318,7 +318,7 @@ TEST_F(CommandBufferValidationTest, DestroyEncoder) {
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
pass.EndPass();
reinterpret_cast<dawn_native::CommandEncoder*>(encoder.Get())->Destroy();
dawn_native::FromAPI(encoder.Get())->Destroy();
ASSERT_DEVICE_ERROR(encoder.Finish(), HasSubstr("Destroyed encoder cannot be finished."));
}
@ -327,13 +327,13 @@ TEST_F(CommandBufferValidationTest, DestroyEncoder) {
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
pass.EndPass();
reinterpret_cast<dawn_native::CommandEncoder*>(encoder.Get())->Destroy();
dawn_native::FromAPI(encoder.Get())->Destroy();
}
// Destroyed encoder should allow encoding, and emit error on finish.
{
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
reinterpret_cast<dawn_native::CommandEncoder*>(encoder.Get())->Destroy();
dawn_native::FromAPI(encoder.Get())->Destroy();
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
pass.EndPass();
ASSERT_DEVICE_ERROR(encoder.Finish(), HasSubstr("Destroyed encoder cannot be finished."));
@ -342,7 +342,7 @@ TEST_F(CommandBufferValidationTest, DestroyEncoder) {
// Destroyed encoder should allow encoding and shouldn't emit an error if never finished.
{
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
reinterpret_cast<dawn_native::CommandEncoder*>(encoder.Get())->Destroy();
dawn_native::FromAPI(encoder.Get())->Destroy();
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
pass.EndPass();
}
@ -353,21 +353,21 @@ TEST_F(CommandBufferValidationTest, DestroyEncoder) {
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
pass.EndPass();
encoder.Finish();
reinterpret_cast<dawn_native::CommandEncoder*>(encoder.Get())->Destroy();
dawn_native::FromAPI(encoder.Get())->Destroy();
}
// Destroying an encoder twice should not emit any errors.
{
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
reinterpret_cast<dawn_native::CommandEncoder*>(encoder.Get())->Destroy();
reinterpret_cast<dawn_native::CommandEncoder*>(encoder.Get())->Destroy();
dawn_native::FromAPI(encoder.Get())->Destroy();
dawn_native::FromAPI(encoder.Get())->Destroy();
}
// Destroying an encoder twice and then calling finish should fail.
{
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
reinterpret_cast<dawn_native::CommandEncoder*>(encoder.Get())->Destroy();
reinterpret_cast<dawn_native::CommandEncoder*>(encoder.Get())->Destroy();
dawn_native::FromAPI(encoder.Get())->Destroy();
dawn_native::FromAPI(encoder.Get())->Destroy();
ASSERT_DEVICE_ERROR(encoder.Finish(), HasSubstr("Destroyed encoder cannot be finished."));
}
}

View File

@ -167,8 +167,7 @@ TEST_F(ShaderModuleValidationTest, GetCompilationMessages) {
return vec4<f32>(0.0, 1.0, 0.0, 1.0);
})");
dawn_native::ShaderModuleBase* shaderModuleBase =
reinterpret_cast<dawn_native::ShaderModuleBase*>(shaderModule.Get());
dawn_native::ShaderModuleBase* shaderModuleBase = dawn_native::FromAPI(shaderModule.Get());
dawn_native::OwnedCompilationMessages* messages = shaderModuleBase->GetCompilationMessages();
messages->ClearMessages();
messages->AddMessageForTesting("Info Message");

View File

@ -42,7 +42,7 @@ class D3D12ResidencyTestBase : public DawnTest {
// Restrict Dawn's budget to create an artificial budget.
dawn_native::d3d12::Device* d3dDevice =
reinterpret_cast<dawn_native::d3d12::Device*>(device.Get());
dawn_native::d3d12::ToBackend(dawn_native::FromAPI(device.Get()));
d3dDevice->GetResidencyManager()->RestrictBudgetForTesting(kRestrictedBudgetSize);
// Initialize a source buffer on the GPU to serve as a source to quickly copy data to other
@ -94,18 +94,20 @@ class D3D12ResourceResidencyTests : public D3D12ResidencyTestBase {
bool CheckAllocationMethod(wgpu::Buffer buffer,
dawn_native::AllocationMethod allocationMethod) const {
dawn_native::d3d12::Buffer* d3dBuffer =
reinterpret_cast<dawn_native::d3d12::Buffer*>(buffer.Get());
dawn_native::d3d12::ToBackend(dawn_native::FromAPI(buffer.Get()));
return d3dBuffer->CheckAllocationMethodForTesting(allocationMethod);
}
bool CheckIfBufferIsResident(wgpu::Buffer buffer) const {
dawn_native::d3d12::Buffer* d3dBuffer =
reinterpret_cast<dawn_native::d3d12::Buffer*>(buffer.Get());
dawn_native::d3d12::ToBackend(dawn_native::FromAPI(buffer.Get()));
return d3dBuffer->CheckIsResidentForTesting();
}
bool IsUMA() const {
return reinterpret_cast<dawn_native::d3d12::Device*>(device.Get())->GetDeviceInfo().isUMA;
return dawn_native::d3d12::ToBackend(dawn_native::FromAPI(device.Get()))
->GetDeviceInfo()
.isUMA;
}
};
@ -367,7 +369,7 @@ TEST_P(D3D12DescriptorResidencyTests, SwitchedViewHeapResidency) {
wgpu::Sampler sampler = device.CreateSampler();
dawn_native::d3d12::Device* d3dDevice =
reinterpret_cast<dawn_native::d3d12::Device*>(device.Get());
dawn_native::d3d12::ToBackend(dawn_native::FromAPI(device.Get()));
dawn_native::d3d12::ShaderVisibleDescriptorAllocator* allocator =
d3dDevice->GetViewShaderVisibleDescriptorAllocator();

View File

@ -128,7 +128,7 @@ class EGLImageTestBase : public DawnTest {
void* data,
size_t size) {
dawn_native::opengl::Device* openglDevice =
reinterpret_cast<dawn_native::opengl::Device*>(device.Get());
dawn_native::opengl::ToBackend(dawn_native::FromAPI(device.Get()));
const dawn_native::opengl::OpenGLFunctions& gl = openglDevice->gl;
GLuint tex;
gl.GenTextures(1, &tex);
@ -300,7 +300,7 @@ class EGLImageUsageTests : public EGLImageTestBase {
void* data,
size_t dataSize) {
dawn_native::opengl::Device* openglDevice =
reinterpret_cast<dawn_native::opengl::Device*>(device.Get());
dawn_native::opengl::ToBackend(dawn_native::FromAPI(device.Get()));
const dawn_native::opengl::OpenGLFunctions& gl = openglDevice->gl;
// Get a texture view for the eglImage

View File

@ -53,14 +53,13 @@ class InternalStorageBufferBindingTests : public DawnTest {
bglDesc.entryCount = 1;
bglDesc.entries = &bglEntry;
dawn_native::DeviceBase* nativeDevice =
reinterpret_cast<dawn_native::DeviceBase*>(device.Get());
dawn_native::DeviceBase* nativeDevice = dawn_native::FromAPI(device.Get());
Ref<dawn_native::BindGroupLayoutBase> bglRef =
nativeDevice->CreateBindGroupLayout(&bglDesc, true).AcquireSuccess();
wgpu::BindGroupLayout bgl =
wgpu::BindGroupLayout::Acquire(reinterpret_cast<WGPUBindGroupLayout>(bglRef.Detach()));
wgpu::BindGroupLayout::Acquire(dawn_native::ToAPI(bglRef.Detach()));
// Create pipeline layout
wgpu::PipelineLayoutDescriptor plDesc;

View File

@ -26,10 +26,10 @@ namespace {
wgpu::Buffer availability,
wgpu::Buffer params) {
ASSERT_TRUE(dawn_native::EncodeConvertTimestampsToNanoseconds(
reinterpret_cast<dawn_native::CommandEncoder*>(encoder.Get()),
reinterpret_cast<dawn_native::BufferBase*>(timestamps.Get()),
reinterpret_cast<dawn_native::BufferBase*>(availability.Get()),
reinterpret_cast<dawn_native::BufferBase*>(params.Get())).IsSuccess());
dawn_native::FromAPI(encoder.Get()), dawn_native::FromAPI(timestamps.Get()),
dawn_native::FromAPI(availability.Get()),
dawn_native::FromAPI(params.Get()))
.IsSuccess());
}
class InternalShaderExpectation : public detail::Expectation {

View File

@ -29,7 +29,7 @@ namespace {
DawnTest::SetUp();
DAWN_TEST_UNSUPPORTED_IF(UsesWire());
mDeviceVk = reinterpret_cast<dawn_native::vulkan::Device*>(device.Get());
mDeviceVk = dawn_native::vulkan::ToBackend(dawn_native::FromAPI(device.Get()));
}
protected:

View File

@ -44,7 +44,7 @@ namespace dawn_native { namespace vulkan {
DAWN_TEST_UNSUPPORTED_IF(UsesWire());
gbmDevice = CreateGbmDevice();
deviceVk = reinterpret_cast<dawn_native::vulkan::Device*>(device.Get());
deviceVk = dawn_native::vulkan::ToBackend(dawn_native::FromAPI(device.Get()));
defaultGbmBo = CreateGbmBo(1, 1, true /* linear */);
defaultStride = gbm_bo_get_stride_for_plane(defaultGbmBo, 0);
@ -311,14 +311,13 @@ namespace dawn_native { namespace vulkan {
}
// Create another device based on the original
backendAdapter =
reinterpret_cast<dawn_native::vulkan::Adapter*>(deviceVk->GetAdapter());
backendAdapter = dawn_native::vulkan::ToBackend(deviceVk->GetAdapter());
deviceDescriptor.forceEnabledToggles = GetParam().forceEnabledWorkarounds;
deviceDescriptor.forceDisabledToggles = GetParam().forceDisabledWorkarounds;
secondDeviceVk = reinterpret_cast<dawn_native::vulkan::Device*>(
backendAdapter->CreateDevice(&deviceDescriptor));
secondDevice = wgpu::Device::Acquire(reinterpret_cast<WGPUDevice>(secondDeviceVk));
secondDeviceVk =
dawn_native::vulkan::ToBackend(backendAdapter->CreateDevice(&deviceDescriptor));
secondDevice = wgpu::Device::Acquire(dawn_native::ToAPI(secondDeviceVk));
}
protected:
@ -691,10 +690,9 @@ namespace dawn_native { namespace vulkan {
// device 1 = |device|
// device 2 = |secondDevice|
// Create device 3
dawn_native::vulkan::Device* thirdDeviceVk = reinterpret_cast<dawn_native::vulkan::Device*>(
backendAdapter->CreateDevice(&deviceDescriptor));
wgpu::Device thirdDevice =
wgpu::Device::Acquire(reinterpret_cast<WGPUDevice>(thirdDeviceVk));
dawn_native::vulkan::Device* thirdDeviceVk =
dawn_native::vulkan::ToBackend(backendAdapter->CreateDevice(&deviceDescriptor));
wgpu::Device thirdDevice = wgpu::Device::Acquire(dawn_native::ToAPI(thirdDeviceVk));
// Make queue for device 2 and 3
wgpu::Queue secondDeviceQueue = secondDevice.GetQueue();

View File

@ -40,7 +40,7 @@ namespace dawn_native { namespace vulkan {
DawnTest::SetUp();
DAWN_TEST_UNSUPPORTED_IF(UsesWire());
deviceVk = reinterpret_cast<dawn_native::vulkan::Device*>(device.Get());
deviceVk = dawn_native::vulkan::ToBackend(dawn_native::FromAPI(device.Get()));
}
// Creates a VkImage with external memory
@ -382,14 +382,13 @@ namespace dawn_native { namespace vulkan {
DAWN_TEST_UNSUPPORTED_IF(UsesWire());
// Create another device based on the original
backendAdapter =
reinterpret_cast<dawn_native::vulkan::Adapter*>(deviceVk->GetAdapter());
backendAdapter = dawn_native::vulkan::ToBackend(deviceVk->GetAdapter());
deviceDescriptor.forceEnabledToggles = GetParam().forceEnabledWorkarounds;
deviceDescriptor.forceDisabledToggles = GetParam().forceDisabledWorkarounds;
secondDeviceVk = reinterpret_cast<dawn_native::vulkan::Device*>(
backendAdapter->CreateDevice(&deviceDescriptor));
secondDevice = wgpu::Device::Acquire(reinterpret_cast<WGPUDevice>(secondDeviceVk));
secondDeviceVk =
dawn_native::vulkan::ToBackend(backendAdapter->CreateDevice(&deviceDescriptor));
secondDevice = wgpu::Device::Acquire(dawn_native::ToAPI(secondDeviceVk));
CreateBindExportImage(deviceVk, 1, 1, VK_FORMAT_R8G8B8A8_UNORM, &defaultImage,
&defaultAllocation, &defaultAllocationSize,
@ -797,10 +796,9 @@ namespace dawn_native { namespace vulkan {
// device 1 = |device|
// device 2 = |secondDevice|
// Create device 3
dawn_native::vulkan::Device* thirdDeviceVk = reinterpret_cast<dawn_native::vulkan::Device*>(
backendAdapter->CreateDevice(&deviceDescriptor));
wgpu::Device thirdDevice =
wgpu::Device::Acquire(reinterpret_cast<WGPUDevice>(thirdDeviceVk));
dawn_native::vulkan::Device* thirdDeviceVk =
dawn_native::vulkan::ToBackend(backendAdapter->CreateDevice(&deviceDescriptor));
wgpu::Device thirdDevice = wgpu::Device::Acquire(dawn_native::ToAPI(thirdDeviceVk));
// Make queue for device 2 and 3
wgpu::Queue secondDeviceQueue = secondDevice.GetQueue();