Support NV12 via ExternalImageDescriptorDmaBuf on CROS

On Intel platforms, all planes in fact share the same dma-buf, so the
DISJOINT bit shouldn't be used when creating the VkImage.
For multi-planar formats, VkImageDrmFormatModifierListCreateInfoEXT
has to be used instead of
VkImageDrmFormatModifierExplicitCreateInfoEXT.

Bug: chromium:1258986
Change-Id: I25306a438e7ba9fd981848e63068e486bbddf11d
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/68961
Reviewed-by: Austin Eng <enga@chromium.org>
Reviewed-by: Corentin Wallez <cwallez@chromium.org>
Commit-Queue: Jie A Chen <jie.a.chen@intel.com>
jchen10 2021-12-13 02:38:44 +00:00 committed by Dawn LUCI CQ
parent 16e3221a0e
commit ffb0024a89
17 changed files with 1106 additions and 610 deletions
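
For reference, the non-disjoint path this change takes boils down to creating the VkImage with VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT, a VkImageDrmFormatModifierListCreateInfoEXT in the pNext chain, and VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT (so R8/RG8 per-plane views can be created), while leaving VK_IMAGE_CREATE_DISJOINT_BIT unset. The sketch below is not Dawn code; the helper name and the chosen usage flags are illustrative assumptions.

// Sketch only: create a non-disjoint NV12 VkImage for a dma-buf import.
// Assumes VK_EXT_image_drm_format_modifier and dma-buf external memory are
// enabled on `device`. Error handling omitted.
#include <vulkan/vulkan.h>

VkImage CreateNonDisjointNv12Image(VkDevice device,
                                   uint32_t width,
                                   uint32_t height,
                                   uint64_t drmModifier) {
    VkExternalMemoryImageCreateInfo externalInfo = {};
    externalInfo.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
    externalInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;

    // Pass the modifier as a list; the driver picks the plane layouts, so no
    // VkImageDrmFormatModifierExplicitCreateInfoEXT (and no per-plane offsets) is needed.
    VkImageDrmFormatModifierListCreateInfoEXT modifierList = {};
    modifierList.sType = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT;
    modifierList.pNext = &externalInfo;
    modifierList.drmFormatModifierCount = 1;
    modifierList.pDrmFormatModifiers = &drmModifier;

    VkImageCreateInfo createInfo = {};
    createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    createInfo.pNext = &modifierList;
    // MUTABLE_FORMAT_BIT allows R8/RG8 views of the two planes; DISJOINT_BIT is
    // intentionally not set because all planes share one dma-buf.
    createInfo.flags = VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
    createInfo.imageType = VK_IMAGE_TYPE_2D;
    createInfo.format = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM;
    createInfo.extent = {width, height, 1};
    createInfo.mipLevels = 1;
    createInfo.arrayLayers = 1;
    createInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    createInfo.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
    createInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;

    VkImage image = VK_NULL_HANDLE;
    vkCreateImage(device, &createInfo, nullptr, &image);
    return image;
}

Because the image is not disjoint, a single dedicated allocation covers all planes, which is consistent with the memory-import hunk further down.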

View File

@@ -250,8 +250,9 @@ namespace dawn_native {
"The texture usage (%s) includes %s, which is incompatible with the format (%s).",
usage, wgpu::TextureUsage::StorageBinding, format->format);
// Only allows simple readonly texture usages.
constexpr wgpu::TextureUsage kValidMultiPlanarUsages =
wgpu::TextureUsage::TextureBinding;
wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopySrc;
DAWN_INVALID_IF(
format->IsMultiPlanar() && !IsSubset(usage, kValidMultiPlanarUsages),
"The texture usage (%s) is incompatible with the multi-planar format (%s).", usage,

View File

@@ -1228,7 +1228,8 @@ namespace dawn_native { namespace d3d12 {
if (texture->GetFormat().IsMultiPlanar()) {
const Aspect planeAspect = ConvertViewAspect(GetFormat(), descriptor->aspect);
planeSlice = GetAspectIndex(planeAspect);
mSrvDesc.Format = D3D12TextureFormat(GetFormat().GetAspectInfo(planeAspect).format);
mSrvDesc.Format =
D3D12TextureFormat(texture->GetFormat().GetAspectInfo(planeAspect).format);
}
// Currently we always use D3D12_TEX2D_ARRAY_SRV because we cannot specify base array layer

View File

@@ -151,6 +151,12 @@ namespace dawn_native { namespace vulkan {
mSupportedFeatures.EnableFeature(Feature::TimestampQuery);
}
#if defined(DAWN_USE_SYNC_FDS)
// TODO(chromium:1258986): Precisely enable the feature by querying the device's format
// features.
mSupportedFeatures.EnableFeature(Feature::MultiPlanarFormats);
#endif
return {};
}
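
The TODO above mentions querying the device's format features before advertising multiplanar-formats. A hedged sketch of what such a query could look like with core Vulkan 1.1, shown only as an illustration (the helper name and the exact feature bit checked are assumptions, not what this change implements):

// Sketch only: check whether NV12 (VK_FORMAT_G8_B8R8_2PLANE_420_UNORM) is
// sampleable before enabling the MultiPlanarFormats feature.
#include <vulkan/vulkan.h>

bool SupportsSampledNv12(VkPhysicalDevice physicalDevice) {
    VkFormatProperties2 props = {};
    props.sType = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2;
    vkGetPhysicalDeviceFormatProperties2(
        physicalDevice, VK_FORMAT_G8_B8R8_2PLANE_420_UNORM, &props);
    // A dma-buf import uses DRM-modifier tiling, so a real check would look at
    // VkDrmFormatModifierPropertiesListEXT instead of optimalTilingFeatures.
    return (props.formatProperties.optimalTilingFeatures &
            VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) != 0;
}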

View File

@@ -420,6 +420,8 @@ namespace dawn_native { namespace vulkan {
return VK_FORMAT_ASTC_12x12_SRGB_BLOCK;
case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
return VK_FORMAT_G8_B8R8_2PLANE_420_UNORM;
// TODO(dawn:666): implement stencil8
case wgpu::TextureFormat::Stencil8:
// TODO(dawn:690): implement depth24unorm-stencil8
@@ -624,10 +626,12 @@ namespace dawn_native { namespace vulkan {
: TextureBase(device, descriptor, state),
// A usage of none will make sure the texture is transitioned before its first use as
// required by the Vulkan spec.
mSubresourceLastUsages(ComputeAspectsForSubresourceStorage(),
GetArrayLayers(),
GetNumMipLevels(),
wgpu::TextureUsage::None) {
mSubresourceLastUsages(std::make_unique<SubresourceStorage<wgpu::TextureUsage>>(
(ShouldCombineDepthStencilBarriers() ? Aspect::CombinedDepthStencil
: GetFormat().aspects),
GetArrayLayers(),
GetNumMipLevels(),
wgpu::TextureUsage::None)) {
}
MaybeError Texture::InitializeAsInternalTexture(VkImageUsageFlags extraUsages) {
@@ -693,8 +697,16 @@ namespace dawn_native { namespace vulkan {
external_memory::Service* externalMemoryService) {
VkFormat format = VulkanImageFormat(ToBackend(GetDevice()), GetFormat().format);
VkImageUsageFlags usage = VulkanImageUsage(GetInternalUsage(), GetFormat());
DAWN_INVALID_IF(!externalMemoryService->SupportsCreateImage(descriptor, format, usage),
DAWN_INVALID_IF(!externalMemoryService->SupportsCreateImage(descriptor, format, usage,
&mSupportsDisjointVkImage),
"Creating an image from external memory is not supported.");
// mSubresourceLastUsages was initialized with Plane0/Plane1 in the constructor for
// multiplanar formats, so we need to correct it to Color here.
if (ShouldCombineMultiPlaneBarriers()) {
mSubresourceLastUsages = std::make_unique<SubresourceStorage<wgpu::TextureUsage>>(
ComputeAspectsForSubresourceStorage(), GetArrayLayers(), GetNumMipLevels(),
wgpu::TextureUsage::None);
}
mExternalState = ExternalState::PendingAcquire;
@@ -768,14 +780,15 @@ namespace dawn_native { namespace vulkan {
// Release the texture
mExternalState = ExternalState::Released;
Aspect aspects = ComputeAspectsForSubresourceStorage();
ASSERT(GetNumMipLevels() == 1 && GetArrayLayers() == 1);
wgpu::TextureUsage usage = mSubresourceLastUsages.Get(Aspect::Color, 0, 0);
wgpu::TextureUsage usage = mSubresourceLastUsages->Get(aspects, 0, 0);
VkImageMemoryBarrier barrier;
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.pNext = nullptr;
barrier.image = GetHandle();
barrier.subresourceRange.aspectMask = VulkanAspectMask(GetFormat().aspects);
barrier.subresourceRange.aspectMask = VulkanAspectMask(aspects);
barrier.subresourceRange.baseMipLevel = 0;
barrier.subresourceRange.levelCount = 1;
barrier.subresourceRange.baseArrayLayer = 0;
@@ -867,24 +880,6 @@ namespace dawn_native { namespace vulkan {
return mHandle;
}
VkImageAspectFlags Texture::GetVkAspectMask(wgpu::TextureAspect aspect) const {
// TODO(enga): These masks could be precomputed.
switch (aspect) {
case wgpu::TextureAspect::All:
return VulkanAspectMask(GetFormat().aspects);
case wgpu::TextureAspect::DepthOnly:
ASSERT(GetFormat().aspects & Aspect::Depth);
return VulkanAspectMask(Aspect::Depth);
case wgpu::TextureAspect::StencilOnly:
ASSERT(GetFormat().aspects & Aspect::Stencil);
return VulkanAspectMask(Aspect::Stencil);
case wgpu::TextureAspect::Plane0Only:
case wgpu::TextureAspect::Plane1Only:
break;
}
UNREACHABLE();
}
void Texture::TweakTransitionForExternalUsage(CommandRecordingContext* recordingContext,
std::vector<VkImageMemoryBarrier>* barriers,
size_t transitionBarrierStart) {
@@ -897,9 +892,10 @@ namespace dawn_native { namespace vulkan {
if (mExternalState == ExternalState::PendingAcquire) {
if (barriers->size() == transitionBarrierStart) {
barriers->push_back(BuildMemoryBarrier(
this, wgpu::TextureUsage::None, wgpu::TextureUsage::None,
SubresourceRange::SingleMipAndLayer(0, 0, GetFormat().aspects)));
barriers->push_back(
BuildMemoryBarrier(this, wgpu::TextureUsage::None, wgpu::TextureUsage::None,
SubresourceRange::SingleMipAndLayer(
0, 0, ComputeAspectsForSubresourceStorage())));
}
VkImageMemoryBarrier* barrier = &(*barriers)[transitionBarrierStart];
@@ -972,14 +968,32 @@ namespace dawn_native { namespace vulkan {
return false;
}
// Base Vulkan doesn't support transitioning depth and stencil separately. We work around
// this limitation by combining the usages in the two planes of `textureUsages` into a
// single plane in a new SubresourceStorage<TextureUsage>. The barriers will be produced
// for DEPTH | STENCIL since the SubresourceRange uses Aspect::CombinedDepthStencil.
bool Texture::ShouldCombineDepthStencilBarriers() const {
return GetFormat().aspects == (Aspect::Depth | Aspect::Stencil);
}
// The Vulkan spec requires:
// "If image has a single-plane color format or is not disjoint, then the aspectMask member of
// subresourceRange must be VK_IMAGE_ASPECT_COLOR_BIT.".
// For multi-planar formats, we currently only support importing them in a non-disjoint way.
bool Texture::ShouldCombineMultiPlaneBarriers() const {
// TODO(chromium:1258986): Figure out how to support disjoint vkImage.
ASSERT(!mSupportsDisjointVkImage);
return GetFormat().aspects == (Aspect::Plane0 | Aspect::Plane1);
}
Aspect Texture::ComputeAspectsForSubresourceStorage() const {
if (ShouldCombineDepthStencilBarriers()) {
return Aspect::CombinedDepthStencil;
}
// Force to use Aspect::Color for Aspect::Plane0/1.
if (ShouldCombineMultiPlaneBarriers()) {
return Aspect::Color;
}
return GetFormat().aspects;
}
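
To make the quoted spec requirement concrete: on a non-disjoint multi-planar image every barrier must use VK_IMAGE_ASPECT_COLOR_BIT, which is why the subresource storage collapses Plane0/Plane1 into Aspect::Color above. A small illustrative helper, not Dawn code (the disjoint branch is hypothetical, since disjoint import isn't supported yet):

// Sketch only: pick the barrier aspect mask for a 2-plane (NV12) image.
// `disjoint` is assumed to mirror VK_IMAGE_CREATE_DISJOINT_BIT at creation time.
#include <vulkan/vulkan.h>

VkImageAspectFlags PlaneBarrierAspectMask(bool disjoint, uint32_t plane) {
    if (!disjoint) {
        // Non-disjoint: the spec requires VK_IMAGE_ASPECT_COLOR_BIT, so all
        // per-plane usages fold into a single color subresource.
        return VK_IMAGE_ASPECT_COLOR_BIT;
    }
    return (plane == 0) ? VK_IMAGE_ASPECT_PLANE_0_BIT : VK_IMAGE_ASPECT_PLANE_1_BIT;
}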
@@ -988,16 +1002,13 @@ namespace dawn_native { namespace vulkan {
std::vector<VkImageMemoryBarrier>* imageBarriers,
VkPipelineStageFlags* srcStages,
VkPipelineStageFlags* dstStages) {
// Base Vulkan doesn't support transitioning depth and stencil separately. We work around
// this limitation by combining the usages in the two planes of `textureUsages` into a
// single plane in a new SubresourceStorage<TextureUsage>. The barriers will be produced
// for DEPTH | STENCIL since the SubresourceRange uses Aspect::CombinedDepthStencil.
if (ShouldCombineDepthStencilBarriers()) {
SubresourceStorage<wgpu::TextureUsage> combinedUsages(
Aspect::CombinedDepthStencil, GetArrayLayers(), GetNumMipLevels());
if (ShouldCombineBarriers()) {
Aspect combinedAspect = ComputeAspectsForSubresourceStorage();
SubresourceStorage<wgpu::TextureUsage> combinedUsages(combinedAspect, GetArrayLayers(),
GetNumMipLevels());
textureUsages.Iterate([&](const SubresourceRange& range, wgpu::TextureUsage usage) {
SubresourceRange updateRange = range;
updateRange.aspects = Aspect::CombinedDepthStencil;
updateRange.aspects = combinedAspect;
combinedUsages.Update(
updateRange, [&](const SubresourceRange&, wgpu::TextureUsage* combinedUsage) {
@@ -1028,7 +1039,7 @@ namespace dawn_native { namespace vulkan {
// TODO(crbug.com/dawn/814): support 1D textures.
ASSERT(GetDimension() != wgpu::TextureDimension::e1D);
mSubresourceLastUsages.Merge(
mSubresourceLastUsages->Merge(
subresourceUsages, [&](const SubresourceRange& range, wgpu::TextureUsage* lastUsage,
const wgpu::TextureUsage& newUsage) {
if (newUsage == wgpu::TextureUsage::None ||
@@ -1081,15 +1092,9 @@ namespace dawn_native { namespace vulkan {
std::vector<VkImageMemoryBarrier>* imageBarriers,
VkPipelineStageFlags* srcStages,
VkPipelineStageFlags* dstStages) {
// Base Vulkan doesn't support transitioning depth and stencil separately. We work around
// this limitation by modifying the range to be on CombinedDepthStencil. The barriers will
// be produced for DEPTH | STENCIL since the SubresourceRange uses
// Aspect::CombinedDepthStencil.
if (ShouldCombineDepthStencilBarriers()) {
if (ShouldCombineBarriers()) {
SubresourceRange updatedRange = range;
updatedRange.aspects = Aspect::CombinedDepthStencil;
std::vector<VkImageMemoryBarrier> newBarriers;
updatedRange.aspects = ComputeAspectsForSubresourceStorage();
TransitionUsageAndGetResourceBarrierImpl(usage, updatedRange, imageBarriers, srcStages,
dstStages);
} else {
@@ -1108,7 +1113,7 @@ namespace dawn_native { namespace vulkan {
const Format& format = GetFormat();
wgpu::TextureUsage allLastUsages = wgpu::TextureUsage::None;
mSubresourceLastUsages.Update(
mSubresourceLastUsages->Update(
range, [&](const SubresourceRange& range, wgpu::TextureUsage* lastUsage) {
if (CanReuseWithoutBarrier(*lastUsage, usage)) {
return;
@@ -1281,7 +1286,8 @@ namespace dawn_native { namespace vulkan {
}
VkImageLayout Texture::GetCurrentLayoutForSwapChain() const {
return VulkanImageLayout(this, mSubresourceLastUsages.Get(Aspect::Color, 0, 0));
ASSERT(GetFormat().aspects == Aspect::Color);
return VulkanImageLayout(this, mSubresourceLastUsages->Get(Aspect::Color, 0, 0));
}
// static

View File

@@ -62,7 +62,6 @@ namespace dawn_native { namespace vulkan {
VkImage nativeImage);
VkImage GetHandle() const;
VkImageAspectFlags GetVkAspectMask(wgpu::TextureAspect aspect) const;
// Transitions the texture to be used as `usage`, recording any necessary barrier in
// `commands`.
@@ -139,8 +138,18 @@ namespace dawn_native { namespace vulkan {
// indicates whether we should combine depth and stencil barriers to accommodate this
// limitation.
bool ShouldCombineDepthStencilBarriers() const;
// This indicates whether the VK_IMAGE_ASPECT_COLOR_BIT instead of
// VK_IMAGE_ASPECT_PLANE_n_BIT must be used.
bool ShouldCombineMultiPlaneBarriers() const;
bool ShouldCombineBarriers() const {
return ShouldCombineDepthStencilBarriers() || ShouldCombineMultiPlaneBarriers();
}
// Compute the Aspects of the SubresourceStorage for this texture depending on whether we're
// doing the workaround for combined depth and stencil barriers.
// doing the workaround for combined depth and stencil barriers, or combining multi-plane
// barriers.
Aspect ComputeAspectsForSubresourceStorage() const;
VkImage mHandle = VK_NULL_HANDLE;
@@ -165,7 +174,9 @@ namespace dawn_native { namespace vulkan {
// Note that in early Vulkan versions it is not possible to transition depth and stencil
// separately so textures with Depth|Stencil aspects will have a single Depth aspect in the
// storage.
SubresourceStorage<wgpu::TextureUsage> mSubresourceLastUsages;
std::unique_ptr<SubresourceStorage<wgpu::TextureUsage>> mSubresourceLastUsages;
bool mSupportsDisjointVkImage = false;
};
class TextureView final : public TextureViewBase {

View File

@@ -71,7 +71,12 @@ namespace dawn_native { namespace vulkan {
break;
case Aspect::Plane0:
flags |= VK_IMAGE_ASPECT_PLANE_0_BIT;
break;
case Aspect::Plane1:
flags |= VK_IMAGE_ASPECT_PLANE_1_BIT;
break;
case Aspect::None:
UNREACHABLE();
}

View File

@@ -49,7 +49,8 @@ namespace dawn_native { namespace vulkan { namespace external_memory {
// True if the device reports it supports creating VkImages from external memory.
bool SupportsCreateImage(const ExternalImageDescriptor* descriptor,
VkFormat format,
VkImageUsageFlags usage);
VkImageUsageFlags usage,
bool* supportsDisjoint);
// Returns the parameters required for importing memory
ResultOrError<MemoryImportParams> GetMemoryImportParams(

View File

@@ -17,6 +17,7 @@
#include "dawn_native/vulkan/BackendVk.h"
#include "dawn_native/vulkan/DeviceVk.h"
#include "dawn_native/vulkan/ResourceMemoryAllocatorVk.h"
#include "dawn_native/vulkan/UtilsVulkan.h"
#include "dawn_native/vulkan/VulkanError.h"
#include "dawn_native/vulkan/external_memory/MemoryService.h"
@@ -24,40 +25,92 @@ namespace dawn_native { namespace vulkan { namespace external_memory {
namespace {
// Some modifiers use multiple planes (for example, see the comment for
// I915_FORMAT_MOD_Y_TILED_CCS in drm/drm_fourcc.h), but dma-buf import in Dawn only
// supports single-plane formats.
ResultOrError<uint32_t> GetModifierPlaneCount(const VulkanFunctions& fn,
VkPhysicalDevice physicalDevice,
VkFormat format,
uint64_t modifier) {
VkDrmFormatModifierPropertiesListEXT formatModifierPropsList;
formatModifierPropsList.sType =
VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT;
formatModifierPropsList.pNext = nullptr;
bool GetFormatModifierProps(const VulkanFunctions& fn,
VkPhysicalDevice physicalDevice,
VkFormat format,
uint64_t modifier,
VkDrmFormatModifierPropertiesEXT* formatModifierProps) {
std::vector<VkDrmFormatModifierPropertiesEXT> formatModifierPropsVector;
VkFormatProperties2 formatProps = {};
formatProps.sType = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2;
PNextChainBuilder formatPropsChain(&formatProps);
VkDrmFormatModifierPropertiesListEXT formatModifierPropsList = {};
formatModifierPropsList.drmFormatModifierCount = 0;
formatModifierPropsList.pDrmFormatModifierProperties = nullptr;
VkFormatProperties2 formatProps;
formatProps.sType = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2;
formatProps.pNext = &formatModifierPropsList;
formatPropsChain.Add(&formatModifierPropsList,
VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT);
fn.GetPhysicalDeviceFormatProperties2(physicalDevice, format, &formatProps);
uint32_t modifierCount = formatModifierPropsList.drmFormatModifierCount;
std::vector<VkDrmFormatModifierPropertiesEXT> formatModifierProps(modifierCount);
formatModifierPropsList.pDrmFormatModifierProperties = formatModifierProps.data();
formatModifierPropsVector.resize(modifierCount);
formatModifierPropsList.pDrmFormatModifierProperties = formatModifierPropsVector.data();
fn.GetPhysicalDeviceFormatProperties2(physicalDevice, format, &formatProps);
for (const auto& props : formatModifierProps) {
for (const auto& props : formatModifierPropsVector) {
if (props.drmFormatModifier == modifier) {
uint32_t count = props.drmFormatModifierPlaneCount;
return count;
*formatModifierProps = props;
return true;
}
}
return false;
}
// Some modifiers use multiple planes (for example, see the comment for
// I915_FORMAT_MOD_Y_TILED_CCS in drm/drm_fourcc.h).
ResultOrError<uint32_t> GetModifierPlaneCount(const VulkanFunctions& fn,
VkPhysicalDevice physicalDevice,
VkFormat format,
uint64_t modifier) {
VkDrmFormatModifierPropertiesEXT props;
if (GetFormatModifierProps(fn, physicalDevice, format, modifier, &props)) {
return static_cast<uint32_t>(props.drmFormatModifierPlaneCount);
}
return DAWN_FORMAT_VALIDATION_ERROR("DRM format modifier not supported.");
}
bool IsMultiPlanarVkFormat(VkFormat format) {
switch (format) {
case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
case VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM:
case VK_FORMAT_G8_B8R8_2PLANE_422_UNORM:
case VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM:
case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16:
case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16:
case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16:
case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16:
case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16:
case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16:
case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16:
case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16:
case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16:
case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16:
case VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM:
case VK_FORMAT_G16_B16R16_2PLANE_420_UNORM:
case VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM:
case VK_FORMAT_G16_B16R16_2PLANE_422_UNORM:
case VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM:
return true;
default:
return false;
}
}
bool SupportsDisjoint(const VulkanFunctions& fn,
VkPhysicalDevice physicalDevice,
VkFormat format,
uint64_t modifier) {
if (IsMultiPlanarVkFormat(format)) {
VkDrmFormatModifierPropertiesEXT props;
return (GetFormatModifierProps(fn, physicalDevice, format, modifier, &props) &&
(props.drmFormatModifierTilingFeatures & VK_FORMAT_FEATURE_DISJOINT_BIT));
}
return false;
}
} // anonymous namespace
Service::Service(Device* device)
@@ -77,12 +130,16 @@ namespace dawn_native { namespace vulkan { namespace external_memory {
VkImageTiling tiling,
VkImageUsageFlags usage,
VkImageCreateFlags flags) {
return mSupported;
return mSupported && (!IsMultiPlanarVkFormat(format) ||
(format == VK_FORMAT_G8_B8R8_2PLANE_420_UNORM &&
mDevice->GetDeviceInfo().HasExt(DeviceExt::ImageFormatList)));
}
bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
VkFormat format,
VkImageUsageFlags usage) {
VkImageUsageFlags usage,
bool* supportsDisjoint) {
*supportsDisjoint = false;
// Early out before we try using extension functions
if (!mSupported) {
return false;
@@ -104,42 +161,57 @@ namespace dawn_native { namespace vulkan { namespace external_memory {
if (planeCount == 0) {
return false;
}
// TODO(hob): Support multi-plane formats like I915_FORMAT_MOD_Y_TILED_CCS.
if (planeCount > 1) {
// Only support the NV12 multi-planar format for now.
if (planeCount > 1 && format != VK_FORMAT_G8_B8R8_2PLANE_420_UNORM) {
return false;
}
*supportsDisjoint =
SupportsDisjoint(mDevice->fn, physicalDevice, format, dmaBufDescriptor->drmModifier);
// Verify that the format modifier of the external memory and the requested Vulkan format
// are actually supported together in a dma-buf import.
VkPhysicalDeviceImageDrmFormatModifierInfoEXT drmModifierInfo;
drmModifierInfo.sType =
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT;
drmModifierInfo.pNext = nullptr;
drmModifierInfo.drmFormatModifier = dmaBufDescriptor->drmModifier;
drmModifierInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VkPhysicalDeviceExternalImageFormatInfo externalImageFormatInfo;
externalImageFormatInfo.sType =
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO;
externalImageFormatInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
externalImageFormatInfo.pNext = &drmModifierInfo;
VkPhysicalDeviceImageFormatInfo2 imageFormatInfo;
VkPhysicalDeviceImageFormatInfo2 imageFormatInfo = {};
imageFormatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2;
imageFormatInfo.format = format;
imageFormatInfo.type = VK_IMAGE_TYPE_2D;
imageFormatInfo.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
imageFormatInfo.usage = usage;
imageFormatInfo.flags = 0;
imageFormatInfo.pNext = &externalImageFormatInfo;
PNextChainBuilder imageFormatInfoChain(&imageFormatInfo);
VkExternalImageFormatProperties externalImageFormatProps;
externalImageFormatProps.sType = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES;
externalImageFormatProps.pNext = nullptr;
VkPhysicalDeviceExternalImageFormatInfo externalImageFormatInfo = {};
externalImageFormatInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
imageFormatInfoChain.Add(&externalImageFormatInfo,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO);
VkImageFormatProperties2 imageFormatProps;
VkPhysicalDeviceImageDrmFormatModifierInfoEXT drmModifierInfo = {};
drmModifierInfo.drmFormatModifier = dmaBufDescriptor->drmModifier;
drmModifierInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
imageFormatInfoChain.Add(
&drmModifierInfo, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT);
// For a mutable VkImage of a multi-planar format, we also need to make sure each
// plane's view format can be supported.
std::array<VkFormat, 2> viewFormats;
VkImageFormatListCreateInfo imageFormatListInfo = {};
if (planeCount > 1) {
ASSERT(format == VK_FORMAT_G8_B8R8_2PLANE_420_UNORM);
viewFormats = {VK_FORMAT_R8_UNORM, VK_FORMAT_R8G8_UNORM};
imageFormatListInfo.viewFormatCount = 2;
imageFormatListInfo.pViewFormats = viewFormats.data();
imageFormatInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
imageFormatInfoChain.Add(&imageFormatListInfo,
VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO);
}
VkImageFormatProperties2 imageFormatProps = {};
imageFormatProps.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2;
imageFormatProps.pNext = &externalImageFormatProps;
PNextChainBuilder imageFormatPropsChain(&imageFormatProps);
VkExternalImageFormatProperties externalImageFormatProps = {};
imageFormatPropsChain.Add(&externalImageFormatProps,
VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES);
VkResult result = VkResult::WrapUnsafe(mDevice->fn.GetPhysicalDeviceImageFormatProperties2(
physicalDevice, &imageFormatInfo, &imageFormatProps));
@@ -172,8 +244,8 @@ namespace dawn_native { namespace vulkan { namespace external_memory {
// Get the valid memory types that the external memory can be imported as.
mDevice->fn.GetMemoryFdPropertiesKHR(device, VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
dmaBufDescriptor->memoryFD, &fdProperties);
// Choose the best memory type that satisfies both the image's constraint and the import's
// constraint.
// Choose the best memory type that satisfies both the image's constraint and the
// import's constraint.
memoryRequirements.memoryTypeBits &= fdProperties.memoryTypeBits;
int memoryTypeIndex = mDevice->GetResourceMemoryAllocator()->FindBestTypeIndex(
memoryRequirements, MemoryKind::Opaque);
@@ -190,23 +262,23 @@ namespace dawn_native { namespace vulkan { namespace external_memory {
VkImage image) {
DAWN_INVALID_IF(handle < 0, "Importing memory with an invalid handle.");
VkMemoryDedicatedAllocateInfo memoryDedicatedAllocateInfo;
memoryDedicatedAllocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO;
memoryDedicatedAllocateInfo.pNext = nullptr;
memoryDedicatedAllocateInfo.image = image;
memoryDedicatedAllocateInfo.buffer = VkBuffer{};
VkImportMemoryFdInfoKHR importMemoryFdInfo;
importMemoryFdInfo.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR;
importMemoryFdInfo.pNext = &memoryDedicatedAllocateInfo;
importMemoryFdInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
importMemoryFdInfo.fd = handle;
VkMemoryAllocateInfo memoryAllocateInfo;
VkMemoryAllocateInfo memoryAllocateInfo = {};
memoryAllocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memoryAllocateInfo.pNext = &importMemoryFdInfo;
memoryAllocateInfo.allocationSize = importParams.allocationSize;
memoryAllocateInfo.memoryTypeIndex = importParams.memoryTypeIndex;
PNextChainBuilder memoryAllocateInfoChain(&memoryAllocateInfo);
VkImportMemoryFdInfoKHR importMemoryFdInfo;
importMemoryFdInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
importMemoryFdInfo.fd = handle;
memoryAllocateInfoChain.Add(&importMemoryFdInfo,
VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR);
VkMemoryDedicatedAllocateInfo memoryDedicatedAllocateInfo;
memoryDedicatedAllocateInfo.image = image;
memoryDedicatedAllocateInfo.buffer = VkBuffer{};
memoryAllocateInfoChain.Add(&memoryDedicatedAllocateInfo,
VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO);
VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
DAWN_TRY(
@@ -226,39 +298,54 @@ namespace dawn_native { namespace vulkan { namespace external_memory {
VkPhysicalDevice physicalDevice = ToBackend(mDevice->GetAdapter())->GetPhysicalDevice();
VkDevice device = mDevice->GetVkDevice();
// Dawn currently doesn't support multi-plane formats, so we only need to create a single
// VkSubresourceLayout here.
VkSubresourceLayout planeLayout;
uint32_t planeCount;
DAWN_TRY_ASSIGN(planeCount,
GetModifierPlaneCount(mDevice->fn, physicalDevice, baseCreateInfo.format,
dmaBufDescriptor->drmModifier));
VkImageCreateInfo createInfo = baseCreateInfo;
createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
createInfo.flags = 0;
createInfo.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
PNextChainBuilder createInfoChain(&createInfo);
VkExternalMemoryImageCreateInfo externalMemoryImageCreateInfo = {};
externalMemoryImageCreateInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
createInfoChain.Add(&externalMemoryImageCreateInfo,
VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO);
// For single plane formats.
VkSubresourceLayout planeLayout = {};
planeLayout.offset = 0;
planeLayout.size = 0; // VK_EXT_image_drm_format_modifier mandates size = 0.
planeLayout.rowPitch = dmaBufDescriptor->stride;
planeLayout.arrayPitch = 0; // Not an array texture
planeLayout.depthPitch = 0; // Not a depth texture
uint32_t planeCount;
DAWN_TRY_ASSIGN(planeCount,
GetModifierPlaneCount(mDevice->fn, physicalDevice, baseCreateInfo.format,
dmaBufDescriptor->drmModifier));
ASSERT(planeCount == 1);
VkImageDrmFormatModifierExplicitCreateInfoEXT explicitCreateInfo;
explicitCreateInfo.sType =
VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT;
explicitCreateInfo.pNext = NULL;
VkImageDrmFormatModifierExplicitCreateInfoEXT explicitCreateInfo = {};
explicitCreateInfo.drmFormatModifier = dmaBufDescriptor->drmModifier;
explicitCreateInfo.drmFormatModifierPlaneCount = planeCount;
explicitCreateInfo.drmFormatModifierPlaneCount = 1;
explicitCreateInfo.pPlaneLayouts = &planeLayout;
VkExternalMemoryImageCreateInfo externalMemoryImageCreateInfo;
externalMemoryImageCreateInfo.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
externalMemoryImageCreateInfo.pNext = &explicitCreateInfo;
externalMemoryImageCreateInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
// For multi-planar formats, we can't explicitly specify VkSubresourceLayout for each plane
// due to the lack of knowledge about the required 'offset'. Alternatively
// VkImageDrmFormatModifierListCreateInfoEXT can be used to create the image with the DRM format
// modifier.
VkImageDrmFormatModifierListCreateInfoEXT listCreateInfo = {};
listCreateInfo.drmFormatModifierCount = 1;
listCreateInfo.pDrmFormatModifiers = &dmaBufDescriptor->drmModifier;
VkImageCreateInfo createInfo = baseCreateInfo;
createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
createInfo.pNext = &externalMemoryImageCreateInfo;
createInfo.flags = 0;
createInfo.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
if (planeCount > 1) {
// For multi-planar formats, VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT specifies that a
// VkImageView can be created with a plane's format, which might differ from the image's format.
createInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
createInfoChain.Add(&listCreateInfo,
VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT);
} else {
createInfoChain.Add(
&explicitCreateInfo,
VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT);
}
// Create a new VkImage with tiling equal to the DRM format modifier.
VkImage image;

View File

@@ -39,7 +39,9 @@ namespace dawn_native { namespace vulkan { namespace external_memory {
bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
VkFormat format,
VkImageUsageFlags usage) {
VkImageUsageFlags usage,
bool* supportsDisjoint) {
*supportsDisjoint = false;
return false;
}

View File

@@ -81,7 +81,9 @@ namespace dawn_native { namespace vulkan { namespace external_memory {
bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
VkFormat format,
VkImageUsageFlags usage) {
VkImageUsageFlags usage,
bool* supportsDisjoint) {
*supportsDisjoint = false;
return mSupported;
}

View File

@@ -81,7 +81,9 @@ namespace dawn_native { namespace vulkan { namespace external_memory {
bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
VkFormat format,
VkImageUsageFlags usage) {
VkImageUsageFlags usage,
bool* supportsDisjoint) {
*supportsDisjoint = false;
return mSupported;
}

View File

@@ -423,7 +423,7 @@ source_set("dawn_end2end_tests_sources") {
sources += [
"end2end/D3D12CachingTests.cpp",
"end2end/D3D12ResourceWrappingTests.cpp",
"end2end/D3D12VideoViewsTests.cpp",
"end2end/VideoViewsTests_win.cpp",
]
libs += [
"d3d11.lib",
@@ -448,6 +448,17 @@ source_set("dawn_end2end_tests_sources") {
]
deps += [ "${dawn_root}/src/utils:dawn_glfw" ]
}
if (dawn_enable_d3d12 || (dawn_enable_vulkan && is_chromeos)) {
sources += [
"end2end/VideoViewsTests.cpp",
"end2end/VideoViewsTests.h",
]
}
if (dawn_enable_vulkan && is_chromeos) {
sources += [ "end2end/VideoViewsTests_gbm.cpp" ]
}
}
source_set("dawn_white_box_tests_sources") {

View File

@@ -1,470 +0,0 @@
// Copyright 2021 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "tests/DawnTest.h"
#include <d3d11.h>
#include <d3d12.h>
#include <dxgi1_4.h>
#include <wrl/client.h>
#include "dawn_native/D3D12Backend.h"
#include "utils/ComboRenderPipelineDescriptor.h"
#include "utils/WGPUHelpers.h"
using Microsoft::WRL::ComPtr;
namespace {
class D3D12VideoViewsTests : public DawnTest {
protected:
void SetUp() override {
DawnTest::SetUp();
DAWN_TEST_UNSUPPORTED_IF(UsesWire());
DAWN_TEST_UNSUPPORTED_IF(!IsMultiPlanarFormatsSupported());
// Create the D3D11 device/contexts that will be used in subsequent tests
ComPtr<ID3D12Device> d3d12Device = dawn_native::d3d12::GetD3D12Device(device.Get());
const LUID adapterLuid = d3d12Device->GetAdapterLuid();
ComPtr<IDXGIFactory4> dxgiFactory;
HRESULT hr = ::CreateDXGIFactory2(0, IID_PPV_ARGS(&dxgiFactory));
ASSERT_EQ(hr, S_OK);
ComPtr<IDXGIAdapter> dxgiAdapter;
hr = dxgiFactory->EnumAdapterByLuid(adapterLuid, IID_PPV_ARGS(&dxgiAdapter));
ASSERT_EQ(hr, S_OK);
ComPtr<ID3D11Device> d3d11Device;
D3D_FEATURE_LEVEL d3dFeatureLevel;
ComPtr<ID3D11DeviceContext> d3d11DeviceContext;
hr = ::D3D11CreateDevice(dxgiAdapter.Get(), D3D_DRIVER_TYPE_UNKNOWN, nullptr, 0,
nullptr, 0, D3D11_SDK_VERSION, &d3d11Device, &d3dFeatureLevel,
&d3d11DeviceContext);
ASSERT_EQ(hr, S_OK);
// Runtime of the created texture (D3D11 device) and OpenSharedHandle runtime (Dawn's
// D3D12 device) must agree on resource sharing capability. For NV12 formats, D3D11
// requires at-least D3D11_SHARED_RESOURCE_TIER_2 support.
// https://docs.microsoft.com/en-us/windows/win32/api/d3d11/ne-d3d11-d3d11_shared_resource_tier
D3D11_FEATURE_DATA_D3D11_OPTIONS5 featureOptions5{};
hr = d3d11Device->CheckFeatureSupport(D3D11_FEATURE_D3D11_OPTIONS5, &featureOptions5,
sizeof(featureOptions5));
ASSERT_EQ(hr, S_OK);
ASSERT_GE(featureOptions5.SharedResourceTier, D3D11_SHARED_RESOURCE_TIER_2);
mD3d11Device = std::move(d3d11Device);
}
std::vector<const char*> GetRequiredFeatures() override {
mIsMultiPlanarFormatsSupported = SupportsFeatures({"multiplanar-formats"});
if (!mIsMultiPlanarFormatsSupported) {
return {};
}
return {"multiplanar-formats"};
}
bool IsMultiPlanarFormatsSupported() const {
return mIsMultiPlanarFormatsSupported;
}
static DXGI_FORMAT GetDXGITextureFormat(wgpu::TextureFormat format) {
switch (format) {
case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
return DXGI_FORMAT_NV12;
default:
UNREACHABLE();
return DXGI_FORMAT_UNKNOWN;
}
}
// Returns a pre-prepared multi-planar formatted texture
// The encoded texture data represents a 4x4 converted image. When |isCheckerboard| is true,
// the top left is a 2x2 yellow block, bottom right is a 2x2 red block, top right is a 2x2
// blue block, and bottom left is a 2x2 white block. When |isCheckerboard| is false, the
// image is converted from a solid yellow 4x4 block.
static std::vector<uint8_t> GetTestTextureData(wgpu::TextureFormat format,
bool isCheckerboard) {
constexpr uint8_t Yy = kYellowYUVColor[kYUVLumaPlaneIndex].r;
constexpr uint8_t Yu = kYellowYUVColor[kYUVChromaPlaneIndex].r;
constexpr uint8_t Yv = kYellowYUVColor[kYUVChromaPlaneIndex].g;
switch (format) {
// The first 16 bytes is the luma plane (Y), followed by the chroma plane (UV) which
// is half the number of bytes (subsampled by 2) but same bytes per line as luma
// plane.
case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
if (isCheckerboard) {
constexpr uint8_t Wy = kWhiteYUVColor[kYUVLumaPlaneIndex].r;
constexpr uint8_t Wu = kWhiteYUVColor[kYUVChromaPlaneIndex].r;
constexpr uint8_t Wv = kWhiteYUVColor[kYUVChromaPlaneIndex].g;
constexpr uint8_t Ry = kRedYUVColor[kYUVLumaPlaneIndex].r;
constexpr uint8_t Ru = kRedYUVColor[kYUVChromaPlaneIndex].r;
constexpr uint8_t Rv = kRedYUVColor[kYUVChromaPlaneIndex].g;
constexpr uint8_t By = kBlueYUVColor[kYUVLumaPlaneIndex].r;
constexpr uint8_t Bu = kBlueYUVColor[kYUVChromaPlaneIndex].r;
constexpr uint8_t Bv = kBlueYUVColor[kYUVChromaPlaneIndex].g;
// clang-format off
return {
Wy, Wy, Ry, Ry, // plane 0, start + 0
Wy, Wy, Ry, Ry,
Yy, Yy, By, By,
Yy, Yy, By, By,
Wu, Wv, Ru, Rv, // plane 1, start + 16
Yu, Yv, Bu, Bv,
};
// clang-format on
} else {
// clang-format off
return {
Yy, Yy, Yy, Yy, // plane 0, start + 0
Yy, Yy, Yy, Yy,
Yy, Yy, Yy, Yy,
Yy, Yy, Yy, Yy,
Yu, Yv, Yu, Yv, // plane 1, start + 16
Yu, Yv, Yu, Yv,
};
// clang-format on
}
default:
UNREACHABLE();
return {};
}
}
void CreateVideoTextureForTest(wgpu::TextureFormat format,
wgpu::TextureUsage usage,
bool isCheckerboard,
wgpu::Texture* dawnTextureOut) {
wgpu::TextureDescriptor textureDesc;
textureDesc.format = format;
textureDesc.dimension = wgpu::TextureDimension::e2D;
textureDesc.usage = usage;
textureDesc.size = {kYUVImageDataWidthInTexels, kYUVImageDataHeightInTexels, 1};
// Create a DX11 texture with data then wrap it in a shared handle.
D3D11_TEXTURE2D_DESC d3dDescriptor;
d3dDescriptor.Width = kYUVImageDataWidthInTexels;
d3dDescriptor.Height = kYUVImageDataHeightInTexels;
d3dDescriptor.MipLevels = 1;
d3dDescriptor.ArraySize = 1;
d3dDescriptor.Format = GetDXGITextureFormat(format);
d3dDescriptor.SampleDesc.Count = 1;
d3dDescriptor.SampleDesc.Quality = 0;
d3dDescriptor.Usage = D3D11_USAGE_DEFAULT;
d3dDescriptor.BindFlags = D3D11_BIND_SHADER_RESOURCE;
d3dDescriptor.CPUAccessFlags = 0;
d3dDescriptor.MiscFlags =
D3D11_RESOURCE_MISC_SHARED_NTHANDLE | D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX;
std::vector<uint8_t> initialData = GetTestTextureData(format, isCheckerboard);
D3D11_SUBRESOURCE_DATA subres;
subres.pSysMem = initialData.data();
subres.SysMemPitch = kYUVImageDataWidthInTexels;
ComPtr<ID3D11Texture2D> d3d11Texture;
HRESULT hr = mD3d11Device->CreateTexture2D(&d3dDescriptor, &subres, &d3d11Texture);
ASSERT_EQ(hr, S_OK);
ComPtr<IDXGIResource1> dxgiResource;
hr = d3d11Texture.As(&dxgiResource);
ASSERT_EQ(hr, S_OK);
HANDLE sharedHandle;
hr = dxgiResource->CreateSharedHandle(
nullptr, DXGI_SHARED_RESOURCE_READ | DXGI_SHARED_RESOURCE_WRITE, nullptr,
&sharedHandle);
ASSERT_EQ(hr, S_OK);
// DX11 texture should be initialized upon CreateTexture2D. However, if we do not
// acquire/release the keyed mutex before using the wrapped WebGPU texture, the WebGPU
// texture is left uninitialized. This is required for D3D11 and D3D12 interop.
ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex;
hr = d3d11Texture.As(&dxgiKeyedMutex);
ASSERT_EQ(hr, S_OK);
hr = dxgiKeyedMutex->AcquireSync(0, INFINITE);
ASSERT_EQ(hr, S_OK);
hr = dxgiKeyedMutex->ReleaseSync(1);
ASSERT_EQ(hr, S_OK);
// Open the DX11 texture in Dawn from the shared handle and return it as a WebGPU
// texture.
dawn_native::d3d12::ExternalImageDescriptorDXGISharedHandle externalImageDesc;
externalImageDesc.cTextureDescriptor =
reinterpret_cast<const WGPUTextureDescriptor*>(&textureDesc);
externalImageDesc.sharedHandle = sharedHandle;
std::unique_ptr<dawn_native::d3d12::ExternalImageDXGI> externalImage =
dawn_native::d3d12::ExternalImageDXGI::Create(device.Get(), &externalImageDesc);
// Handle is no longer needed once resources are created.
::CloseHandle(sharedHandle);
dawn_native::d3d12::ExternalImageAccessDescriptorDXGIKeyedMutex externalAccessDesc;
externalAccessDesc.acquireMutexKey = 1;
externalAccessDesc.releaseMutexKey = 2;
externalAccessDesc.isInitialized = true;
externalAccessDesc.usage = static_cast<WGPUTextureUsageFlags>(textureDesc.usage);
*dawnTextureOut = wgpu::Texture::Acquire(
externalImage->ProduceTexture(device.Get(), &externalAccessDesc));
}
// Vertex shader used to render a sampled texture into a quad.
wgpu::ShaderModule GetTestVertexShaderModule() const {
return utils::CreateShaderModule(device, R"(
struct VertexOut {
[[location(0)]] texCoord : vec2 <f32>;
[[builtin(position)]] position : vec4<f32>;
};
[[stage(vertex)]]
fn main([[builtin(vertex_index)]] VertexIndex : u32) -> VertexOut {
var pos = array<vec2<f32>, 6>(
vec2<f32>(-1.0, 1.0),
vec2<f32>(-1.0, -1.0),
vec2<f32>(1.0, -1.0),
vec2<f32>(-1.0, 1.0),
vec2<f32>(1.0, -1.0),
vec2<f32>(1.0, 1.0)
);
var output : VertexOut;
output.position = vec4<f32>(pos[VertexIndex], 0.0, 1.0);
output.texCoord = vec2<f32>(output.position.xy * 0.5) + vec2<f32>(0.5, 0.5);
return output;
})");
}
// The width and height in texels are 4 for all YUV formats.
static constexpr uint32_t kYUVImageDataWidthInTexels = 4;
static constexpr uint32_t kYUVImageDataHeightInTexels = 4;
static constexpr size_t kYUVLumaPlaneIndex = 0;
static constexpr size_t kYUVChromaPlaneIndex = 1;
// RGB colors converted into YUV (per plane), for testing.
// RGB colors are mapped to the BT.601 definition of luma.
// https://docs.microsoft.com/en-us/windows/win32/medfound/about-yuv-video
static constexpr std::array<RGBA8, 2> kYellowYUVColor = {RGBA8{210, 0, 0, 0xFF}, // Y
RGBA8{16, 146, 0, 0xFF}}; // UV
static constexpr std::array<RGBA8, 2> kWhiteYUVColor = {RGBA8{235, 0, 0, 0xFF}, // Y
RGBA8{128, 128, 0, 0xFF}}; // UV
static constexpr std::array<RGBA8, 2> kBlueYUVColor = {RGBA8{41, 0, 0, 0xFF}, // Y
RGBA8{240, 110, 0, 0xFF}}; // UV
static constexpr std::array<RGBA8, 2> kRedYUVColor = {RGBA8{81, 0, 0, 0xFF}, // Y
RGBA8{90, 240, 0, 0xFF}}; // UV
ComPtr<ID3D11Device> mD3d11Device;
bool mIsMultiPlanarFormatsSupported = false;
};
} // namespace
// Samples the luminance (Y) plane from an imported NV12 texture into a single channel of an RGBA
// output attachment and checks for the expected pixel value in the rendered quad.
TEST_P(D3D12VideoViewsTests, NV12SampleYtoR) {
wgpu::Texture wgpuTexture;
CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
wgpu::TextureUsage::TextureBinding, /*isCheckerboard*/ false,
&wgpuTexture);
ASSERT_NE(wgpuTexture.Get(), nullptr);
wgpu::TextureViewDescriptor viewDesc;
viewDesc.aspect = wgpu::TextureAspect::Plane0Only;
wgpu::TextureView textureView = wgpuTexture.CreateView(&viewDesc);
utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
renderPipelineDescriptor.vertex.module = GetTestVertexShaderModule();
renderPipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
[[group(0), binding(0)]] var sampler0 : sampler;
[[group(0), binding(1)]] var texture : texture_2d<f32>;
[[stage(fragment)]]
fn main([[location(0)]] texCoord : vec2<f32>) -> [[location(0)]] vec4<f32> {
let y : f32 = textureSample(texture, sampler0, texCoord).r;
return vec4<f32>(y, 0.0, 0.0, 1.0);
})");
utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(
device, kYUVImageDataWidthInTexels, kYUVImageDataHeightInTexels);
renderPipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
renderPipelineDescriptor.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&renderPipelineDescriptor);
wgpu::Sampler sampler = device.CreateSampler();
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
{
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
pass.SetPipeline(renderPipeline);
pass.SetBindGroup(0, utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
{{0, sampler}, {1, textureView}}));
pass.Draw(6);
pass.EndPass();
}
wgpu::CommandBuffer commands = encoder.Finish();
queue.Submit(1, &commands);
// Test the luma plane in the top left corner of RGB image.
EXPECT_PIXEL_RGBA8_EQ(kYellowYUVColor[kYUVLumaPlaneIndex], renderPass.color, 0, 0);
}
// Samples the chrominance (UV) plane from an imported texture into two channels of an RGBA output
// attachment and checks for the expected pixel value in the rendered quad.
TEST_P(D3D12VideoViewsTests, NV12SampleUVtoRG) {
wgpu::Texture wgpuTexture;
CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
wgpu::TextureUsage::TextureBinding, /*isCheckerboard*/ false,
&wgpuTexture);
ASSERT_NE(wgpuTexture.Get(), nullptr);
wgpu::TextureViewDescriptor viewDesc;
viewDesc.aspect = wgpu::TextureAspect::Plane1Only;
wgpu::TextureView textureView = wgpuTexture.CreateView(&viewDesc);
utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
renderPipelineDescriptor.vertex.module = GetTestVertexShaderModule();
renderPipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
[[group(0), binding(0)]] var sampler0 : sampler;
[[group(0), binding(1)]] var texture : texture_2d<f32>;
[[stage(fragment)]]
fn main([[location(0)]] texCoord : vec2<f32>) -> [[location(0)]] vec4<f32> {
let u : f32 = textureSample(texture, sampler0, texCoord).r;
let v : f32 = textureSample(texture, sampler0, texCoord).g;
return vec4<f32>(u, v, 0.0, 1.0);
})");
utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(
device, kYUVImageDataWidthInTexels, kYUVImageDataHeightInTexels);
renderPipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
renderPipelineDescriptor.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&renderPipelineDescriptor);
wgpu::Sampler sampler = device.CreateSampler();
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
{
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
pass.SetPipeline(renderPipeline);
pass.SetBindGroup(0, utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
{{0, sampler}, {1, textureView}}));
pass.Draw(6);
pass.EndPass();
}
wgpu::CommandBuffer commands = encoder.Finish();
queue.Submit(1, &commands);
// Test the chroma plane in the top left corner of RGB image.
EXPECT_PIXEL_RGBA8_EQ(kYellowYUVColor[kYUVChromaPlaneIndex], renderPass.color, 0, 0);
}
// Renders a NV12 "checkerboard" texture into a RGB quad then checks the color at specific
// points to ensure the image has not been flipped.
TEST_P(D3D12VideoViewsTests, NV12SampleYUVtoRGB) {
// TODO(https://crbug.com/dawn/733): Figure out why Nvidia bot occasionally fails testing all
// four corners.
DAWN_SUPPRESS_TEST_IF(IsNvidia());
wgpu::Texture wgpuTexture;
CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
wgpu::TextureUsage::TextureBinding, /*isCheckerboard*/ true,
&wgpuTexture);
ASSERT_NE(wgpuTexture.Get(), nullptr);
wgpu::TextureViewDescriptor lumaViewDesc;
lumaViewDesc.aspect = wgpu::TextureAspect::Plane0Only;
wgpu::TextureView lumaTextureView = wgpuTexture.CreateView(&lumaViewDesc);
wgpu::TextureViewDescriptor chromaViewDesc;
chromaViewDesc.aspect = wgpu::TextureAspect::Plane1Only;
wgpu::TextureView chromaTextureView = wgpuTexture.CreateView(&chromaViewDesc);
utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
renderPipelineDescriptor.vertex.module = GetTestVertexShaderModule();
renderPipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
[[group(0), binding(0)]] var sampler0 : sampler;
[[group(0), binding(1)]] var lumaTexture : texture_2d<f32>;
[[group(0), binding(2)]] var chromaTexture : texture_2d<f32>;
[[stage(fragment)]]
fn main([[location(0)]] texCoord : vec2<f32>) -> [[location(0)]] vec4<f32> {
let y : f32 = textureSample(lumaTexture, sampler0, texCoord).r;
let u : f32 = textureSample(chromaTexture, sampler0, texCoord).r;
let v : f32 = textureSample(chromaTexture, sampler0, texCoord).g;
return vec4<f32>(y, u, v, 1.0);
})");
utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(
device, kYUVImageDataWidthInTexels, kYUVImageDataHeightInTexels);
renderPipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&renderPipelineDescriptor);
wgpu::Sampler sampler = device.CreateSampler();
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
{
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
pass.SetPipeline(renderPipeline);
pass.SetBindGroup(
0, utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
{{0, sampler}, {1, lumaTextureView}, {2, chromaTextureView}}));
pass.Draw(6);
pass.EndPass();
}
wgpu::CommandBuffer commands = encoder.Finish();
queue.Submit(1, &commands);
// Test four corners of the checkerboard image (YUV color space).
RGBA8 yellowYUV(kYellowYUVColor[kYUVLumaPlaneIndex].r, kYellowYUVColor[kYUVChromaPlaneIndex].r,
kYellowYUVColor[kYUVChromaPlaneIndex].g, 0xFF);
EXPECT_PIXEL_RGBA8_EQ(yellowYUV, renderPass.color, 0, 0); // top left
RGBA8 redYUV(kRedYUVColor[kYUVLumaPlaneIndex].r, kRedYUVColor[kYUVChromaPlaneIndex].r,
kRedYUVColor[kYUVChromaPlaneIndex].g, 0xFF);
EXPECT_PIXEL_RGBA8_EQ(redYUV, renderPass.color, kYUVImageDataWidthInTexels - 1,
kYUVImageDataHeightInTexels - 1); // bottom right
RGBA8 blueYUV(kBlueYUVColor[kYUVLumaPlaneIndex].r, kBlueYUVColor[kYUVChromaPlaneIndex].r,
kBlueYUVColor[kYUVChromaPlaneIndex].g, 0xFF);
EXPECT_PIXEL_RGBA8_EQ(blueYUV, renderPass.color, kYUVImageDataWidthInTexels - 1,
0); // top right
RGBA8 whiteYUV(kWhiteYUVColor[kYUVLumaPlaneIndex].r, kWhiteYUVColor[kYUVChromaPlaneIndex].r,
kWhiteYUVColor[kYUVChromaPlaneIndex].g, 0xFF);
EXPECT_PIXEL_RGBA8_EQ(whiteYUV, renderPass.color, 0,
kYUVImageDataHeightInTexels - 1); // bottom left
}
DAWN_INSTANTIATE_TEST(D3D12VideoViewsTests, D3D12Backend());

View File

@@ -0,0 +1,348 @@
// Copyright 2021 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "VideoViewsTests.h"
#include "utils/ComboRenderPipelineDescriptor.h"
#include "utils/WGPUHelpers.h"
VideoViewsTestBackend::PlatformTexture::PlatformTexture(wgpu::Texture&& texture)
: wgpuTexture(texture) {
}
VideoViewsTestBackend::PlatformTexture::~PlatformTexture() = default;
VideoViewsTestBackend::~VideoViewsTestBackend() = default;
constexpr std::array<RGBA8, 2> VideoViewsTests::kYellowYUVColor;
constexpr std::array<RGBA8, 2> VideoViewsTests::kWhiteYUVColor;
constexpr std::array<RGBA8, 2> VideoViewsTests::kBlueYUVColor;
constexpr std::array<RGBA8, 2> VideoViewsTests::kRedYUVColor;
void VideoViewsTests::SetUp() {
DawnTest::SetUp();
DAWN_TEST_UNSUPPORTED_IF(UsesWire());
DAWN_TEST_UNSUPPORTED_IF(!IsMultiPlanarFormatsSupported());
mBackend = VideoViewsTestBackend::Create();
mBackend->OnSetUp(device.Get());
}
void VideoViewsTests::TearDown() {
if (!UsesWire() && IsMultiPlanarFormatsSupported()) {
mBackend->OnTearDown();
}
DawnTest::TearDown();
}
std::vector<const char*> VideoViewsTests::GetRequiredFeatures() {
std::vector<const char*> requiredFeatures = {};
mIsMultiPlanarFormatsSupported = SupportsFeatures({"multiplanar-formats"});
if (mIsMultiPlanarFormatsSupported) {
requiredFeatures.push_back("multiplanar-formats");
}
requiredFeatures.push_back("dawn-internal-usages");
return requiredFeatures;
}
bool VideoViewsTests::IsMultiPlanarFormatsSupported() const {
return mIsMultiPlanarFormatsSupported;
}
// Returns a pre-prepared multi-planar formatted texture
// The encoded texture data represents a 4x4 converted image. When |isCheckerboard| is true,
// the top left is a 2x2 yellow block, bottom right is a 2x2 red block, top right is a 2x2
// blue block, and bottom left is a 2x2 white block. When |isCheckerboard| is false, the
// image is converted from a solid yellow 4x4 block.
// static
std::vector<uint8_t> VideoViewsTests::GetTestTextureData(wgpu::TextureFormat format,
bool isCheckerboard) {
constexpr uint8_t Yy = kYellowYUVColor[kYUVLumaPlaneIndex].r;
constexpr uint8_t Yu = kYellowYUVColor[kYUVChromaPlaneIndex].r;
constexpr uint8_t Yv = kYellowYUVColor[kYUVChromaPlaneIndex].g;
switch (format) {
// The first 16 bytes are the luma plane (Y), followed by the chroma plane (UV), which
// is half the number of bytes (subsampled by 2) but same bytes per line as luma
// plane.
case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
if (isCheckerboard) {
constexpr uint8_t Wy = kWhiteYUVColor[kYUVLumaPlaneIndex].r;
constexpr uint8_t Wu = kWhiteYUVColor[kYUVChromaPlaneIndex].r;
constexpr uint8_t Wv = kWhiteYUVColor[kYUVChromaPlaneIndex].g;
constexpr uint8_t Ry = kRedYUVColor[kYUVLumaPlaneIndex].r;
constexpr uint8_t Ru = kRedYUVColor[kYUVChromaPlaneIndex].r;
constexpr uint8_t Rv = kRedYUVColor[kYUVChromaPlaneIndex].g;
constexpr uint8_t By = kBlueYUVColor[kYUVLumaPlaneIndex].r;
constexpr uint8_t Bu = kBlueYUVColor[kYUVChromaPlaneIndex].r;
constexpr uint8_t Bv = kBlueYUVColor[kYUVChromaPlaneIndex].g;
// clang-format off
return {
Wy, Wy, Ry, Ry, // plane 0, start + 0
Wy, Wy, Ry, Ry,
Yy, Yy, By, By,
Yy, Yy, By, By,
Wu, Wv, Ru, Rv, // plane 1, start + 16
Yu, Yv, Bu, Bv,
};
// clang-format on
} else {
// clang-format off
return {
Yy, Yy, Yy, Yy, // plane 0, start + 0
Yy, Yy, Yy, Yy,
Yy, Yy, Yy, Yy,
Yy, Yy, Yy, Yy,
Yu, Yv, Yu, Yv, // plane 1, start + 16
Yu, Yv, Yu, Yv,
};
// clang-format on
}
default:
UNREACHABLE();
return {};
}
}
// Vertex shader used to render a sampled texture into a quad.
wgpu::ShaderModule VideoViewsTests::GetTestVertexShaderModule() const {
return utils::CreateShaderModule(device, R"(
struct VertexOut {
[[location(0)]] texCoord : vec2 <f32>;
[[builtin(position)]] position : vec4<f32>;
};
[[stage(vertex)]]
fn main([[builtin(vertex_index)]] VertexIndex : u32) -> VertexOut {
var pos = array<vec2<f32>, 6>(
vec2<f32>(-1.0, 1.0),
vec2<f32>(-1.0, -1.0),
vec2<f32>(1.0, -1.0),
vec2<f32>(-1.0, 1.0),
vec2<f32>(1.0, -1.0),
vec2<f32>(1.0, 1.0)
);
var output : VertexOut;
output.position = vec4<f32>(pos[VertexIndex], 0.0, 1.0);
output.texCoord = vec2<f32>(output.position.xy * 0.5) + vec2<f32>(0.5, 0.5);
return output;
})");
}
// Samples the luminance (Y) plane from an imported NV12 texture into a single channel of an RGBA
// output attachment and checks for the expected pixel value in the rendered quad.
TEST_P(VideoViewsTests, NV12SampleYtoR) {
std::unique_ptr<VideoViewsTestBackend::PlatformTexture> platformTexture =
mBackend->CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
wgpu::TextureUsage::TextureBinding,
/*isCheckerboard*/ false);
ASSERT_NE(platformTexture.get(), nullptr);
if (!platformTexture->CanWrapAsWGPUTexture()) {
mBackend->DestroyVideoTextureForTest(std::move(platformTexture));
GTEST_SKIP() << "Skipped because not supported.";
}
wgpu::TextureViewDescriptor viewDesc;
viewDesc.format = wgpu::TextureFormat::R8Unorm;
viewDesc.aspect = wgpu::TextureAspect::Plane0Only;
wgpu::TextureView textureView = platformTexture->wgpuTexture.CreateView(&viewDesc);
utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
renderPipelineDescriptor.vertex.module = GetTestVertexShaderModule();
renderPipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
[[group(0), binding(0)]] var sampler0 : sampler;
[[group(0), binding(1)]] var texture : texture_2d<f32>;
[[stage(fragment)]]
fn main([[location(0)]] texCoord : vec2<f32>) -> [[location(0)]] vec4<f32> {
let y : f32 = textureSample(texture, sampler0, texCoord).r;
return vec4<f32>(y, 0.0, 0.0, 1.0);
})");
utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(
device, kYUVImageDataWidthInTexels, kYUVImageDataHeightInTexels);
renderPipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
renderPipelineDescriptor.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&renderPipelineDescriptor);
wgpu::Sampler sampler = device.CreateSampler();
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
{
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
pass.SetPipeline(renderPipeline);
pass.SetBindGroup(0, utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
{{0, sampler}, {1, textureView}}));
pass.Draw(6);
pass.EndPass();
}
wgpu::CommandBuffer commands = encoder.Finish();
queue.Submit(1, &commands);
// Test the luma plane in the top left corner of RGB image.
EXPECT_PIXEL_RGBA8_EQ(kYellowYUVColor[kYUVLumaPlaneIndex], renderPass.color, 0, 0);
mBackend->DestroyVideoTextureForTest(std::move(platformTexture));
}
// Samples the chrominance (UV) plane from an imported texture into two channels of an RGBA output
// attachment and checks for the expected pixel value in the rendered quad.
TEST_P(VideoViewsTests, NV12SampleUVtoRG) {
std::unique_ptr<VideoViewsTestBackend::PlatformTexture> platformTexture =
mBackend->CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
wgpu::TextureUsage::TextureBinding,
/*isCheckerboard*/ false);
ASSERT_NE(platformTexture.get(), nullptr);
if (!platformTexture->CanWrapAsWGPUTexture()) {
mBackend->DestroyVideoTextureForTest(std::move(platformTexture));
GTEST_SKIP() << "Skipped because not supported.";
}
wgpu::TextureViewDescriptor viewDesc;
viewDesc.format = wgpu::TextureFormat::RG8Unorm;
viewDesc.aspect = wgpu::TextureAspect::Plane1Only;
wgpu::TextureView textureView = platformTexture->wgpuTexture.CreateView(&viewDesc);
utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
renderPipelineDescriptor.vertex.module = GetTestVertexShaderModule();
renderPipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
[[group(0), binding(0)]] var sampler0 : sampler;
[[group(0), binding(1)]] var texture : texture_2d<f32>;
[[stage(fragment)]]
fn main([[location(0)]] texCoord : vec2<f32>) -> [[location(0)]] vec4<f32> {
let u : f32 = textureSample(texture, sampler0, texCoord).r;
let v : f32 = textureSample(texture, sampler0, texCoord).g;
return vec4<f32>(u, v, 0.0, 1.0);
})");
utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(
device, kYUVImageDataWidthInTexels, kYUVImageDataHeightInTexels);
renderPipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
renderPipelineDescriptor.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&renderPipelineDescriptor);
wgpu::Sampler sampler = device.CreateSampler();
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
{
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
pass.SetPipeline(renderPipeline);
pass.SetBindGroup(0, utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
{{0, sampler}, {1, textureView}}));
pass.Draw(6);
pass.EndPass();
}
wgpu::CommandBuffer commands = encoder.Finish();
queue.Submit(1, &commands);
// Test the chroma plane in the top left corner of RGB image.
EXPECT_PIXEL_RGBA8_EQ(kYellowYUVColor[kYUVChromaPlaneIndex], renderPass.color, 0, 0);
mBackend->DestroyVideoTextureForTest(std::move(platformTexture));
}
// Renders an NV12 "checkerboard" texture into an RGB quad, then checks the color at specific
// points to ensure the image has not been flipped.
TEST_P(VideoViewsTests, NV12SampleYUVtoRGB) {
// TODO(https://crbug.com/dawn/733): Figure out why Nvidia bot occasionally fails testing all
// four corners.
DAWN_SUPPRESS_TEST_IF(IsNvidia());
std::unique_ptr<VideoViewsTestBackend::PlatformTexture> platformTexture =
mBackend->CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
wgpu::TextureUsage::TextureBinding,
/*isCheckerboard*/ true);
ASSERT_NE(platformTexture.get(), nullptr);
if (!platformTexture->CanWrapAsWGPUTexture()) {
mBackend->DestroyVideoTextureForTest(std::move(platformTexture));
GTEST_SKIP() << "Skipped because not supported.";
}
wgpu::TextureViewDescriptor lumaViewDesc;
lumaViewDesc.format = wgpu::TextureFormat::R8Unorm;
lumaViewDesc.aspect = wgpu::TextureAspect::Plane0Only;
wgpu::TextureView lumaTextureView = platformTexture->wgpuTexture.CreateView(&lumaViewDesc);
wgpu::TextureViewDescriptor chromaViewDesc;
chromaViewDesc.format = wgpu::TextureFormat::RG8Unorm;
chromaViewDesc.aspect = wgpu::TextureAspect::Plane1Only;
wgpu::TextureView chromaTextureView = platformTexture->wgpuTexture.CreateView(&chromaViewDesc);
utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
renderPipelineDescriptor.vertex.module = GetTestVertexShaderModule();
renderPipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
[[group(0), binding(0)]] var sampler0 : sampler;
[[group(0), binding(1)]] var lumaTexture : texture_2d<f32>;
[[group(0), binding(2)]] var chromaTexture : texture_2d<f32>;
[[stage(fragment)]]
fn main([[location(0)]] texCoord : vec2<f32>) -> [[location(0)]] vec4<f32> {
let y : f32 = textureSample(lumaTexture, sampler0, texCoord).r;
let u : f32 = textureSample(chromaTexture, sampler0, texCoord).r;
let v : f32 = textureSample(chromaTexture, sampler0, texCoord).g;
return vec4<f32>(y, u, v, 1.0);
})");
utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(
device, kYUVImageDataWidthInTexels, kYUVImageDataHeightInTexels);
renderPipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&renderPipelineDescriptor);
wgpu::Sampler sampler = device.CreateSampler();
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
{
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
pass.SetPipeline(renderPipeline);
pass.SetBindGroup(
0, utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
{{0, sampler}, {1, lumaTextureView}, {2, chromaTextureView}}));
pass.Draw(6);
pass.EndPass();
}
wgpu::CommandBuffer commands = encoder.Finish();
queue.Submit(1, &commands);
// Test four corners of the checkerboard image (YUV color space).
RGBA8 yellowYUV(kYellowYUVColor[kYUVLumaPlaneIndex].r, kYellowYUVColor[kYUVChromaPlaneIndex].r,
kYellowYUVColor[kYUVChromaPlaneIndex].g, 0xFF);
EXPECT_PIXEL_RGBA8_EQ(yellowYUV, renderPass.color, 0, 0); // top left
RGBA8 redYUV(kRedYUVColor[kYUVLumaPlaneIndex].r, kRedYUVColor[kYUVChromaPlaneIndex].r,
kRedYUVColor[kYUVChromaPlaneIndex].g, 0xFF);
EXPECT_PIXEL_RGBA8_EQ(redYUV, renderPass.color, kYUVImageDataWidthInTexels - 1,
kYUVImageDataHeightInTexels - 1); // bottom right
RGBA8 blueYUV(kBlueYUVColor[kYUVLumaPlaneIndex].r, kBlueYUVColor[kYUVChromaPlaneIndex].r,
kBlueYUVColor[kYUVChromaPlaneIndex].g, 0xFF);
EXPECT_PIXEL_RGBA8_EQ(blueYUV, renderPass.color, kYUVImageDataWidthInTexels - 1,
0); // top right
RGBA8 whiteYUV(kWhiteYUVColor[kYUVLumaPlaneIndex].r, kWhiteYUVColor[kYUVChromaPlaneIndex].r,
kWhiteYUVColor[kYUVChromaPlaneIndex].g, 0xFF);
EXPECT_PIXEL_RGBA8_EQ(whiteYUV, renderPass.color, 0,
kYUVImageDataHeightInTexels - 1); // bottom left
mBackend->DestroyVideoTextureForTest(std::move(platformTexture));
}
DAWN_INSTANTIATE_TEST(VideoViewsTests, VideoViewsTestBackend::Backend());

View File

@ -0,0 +1,89 @@
// Copyright 2021 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef TESTS_VIDEOVIEWSTESTS_H_
#define TESTS_VIDEOVIEWSTESTS_H_
#include "tests/DawnTest.h"
#include <array>
#include <memory>
class VideoViewsTestBackend {
public:
static BackendTestConfig Backend();
static std::unique_ptr<VideoViewsTestBackend> Create();
virtual ~VideoViewsTestBackend();
virtual void OnSetUp(WGPUDevice device) = 0;
virtual void OnTearDown() = 0;
class PlatformTexture {
public:
PlatformTexture() = delete;
virtual ~PlatformTexture();
virtual bool CanWrapAsWGPUTexture() = 0;
protected:
explicit PlatformTexture(wgpu::Texture&& texture);
public:
wgpu::Texture wgpuTexture;
};
virtual std::unique_ptr<PlatformTexture> CreateVideoTextureForTest(wgpu::TextureFormat format,
wgpu::TextureUsage usage,
bool isCheckerboard) = 0;
virtual void DestroyVideoTextureForTest(std::unique_ptr<PlatformTexture>&& platformTexture) = 0;
};
class VideoViewsTests : public DawnTest {
public:
// The width and height in texels are 4 for all YUV formats.
static constexpr uint32_t kYUVImageDataWidthInTexels = 4;
static constexpr uint32_t kYUVImageDataHeightInTexels = 4;
static constexpr size_t kYUVLumaPlaneIndex = 0;
static constexpr size_t kYUVChromaPlaneIndex = 1;
// RGB colors converted into YUV (per plane), for testing.
// RGB colors are mapped to the BT.601 definition of luma.
// https://docs.microsoft.com/en-us/windows/win32/medfound/about-yuv-video
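// The values match the limited-range BT.601 equations (with R, G, B in [0, 1]):
//   Y = 16 + 65.481*R + 128.553*G + 24.966*B
//   U = 128 - 37.797*R - 74.203*G + 112.0*B
//   V = 128 + 112.0*R - 93.786*G - 18.214*B
// For example, pure yellow (1, 1, 0) maps to Y = 210, U = 16, V = 146, which is
// kYellowYUVColor below.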
static constexpr std::array<RGBA8, 2> kYellowYUVColor = {RGBA8{210, 0, 0, 0xFF}, // Y
RGBA8{16, 146, 0, 0xFF}}; // UV
static constexpr std::array<RGBA8, 2> kWhiteYUVColor = {RGBA8{235, 0, 0, 0xFF}, // Y
RGBA8{128, 128, 0, 0xFF}}; // UV
static constexpr std::array<RGBA8, 2> kBlueYUVColor = {RGBA8{41, 0, 0, 0xFF}, // Y
RGBA8{240, 110, 0, 0xFF}}; // UV
static constexpr std::array<RGBA8, 2> kRedYUVColor = {RGBA8{81, 0, 0, 0xFF}, // Y
RGBA8{90, 240, 0, 0xFF}}; // UV
static std::vector<uint8_t> GetTestTextureData(wgpu::TextureFormat format, bool isCheckerboard);
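// For NV12 this is the packed 4x4 luma (Y) plane followed by the 2x2 interleaved chroma (UV)
// plane, i.e. 1.5 bytes per texel.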
protected:
void SetUp() override;
void TearDown() override;
std::vector<const char*> GetRequiredFeatures() override;
bool IsMultiPlanarFormatsSupported() const;
wgpu::ShaderModule GetTestVertexShaderModule() const;
std::unique_ptr<VideoViewsTestBackend> mBackend;
bool mIsMultiPlanarFormatsSupported = false;
};
#endif // TESTS_VIDEOVIEWSTESTS_H_

View File

@ -0,0 +1,203 @@
// Copyright 2021 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "VideoViewsTests.h"
#include "common/Assert.h"
#include "dawn_native/VulkanBackend.h"
#include <fcntl.h>
#include <gbm.h>
// "linux-chromeos-rel"'s gbm.h is too old to compile, missing this change at least:
// https://chromium-review.googlesource.com/c/chromiumos/platform/minigbm/+/1963001/10/gbm.h#244
#ifndef MINIGBM
# define GBM_BO_USE_TEXTURING (1 << 5)
# define GBM_BO_USE_SW_WRITE_RARELY (1 << 12)
# define GBM_BO_USE_HW_VIDEO_DECODER (1 << 13)
#endif
class PlatformTextureGbm : public VideoViewsTestBackend::PlatformTexture {
public:
PlatformTextureGbm(wgpu::Texture&& texture, gbm_bo* gbmBo)
: PlatformTexture(std::move(texture)), mGbmBo(gbmBo) {
}
~PlatformTextureGbm() override = default;
// TODO(chromium:1258986): Add DISJOINT vkImage support for multi-planar formats.
bool CanWrapAsWGPUTexture() override {
ASSERT(mGbmBo != nullptr);
// Checks whether all plane handles of a multi-planar gbm_bo are the same.
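// Wrapping is only possible when every plane aliases the same buffer object, because the
// image is imported without DISJOINT support (see the TODO above).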
gbm_bo_handle plane0Handle = gbm_bo_get_handle_for_plane(mGbmBo, 0);
for (int plane = 1; plane < gbm_bo_get_plane_count(mGbmBo); ++plane) {
if (gbm_bo_get_handle_for_plane(mGbmBo, plane).u32 != plane0Handle.u32) {
return false;
}
}
return true;
}
gbm_bo* GetGbmBo() {
return mGbmBo;
}
private:
gbm_bo* mGbmBo = nullptr;
};
class VideoViewsTestBackendGbm : public VideoViewsTestBackend {
public:
void OnSetUp(WGPUDevice device) override {
mWGPUDevice = device;
mGbmDevice = CreateGbmDevice();
}
void OnTearDown() override {
gbm_device_destroy(mGbmDevice);
}
private:
gbm_device* CreateGbmDevice() {
// Render nodes [1] are the primary interface for communicating with the GPU on
// devices that support DRM. The actual filename of the render node is
// implementation-specific, so we must scan through all possible filenames to find
// one that we can use [2].
//
// [1] https://dri.freedesktop.org/docs/drm/gpu/drm-uapi.html#render-nodes
// [2]
// https://cs.chromium.org/chromium/src/ui/ozone/platform/wayland/gpu/drm_render_node_path_finder.cc
const uint32_t kRenderNodeStart = 128;
const uint32_t kRenderNodeEnd = kRenderNodeStart + 16;
const std::string kRenderNodeTemplate = "/dev/dri/renderD";
int renderNodeFd = -1;
for (uint32_t i = kRenderNodeStart; i < kRenderNodeEnd; i++) {
std::string renderNode = kRenderNodeTemplate + std::to_string(i);
renderNodeFd = open(renderNode.c_str(), O_RDWR);
if (renderNodeFd >= 0)
break;
}
ASSERT(renderNodeFd >= 0);
gbm_device* gbmDevice = gbm_create_device(renderNodeFd);
ASSERT(gbmDevice != nullptr);
return gbmDevice;
}
static uint32_t GetGbmBoFormat(wgpu::TextureFormat format) {
switch (format) {
case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
return GBM_FORMAT_NV12;
default:
UNREACHABLE();
}
}
WGPUTextureFormat ToWGPUTextureFormat(wgpu::TextureFormat format) {
switch (format) {
case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
return WGPUTextureFormat_R8BG8Biplanar420Unorm;
default:
UNREACHABLE();
}
}
WGPUTextureUsage ToWGPUTextureUsage(wgpu::TextureUsage usage) {
switch (usage) {
case wgpu::TextureUsage::TextureBinding:
return WGPUTextureUsage_TextureBinding;
default:
UNREACHABLE();
}
}
std::unique_ptr<VideoViewsTestBackend::PlatformTexture> CreateVideoTextureForTest(
wgpu::TextureFormat format,
wgpu::TextureUsage usage,
bool isCheckerboard) override {
uint32_t flags = GBM_BO_USE_SCANOUT | GBM_BO_USE_TEXTURING | GBM_BO_USE_HW_VIDEO_DECODER |
GBM_BO_USE_SW_WRITE_RARELY;
gbm_bo* gbmBo = gbm_bo_create(mGbmDevice, VideoViewsTests::kYUVImageDataWidthInTexels,
VideoViewsTests::kYUVImageDataHeightInTexels,
GetGbmBoFormat(format), flags);
if (gbmBo == nullptr) {
return nullptr;
}
void* mapHandle = nullptr;
uint32_t strideBytes = 0;
void* addr = gbm_bo_map(gbmBo, 0, 0, VideoViewsTests::kYUVImageDataWidthInTexels,
VideoViewsTests::kYUVImageDataHeightInTexels, GBM_BO_TRANSFER_WRITE,
&strideBytes, &mapHandle);
EXPECT_NE(addr, nullptr);
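// NV12 data: the Y plane followed by the interleaved UV plane. The memcpy below ignores
// strideBytes and assumes the mapped buffer is tightly packed (stride == width); a
// stride-aware per-row copy would be needed for padded strides.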
std::vector<uint8_t> initialData =
VideoViewsTests::GetTestTextureData(format, isCheckerboard);
std::memcpy(addr, initialData.data(), initialData.size());
gbm_bo_unmap(gbmBo, mapHandle);
wgpu::TextureDescriptor textureDesc;
textureDesc.format = format;
textureDesc.dimension = wgpu::TextureDimension::e2D;
textureDesc.usage = usage;
textureDesc.size = {VideoViewsTests::kYUVImageDataWidthInTexels,
VideoViewsTests::kYUVImageDataHeightInTexels, 1};
wgpu::DawnTextureInternalUsageDescriptor internalDesc;
internalDesc.internalUsage = wgpu::TextureUsage::CopySrc;
textureDesc.nextInChain = &internalDesc;
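// Wrap the gbm_bo as a dma-buf external image. A single FD/stride/modifier triple describes
// the whole NV12 buffer because both planes live in the same buffer object (see
// CanWrapAsWGPUTexture above).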
dawn_native::vulkan::ExternalImageDescriptorDmaBuf descriptor = {};
descriptor.cTextureDescriptor =
reinterpret_cast<const WGPUTextureDescriptor*>(&textureDesc);
descriptor.isInitialized = true;
descriptor.memoryFD = gbm_bo_get_fd(gbmBo);
descriptor.stride = gbm_bo_get_stride(gbmBo);
descriptor.drmModifier = gbm_bo_get_modifier(gbmBo);
descriptor.waitFDs = {};
return std::make_unique<PlatformTextureGbm>(
wgpu::Texture::Acquire(dawn_native::vulkan::WrapVulkanImage(mWGPUDevice, &descriptor)),
gbmBo);
}
void DestroyVideoTextureForTest(
std::unique_ptr<VideoViewsTestBackend::PlatformTexture>&& platformTexture) override {
// Exports the signal semaphores and ignores them.
dawn_native::vulkan::ExternalImageExportInfoDmaBuf exportInfo;
dawn_native::vulkan::ExportVulkanImage(platformTexture->wgpuTexture.Get(),
VK_IMAGE_LAYOUT_GENERAL, &exportInfo);
for (int fd : exportInfo.semaphoreHandles) {
ASSERT_NE(fd, -1);
close(fd);
}
gbm_bo* gbmBo = static_cast<PlatformTextureGbm*>(platformTexture.get())->GetGbmBo();
ASSERT_NE(gbmBo, nullptr);
gbm_bo_destroy(gbmBo);
}
WGPUDevice mWGPUDevice = nullptr;
gbm_device* mGbmDevice = nullptr;
};
// static
BackendTestConfig VideoViewsTestBackend::Backend() {
return VulkanBackend();
}
// static
std::unique_ptr<VideoViewsTestBackend> VideoViewsTestBackend::Create() {
return std::make_unique<VideoViewsTestBackendGbm>();
}

View File

@ -0,0 +1,191 @@
// Copyright 2021 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "VideoViewsTests.h"
#include "common/Assert.h"
#include "dawn_native/D3D12Backend.h"
#include <d3d11.h>
#include <d3d12.h>
#include <dxgi1_4.h>
#include <wrl/client.h>
using Microsoft::WRL::ComPtr;
class PlatformTextureWin : public VideoViewsTestBackend::PlatformTexture {
public:
explicit PlatformTextureWin(wgpu::Texture&& texture) : PlatformTexture(std::move(texture)) {
}
~PlatformTextureWin() override = default;
bool CanWrapAsWGPUTexture() override {
return true;
}
};
class VideoViewsTestBackendWin : public VideoViewsTestBackend {
public:
~VideoViewsTestBackendWin() override = default;
void OnSetUp(WGPUDevice device) override {
mWGPUDevice = device;
// Create the D3D11 device/contexts that will be used in subsequent tests
ComPtr<ID3D12Device> d3d12Device = dawn_native::d3d12::GetD3D12Device(device);
const LUID adapterLuid = d3d12Device->GetAdapterLuid();
ComPtr<IDXGIFactory4> dxgiFactory;
HRESULT hr = ::CreateDXGIFactory2(0, IID_PPV_ARGS(&dxgiFactory));
ASSERT_EQ(hr, S_OK);
ComPtr<IDXGIAdapter> dxgiAdapter;
hr = dxgiFactory->EnumAdapterByLuid(adapterLuid, IID_PPV_ARGS(&dxgiAdapter));
ASSERT_EQ(hr, S_OK);
ComPtr<ID3D11Device> d3d11Device;
D3D_FEATURE_LEVEL d3dFeatureLevel;
ComPtr<ID3D11DeviceContext> d3d11DeviceContext;
hr = ::D3D11CreateDevice(dxgiAdapter.Get(), D3D_DRIVER_TYPE_UNKNOWN, nullptr, 0, nullptr, 0,
D3D11_SDK_VERSION, &d3d11Device, &d3dFeatureLevel,
&d3d11DeviceContext);
ASSERT_EQ(hr, S_OK);
// The runtime that creates the texture (the D3D11 device) and the runtime that opens the
// shared handle (Dawn's D3D12 device) must agree on resource sharing capability. For NV12
// formats, D3D11 requires at least D3D11_SHARED_RESOURCE_TIER_2 support.
// https://docs.microsoft.com/en-us/windows/win32/api/d3d11/ne-d3d11-d3d11_shared_resource_tier
D3D11_FEATURE_DATA_D3D11_OPTIONS5 featureOptions5{};
hr = d3d11Device->CheckFeatureSupport(D3D11_FEATURE_D3D11_OPTIONS5, &featureOptions5,
sizeof(featureOptions5));
ASSERT_EQ(hr, S_OK);
ASSERT_GE(featureOptions5.SharedResourceTier, D3D11_SHARED_RESOURCE_TIER_2);
mD3d11Device = std::move(d3d11Device);
}
void OnTearDown() override {
}
protected:
static DXGI_FORMAT GetDXGITextureFormat(wgpu::TextureFormat format) {
switch (format) {
case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
return DXGI_FORMAT_NV12;
default:
UNREACHABLE();
}
}
std::unique_ptr<VideoViewsTestBackend::PlatformTexture> CreateVideoTextureForTest(
wgpu::TextureFormat format,
wgpu::TextureUsage usage,
bool isCheckerboard) override {
wgpu::TextureDescriptor textureDesc;
textureDesc.format = format;
textureDesc.dimension = wgpu::TextureDimension::e2D;
textureDesc.usage = usage;
textureDesc.size = {VideoViewsTests::kYUVImageDataWidthInTexels,
VideoViewsTests::kYUVImageDataHeightInTexels, 1};
// Create a DX11 texture with data then wrap it in a shared handle.
D3D11_TEXTURE2D_DESC d3dDescriptor;
d3dDescriptor.Width = VideoViewsTests::kYUVImageDataWidthInTexels;
d3dDescriptor.Height = VideoViewsTests::kYUVImageDataHeightInTexels;
d3dDescriptor.MipLevels = 1;
d3dDescriptor.ArraySize = 1;
d3dDescriptor.Format = GetDXGITextureFormat(format);
d3dDescriptor.SampleDesc.Count = 1;
d3dDescriptor.SampleDesc.Quality = 0;
d3dDescriptor.Usage = D3D11_USAGE_DEFAULT;
d3dDescriptor.BindFlags = D3D11_BIND_SHADER_RESOURCE;
d3dDescriptor.CPUAccessFlags = 0;
d3dDescriptor.MiscFlags =
D3D11_RESOURCE_MISC_SHARED_NTHANDLE | D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX;
std::vector<uint8_t> initialData =
VideoViewsTests::GetTestTextureData(format, isCheckerboard);
D3D11_SUBRESOURCE_DATA subres;
subres.pSysMem = initialData.data();
subres.SysMemPitch = VideoViewsTests::kYUVImageDataWidthInTexels;
ComPtr<ID3D11Texture2D> d3d11Texture;
HRESULT hr = mD3d11Device->CreateTexture2D(&d3dDescriptor, &subres, &d3d11Texture);
ASSERT(hr == S_OK);
ComPtr<IDXGIResource1> dxgiResource;
hr = d3d11Texture.As(&dxgiResource);
ASSERT(hr == S_OK);
HANDLE sharedHandle;
hr = dxgiResource->CreateSharedHandle(
nullptr, DXGI_SHARED_RESOURCE_READ | DXGI_SHARED_RESOURCE_WRITE, nullptr,
&sharedHandle);
ASSERT(hr == S_OK);
// The DX11 texture should be initialized by CreateTexture2D. However, if we do not
// acquire/release the keyed mutex before using the wrapped WebGPU texture, the WebGPU
// texture is left uninitialized; the acquire/release below is required for D3D11/D3D12
// interop.
ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex;
hr = d3d11Texture.As(&dxgiKeyedMutex);
ASSERT(hr == S_OK);
hr = dxgiKeyedMutex->AcquireSync(0, INFINITE);
ASSERT(hr == S_OK);
hr = dxgiKeyedMutex->ReleaseSync(1);
ASSERT(hr == S_OK);
// Open the DX11 texture in Dawn from the shared handle and return it as a WebGPU
// texture.
dawn_native::d3d12::ExternalImageDescriptorDXGISharedHandle externalImageDesc;
externalImageDesc.cTextureDescriptor =
reinterpret_cast<const WGPUTextureDescriptor*>(&textureDesc);
externalImageDesc.sharedHandle = sharedHandle;
std::unique_ptr<dawn_native::d3d12::ExternalImageDXGI> externalImage =
dawn_native::d3d12::ExternalImageDXGI::Create(mWGPUDevice, &externalImageDesc);
// Handle is no longer needed once resources are created.
::CloseHandle(sharedHandle);
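// The acquire key must match the key released by the D3D11 producer above (ReleaseSync(1));
// Dawn will release with key 2 once it is done with the texture.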
dawn_native::d3d12::ExternalImageAccessDescriptorDXGIKeyedMutex externalAccessDesc;
externalAccessDesc.acquireMutexKey = 1;
externalAccessDesc.releaseMutexKey = 2;
externalAccessDesc.isInitialized = true;
externalAccessDesc.usage = static_cast<WGPUTextureUsageFlags>(textureDesc.usage);
return std::make_unique<PlatformTextureWin>(wgpu::Texture::Acquire(
externalImage->ProduceTexture(mWGPUDevice, &externalAccessDesc)));
}
void DestroyVideoTextureForTest(
std::unique_ptr<VideoViewsTestBackend::PlatformTexture>&& platformTexture) override {
}
WGPUDevice mWGPUDevice = nullptr;
ComPtr<ID3D11Device> mD3d11Device;
};
// static
BackendTestConfig VideoViewsTestBackend::Backend() {
return D3D12Backend();
}
// static
std::unique_ptr<VideoViewsTestBackend> VideoViewsTestBackend::Create() {
return std::make_unique<VideoViewsTestBackendWin>();
}