Vulkan: Clamp @builtin(frag_depth) with push constant values

Start using Tint's ClampFragDepth transform in the Vulkan backend when
needed so that @builtin(frag_depth) is correctly clamped to the viewport
depth range. Do this by always reserving 8 bytes of push constant space
to hold the f32 min and max depth values from the last viewport command.

Reenables relevant CTS tests that were suppressed on Vulkan.

Bug: dawn:1125, dawn:1576, dawn:1616
Change-Id: I38f4f6c3c51c99b5e591a780fea9859537529534
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/105642
Reviewed-by: Austin Eng <enga@chromium.org>
Reviewed-by: Loko Kung <lokokung@google.com>
Kokoro: Kokoro <noreply+kokoro@google.com>
Commit-Queue: Corentin Wallez <cwallez@chromium.org>
Corentin Wallez 2023-01-05 12:24:52 +00:00 committed by Dawn LUCI CQ
parent 452d6e3cc3
commit 8e68a89cfb
20 changed files with 348 additions and 49 deletions
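For readers skimming the diff, the sketch below illustrates the overall shape this change aims for: the fragment shader reads the viewport's min/max depth from the 8 reserved bytes of push constant data and clamps @builtin(frag_depth) with them. It is only an illustration written in the style of the tests in this change (WGSL embedded in a C++ raw string); the struct and variable names, and the push_constant spelling, are assumptions rather than the actual Tint ClampFragDepth output.

// Illustrative only: roughly what a fragment shader looks like after frag_depth clamping.
// The names below are assumptions for illustration, not the transform's real output, and
// var<push_constant> relies on a Tint-internal extension whose exact spelling may differ.
constexpr const char* kClampedFragDepthSketchWGSL = R"(
    enable chromium_experimental_push_constant;

    // Matches the 8 bytes the Vulkan backend pushes from the last viewport command.
    struct FragDepthClampArgs {
        min : f32,
        max : f32,
    }
    var<push_constant> frag_depth_clamp_args : FragDepthClampArgs;

    @fragment fn fs() -> @builtin(frag_depth) f32 {
        let original_depth = 1.0;  // whatever the original shader returned
        return clamp(original_depth, frag_depth_clamp_args.min, frag_depth_clamp_args.max);
    }
)";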

View File

@@ -26,6 +26,7 @@
#include "src/tint/inspector/inspector.h"
#include "src/tint/reader/reader.h"
#include "src/tint/transform/binding_remapper.h"
#include "src/tint/transform/clamp_frag_depth.h"
#include "src/tint/transform/first_index_offset.h"
#include "src/tint/transform/manager.h"
#include "src/tint/transform/multiplanar_external_texture.h"

View File

@@ -216,6 +216,10 @@ const PerStage<ProgrammableStage>& PipelineBase::GetAllStages() const {
return mStages;
}
bool PipelineBase::HasStage(SingleShaderStage stage) const {
return mStageMask & StageBit(stage);
}
wgpu::ShaderStage PipelineBase::GetStageMask() const {
return mStageMask;
}

View File

@@ -59,6 +59,7 @@ class PipelineBase : public ApiObjectBase, public CachedObject {
const RequiredBufferSizes& GetMinBufferSizes() const;
const ProgrammableStage& GetStage(SingleShaderStage stage) const;
const PerStage<ProgrammableStage>& GetAllStages() const;
bool HasStage(SingleShaderStage stage) const;
wgpu::ShaderStage GetStageMask() const;
ResultOrError<Ref<BindGroupLayoutBase>> GetBindGroupLayout(uint32_t groupIndex);

View File

@@ -643,6 +643,10 @@ RenderPipelineBase::RenderPipelineBase(DeviceBase* device,
}
}
if (HasStage(SingleShaderStage::Fragment)) {
mUsesFragDepth = GetStage(SingleShaderStage::Fragment).metadata->usesFragDepth;
}
SetContentHash(ComputeContentHash());
GetObjectTrackingList()->Track(this);
@@ -829,22 +833,24 @@ bool RenderPipelineBase::IsAlphaToCoverageEnabled() const {
const AttachmentState* RenderPipelineBase::GetAttachmentState() const {
ASSERT(!IsError());
return mAttachmentState.Get();
}
bool RenderPipelineBase::WritesDepth() const {
ASSERT(!IsError());
return mWritesDepth;
}
bool RenderPipelineBase::WritesStencil() const {
ASSERT(!IsError());
return mWritesStencil;
}
bool RenderPipelineBase::UsesFragDepth() const {
ASSERT(!IsError());
return mUsesFragDepth;
}
size_t RenderPipelineBase::ComputeContentHash() {
ObjectContentHasher recorder;

View File

@@ -101,6 +101,7 @@ class RenderPipelineBase : public PipelineBase {
bool IsAlphaToCoverageEnabled() const;
bool WritesDepth() const;
bool WritesStencil() const;
bool UsesFragDepth() const;
const AttachmentState* GetAttachmentState() const;
@@ -140,6 +141,7 @@ class RenderPipelineBase : public PipelineBase {
bool mUnclippedDepth = false;
bool mWritesDepth = false;
bool mWritesStencil = false;
bool mUsesFragDepth = false;
};
} // namespace dawn::native

View File

@@ -646,6 +646,7 @@ ResultOrError<std::unique_ptr<EntryPointMetadata>> ReflectEntryPointUsingTint(
if (entryPoint.sample_index_used) {
totalInterStageShaderComponents += 1;
}
metadata->usesFragDepth = entryPoint.frag_depth_used;
metadata->totalInterStageShaderComponents = totalInterStageShaderComponents;
DelayedInvalidIf(totalInterStageShaderComponents > maxInterStageShaderComponents,

View File

@@ -245,6 +245,7 @@ struct EntryPointMetadata {
std::unordered_set<std::string> initializedOverrides;
bool usesNumWorkgroups = false;
bool usesFragDepth = false;
// Used at render pipeline validation.
bool usesSampleMaskOutput = false;
};

View File

@@ -1490,9 +1490,13 @@ MaybeError CommandBuffer::EncodeRenderPass(id<MTLRenderCommandEncoder> encoder,
slopeScale:newPipeline->GetDepthBiasSlopeScale()
clamp:newPipeline->GetDepthBiasClamp()];
if (@available(macOS 10.11, iOS 11.0, *)) {
MTLDepthClipMode clipMode = newPipeline->HasUnclippedDepth()
? MTLDepthClipModeClamp
: MTLDepthClipModeClip;
// When using @builtin(frag_depth) we need to clamp to the viewport, otherwise
// Metal writes the raw value to the depth buffer, which doesn't match other
// APIs.
MTLDepthClipMode clipMode =
(newPipeline->UsesFragDepth() || newPipeline->HasUnclippedDepth())
? MTLDepthClipModeClamp
: MTLDepthClipModeClip;
[encoder setDepthClipMode:clipMode];
}
newPipeline->Encode(encoder);

View File

@@ -232,7 +232,9 @@ MaybeError Adapter::InitializeSupportedFeaturesImpl() {
mSupportedFeatures.EnableFeature(Feature::ChromiumExperimentalDp4a);
}
if (mDeviceInfo.HasExt(DeviceExt::DepthClipEnable) &&
// unclippedDepth=true translates to depthClipEnable=false, depthClamp=true
if (mDeviceInfo.features.depthClamp == VK_TRUE &&
mDeviceInfo.HasExt(DeviceExt::DepthClipEnable) &&
mDeviceInfo.depthClipEnableFeatures.depthClipEnable == VK_TRUE) {
mSupportedFeatures.EnableFeature(Feature::DepthClipControl);
}
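As a side note on the translation mentioned in the comment above, here is a minimal sketch of how unclippedDepth maps onto the Vulkan rasterization state at pipeline creation time. It is illustrative only and not the exact Dawn code; `unclippedDepth` and `MakeRasterizationState` are placeholder names.

#include <vulkan/vulkan.h>

// Illustrative only: unclippedDepth=true -> depthClipEnable=false, depthClamp=true.
VkPipelineRasterizationStateCreateInfo MakeRasterizationState(
    bool unclippedDepth,
    VkPipelineRasterizationDepthClipStateCreateInfoEXT* depthClipState) {
    depthClipState->sType =
        VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT;
    depthClipState->pNext = nullptr;
    depthClipState->flags = 0;
    // Primitives keep their depth instead of being clipped when unclippedDepth is set.
    depthClipState->depthClipEnable = unclippedDepth ? VK_FALSE : VK_TRUE;

    VkPipelineRasterizationStateCreateInfo rasterization = {};
    rasterization.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
    rasterization.pNext = depthClipState;
    // Fragment depth is clamped to the viewport depth range instead of being discarded.
    rasterization.depthClampEnable = unclippedDepth ? VK_TRUE : VK_FALSE;
    return rasterization;
}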

View File

@@ -1108,6 +1108,23 @@ MaybeError CommandBuffer::RecordRenderPass(CommandRecordingContext* recordingCon
DescriptorSetTracker descriptorSets = {};
RenderPipeline* lastPipeline = nullptr;
// Tracking for the push constants needed by the ClampFragDepth transform.
// TODO(dawn:1125): Avoid the need for this when the depthClamp feature is available. Doing so
// requires fixing dawn:1576 first to allow more dynamic push constant usage, and adding a
// Toggle plus tests so that the dirtying logic here can be exercised on our infra.
ClampFragDepthArgs clampFragDepthArgs = {0.0f, 1.0f};
bool clampFragDepthArgsDirty = true;
auto ApplyClampFragDepthArgs = [&]() {
if (!clampFragDepthArgsDirty || lastPipeline == nullptr) {
return;
}
device->fn.CmdPushConstants(commands, ToBackend(lastPipeline->GetLayout())->GetHandle(),
VK_SHADER_STAGE_FRAGMENT_BIT, kClampFragDepthArgsOffset,
kClampFragDepthArgsSize, &clampFragDepthArgs);
clampFragDepthArgsDirty = false;
};
auto EncodeRenderBundleCommand = [&](CommandIterator* iter, Command type) {
switch (type) {
case Command::Draw: {
@@ -1231,6 +1248,9 @@ MaybeError CommandBuffer::RecordRenderPass(CommandRecordingContext* recordingCon
lastPipeline = pipeline;
descriptorSets.OnSetPipeline(pipeline);
// Apply the deferred min/maxDepth push constants update if needed.
ApplyClampFragDepthArgs();
break;
}
@@ -1302,6 +1322,12 @@ MaybeError CommandBuffer::RecordRenderPass(CommandRecordingContext* recordingCon
}
device->fn.CmdSetViewport(commands, 0, 1, &viewport);
// Try applying the push constants that contain min/maxDepth immediately. This can
// be deferred if no pipeline is currently bound.
clampFragDepthArgs = {viewport.minDepth, viewport.maxDepth};
clampFragDepthArgsDirty = true;
ApplyClampFragDepthArgs();
break;
}

View File

@@ -61,7 +61,8 @@ MaybeError ComputePipeline::Initialize() {
ShaderModule::ModuleAndSpirv moduleAndSpirv;
DAWN_TRY_ASSIGN(moduleAndSpirv,
module->GetHandleAndSpirv(SingleShaderStage::Compute, computeStage, layout));
module->GetHandleAndSpirv(SingleShaderStage::Compute, computeStage, layout,
/*clampFragDepth*/ false));
createInfo.stage.module = moduleAndSpirv.module;
createInfo.stage.pName = moduleAndSpirv.remappedEntryPoint;

View File

@@ -505,6 +505,7 @@ ResultOrError<VulkanDeviceKnobs> Device::CreateDevice(VkPhysicalDevice physicalD
ASSERT(deviceInfo.HasExt(DeviceExt::DepthClipEnable) &&
deviceInfo.depthClipEnableFeatures.depthClipEnable == VK_TRUE);
usedKnobs.features.depthClamp = VK_TRUE;
usedKnobs.depthClipEnableFeatures.depthClipEnable = VK_TRUE;
featuresChain.Add(&usedKnobs.depthClipEnableFeatures,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT);

View File

@@ -46,14 +46,20 @@ MaybeError PipelineLayout::Initialize() {
numSetLayouts++;
}
// Always reserve push constant space for the ClampFragDepthArgs.
VkPushConstantRange depthClampArgsRange;
depthClampArgsRange.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
depthClampArgsRange.offset = kClampFragDepthArgsOffset;
depthClampArgsRange.size = kClampFragDepthArgsSize;
VkPipelineLayoutCreateInfo createInfo;
createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
createInfo.pNext = nullptr;
createInfo.flags = 0;
createInfo.setLayoutCount = numSetLayouts;
createInfo.pSetLayouts = AsVkArray(setLayouts.data());
createInfo.pushConstantRangeCount = 0;
createInfo.pPushConstantRanges = nullptr;
createInfo.pushConstantRangeCount = 1;
createInfo.pPushConstantRanges = &depthClampArgsRange;
// Record cache key information now since the createInfo is not stored.
StreamIn(&mCacheKey, stream::Iterable(cachedObjects.data(), numSetLayouts), createInfo);

View File

@@ -24,6 +24,17 @@ namespace dawn::native::vulkan {
class Device;
// 8 bytes of push constant data are always reserved in the Vulkan pipeline layouts to be used by
// the code generated by the ClampFragDepth Tint transform. TODO(dawn:1576): Optimize usage of push
// constants so that they are only added to a pipeline / pipeline layout if needed.
struct ClampFragDepthArgs {
float min;
float max;
};
constexpr size_t kClampFragDepthArgsOffset = 0u;
constexpr size_t kClampFragDepthArgsSize = sizeof(ClampFragDepthArgs);
static_assert(kClampFragDepthArgsSize == 8u);
class PipelineLayout final : public PipelineLayoutBase {
public:
static ResultOrError<Ref<PipelineLayout>> Create(Device* device,

View File

@@ -343,45 +343,38 @@ MaybeError RenderPipeline::Initialize() {
std::array<VkPipelineShaderStageCreateInfo, 2> shaderStages;
uint32_t stageCount = 0;
for (auto stage : IterateStages(this->GetStageMask())) {
VkPipelineShaderStageCreateInfo shaderStage;
auto AddShaderStage = [&](SingleShaderStage stage, VkShaderStageFlagBits vkStage,
bool clampFragDepth) -> MaybeError {
const ProgrammableStage& programmableStage = GetStage(stage);
ShaderModule* module = ToBackend(programmableStage.module.Get());
ShaderModule::ModuleAndSpirv moduleAndSpirv;
DAWN_TRY_ASSIGN(moduleAndSpirv,
module->GetHandleAndSpirv(stage, programmableStage, layout));
shaderStage.module = moduleAndSpirv.module;
shaderStage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
shaderStage.pNext = nullptr;
shaderStage.flags = 0;
shaderStage.pSpecializationInfo = nullptr;
shaderStage.pName = moduleAndSpirv.remappedEntryPoint;
switch (stage) {
case dawn::native::SingleShaderStage::Vertex: {
shaderStage.stage = VK_SHADER_STAGE_VERTEX_BIT;
break;
}
case dawn::native::SingleShaderStage::Fragment: {
shaderStage.stage = VK_SHADER_STAGE_FRAGMENT_BIT;
break;
}
default: {
// For render pipelines, only the Vertex and Fragment stages are possible.
DAWN_UNREACHABLE();
break;
}
}
DAWN_ASSERT(stageCount < 2);
shaderStages[stageCount] = shaderStage;
stageCount++;
ToBackend(programmableStage.module)
->GetHandleAndSpirv(stage, programmableStage, layout, clampFragDepth));
// Record cache key for each shader since it will become inaccessible later on.
StreamIn(&mCacheKey, stream::Iterable(moduleAndSpirv.spirv, moduleAndSpirv.wordCount));
VkPipelineShaderStageCreateInfo* shaderStage = &shaderStages[stageCount];
shaderStage->module = moduleAndSpirv.module;
shaderStage->sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
shaderStage->pNext = nullptr;
shaderStage->flags = 0;
shaderStage->pSpecializationInfo = nullptr;
shaderStage->stage = vkStage;
shaderStage->pName = moduleAndSpirv.remappedEntryPoint;
stageCount++;
return {};
};
// Add the vertex stage that's always present.
DAWN_TRY(AddShaderStage(SingleShaderStage::Vertex, VK_SHADER_STAGE_VERTEX_BIT,
/*clampFragDepth*/ false));
// Add the fragment stage if present.
if (GetStageMask() & wgpu::ShaderStage::Fragment) {
bool clampFragDepth = UsesFragDepth() && !HasUnclippedDepth();
DAWN_TRY(AddShaderStage(SingleShaderStage::Fragment, VK_SHADER_STAGE_FRAGMENT_BIT,
clampFragDepth));
}
PipelineVertexInputStateCreateInfoTemporaryAllocations tempAllocations;
@@ -422,7 +415,7 @@ MaybeError RenderPipeline::Initialize() {
rasterization.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
rasterization.pNext = nullptr;
rasterization.flags = 0;
rasterization.depthClampEnable = VK_FALSE;
rasterization.depthClampEnable = HasUnclippedDepth();
rasterization.rasterizerDiscardEnable = VK_FALSE;
rasterization.polygonMode = VK_POLYGON_MODE_FILL;
rasterization.cullMode = VulkanCullMode(GetCullMode());

View File

@@ -175,6 +175,7 @@ ShaderModule::~ShaderModule() = default;
X(bool, disableWorkgroupInit) \
X(bool, disableSymbolRenaming) \
X(bool, useZeroInitializeWorkgroupMemoryExtension) \
X(bool, clampFragDepth) \
X(CacheKey::UnsafeUnkeyedValue<dawn::platform::Platform*>, tracePlatform)
DAWN_MAKE_CACHE_REQUEST(SpirvCompilationRequest, SPIRV_COMPILATION_REQUEST_MEMBERS);
@@ -183,7 +184,8 @@ DAWN_MAKE_CACHE_REQUEST(SpirvCompilationRequest, SPIRV_COMPILATION_REQUEST_MEMBE
ResultOrError<ShaderModule::ModuleAndSpirv> ShaderModule::GetHandleAndSpirv(
SingleShaderStage stage,
const ProgrammableStage& programmableStage,
const PipelineLayout* layout) {
const PipelineLayout* layout,
bool clampFragDepth) {
TRACE_EVENT0(GetDevice()->GetPlatform(), General, "ShaderModuleVk::GetHandleAndSpirv");
// If the shader was destroyed, we should never call this function.
@@ -258,6 +260,7 @@ ResultOrError<ShaderModule::ModuleAndSpirv> ShaderModule::GetHandleAndSpirv(
req.disableSymbolRenaming = GetDevice()->IsToggleEnabled(Toggle::DisableSymbolRenaming);
req.useZeroInitializeWorkgroupMemoryExtension =
GetDevice()->IsToggleEnabled(Toggle::VulkanUseZeroInitializeWorkgroupMemoryExtension);
req.clampFragDepth = clampFragDepth;
req.tracePlatform = UnsafeUnkeyedValue(GetDevice()->GetPlatform());
req.substituteOverrideConfig = std::move(substituteOverrideConfig);
@@ -305,6 +308,10 @@ ResultOrError<ShaderModule::ModuleAndSpirv> ShaderModule::GetHandleAndSpirv(
std::move(r.substituteOverrideConfig).value());
}
if (r.clampFragDepth) {
transformManager.Add<tint::transform::ClampFragDepth>();
}
tint::Program program;
tint::transform::DataMap transformOutputs;
{

View File

@@ -64,7 +64,8 @@ class ShaderModule final : public ShaderModuleBase {
ResultOrError<ModuleAndSpirv> GetHandleAndSpirv(SingleShaderStage stage,
const ProgrammableStage& programmableStage,
const PipelineLayout* layout);
const PipelineLayout* layout,
bool clampFragDepth);
private:
ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);

View File

@@ -506,6 +506,7 @@ source_set("end2end_tests_sources") {
"end2end/ExperimentalDP4aTests.cpp",
"end2end/ExternalTextureTests.cpp",
"end2end/FirstIndexOffsetTests.cpp",
"end2end/FragDepthTests.cpp",
"end2end/GpuMemorySynchronizationTests.cpp",
"end2end/IndexFormatTests.cpp",
"end2end/MaxLimitTests.cpp",

View File

@@ -0,0 +1,231 @@
// Copyright 2022 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dawn/tests/DawnTest.h"
#include "dawn/utils/ComboRenderPipelineDescriptor.h"
#include "dawn/utils/WGPUHelpers.h"
constexpr wgpu::TextureFormat kDepthFormat = wgpu::TextureFormat::Depth32Float;
class FragDepthTests : public DawnTest {};
// Test that when writing to FragDepth the result is clamped to the viewport.
TEST_P(FragDepthTests, FragDepthIsClampedToViewport) {
// TODO(dawn:1125): Add the frag_depth clamping shader transform to the GL backend.
DAWN_SUPPRESS_TEST_IF(IsOpenGL() || IsOpenGLES());
wgpu::ShaderModule module = utils::CreateShaderModule(device, R"(
@vertex fn vs() -> @builtin(position) vec4<f32> {
return vec4<f32>(0.0, 0.0, 0.5, 1.0);
}
@fragment fn fs() -> @builtin(frag_depth) f32 {
return 1.0;
}
)");
// Create the pipeline that uses frag_depth to output the depth.
utils::ComboRenderPipelineDescriptor pDesc;
pDesc.vertex.module = module;
pDesc.vertex.entryPoint = "vs";
pDesc.primitive.topology = wgpu::PrimitiveTopology::PointList;
pDesc.cFragment.module = module;
pDesc.cFragment.entryPoint = "fs";
pDesc.cFragment.targetCount = 0;
wgpu::DepthStencilState* pDescDS = pDesc.EnableDepthStencil(kDepthFormat);
pDescDS->depthWriteEnabled = true;
pDescDS->depthCompare = wgpu::CompareFunction::Always;
wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pDesc);
// Create a depth-only render pass.
wgpu::TextureDescriptor depthDesc;
depthDesc.size = {1, 1};
depthDesc.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
depthDesc.format = kDepthFormat;
wgpu::Texture depthTexture = device.CreateTexture(&depthDesc);
utils::ComboRenderPassDescriptor renderPassDesc({}, depthTexture.CreateView());
renderPassDesc.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
renderPassDesc.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
// Draw a point with a skewed viewport, so 1.0 depth gets clamped to 0.5.
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc);
pass.SetViewport(0, 0, 1, 1, 0.0, 0.5);
pass.SetPipeline(pipeline);
pass.Draw(1);
pass.End();
wgpu::CommandBuffer commands = encoder.Finish();
queue.Submit(1, &commands);
EXPECT_PIXEL_FLOAT_EQ(0.5f, depthTexture, 0, 0);
}
// Test for the push constant logic for ClampFragDepth in Vulkan to check that changing the
// pipeline layout doesn't invalidate the push constants that were set.
TEST_P(FragDepthTests, ChangingPipelineLayoutDoesntInvalidateViewport) {
// TODO(dawn:1125): Add the frag_depth clamping shader transform to the GL backend.
DAWN_SUPPRESS_TEST_IF(IsOpenGL() || IsOpenGLES());
wgpu::ShaderModule module = utils::CreateShaderModule(device, R"(
@vertex fn vs() -> @builtin(position) vec4<f32> {
return vec4<f32>(0.0, 0.0, 0.5, 1.0);
}
@group(0) @binding(0) var<uniform> uniformDepth : f32;
@fragment fn fsUniform() -> @builtin(frag_depth) f32 {
return uniformDepth;
}
@group(0) @binding(0) var<storage, read> storageDepth : f32;
@fragment fn fsStorage() -> @builtin(frag_depth) f32 {
return storageDepth;
}
)");
// Create the pipeline and bindgroup for the pipeline layout with a uniform buffer.
utils::ComboRenderPipelineDescriptor upDesc;
upDesc.vertex.module = module;
upDesc.vertex.entryPoint = "vs";
upDesc.primitive.topology = wgpu::PrimitiveTopology::PointList;
upDesc.cFragment.module = module;
upDesc.cFragment.entryPoint = "fsUniform";
upDesc.cFragment.targetCount = 0;
wgpu::DepthStencilState* upDescDS = upDesc.EnableDepthStencil(kDepthFormat);
upDescDS->depthWriteEnabled = true;
upDescDS->depthCompare = wgpu::CompareFunction::Always;
wgpu::RenderPipeline uniformPipeline = device.CreateRenderPipeline(&upDesc);
wgpu::Buffer uniformBuffer =
utils::CreateBufferFromData<float>(device, wgpu::BufferUsage::Uniform, {0.0});
wgpu::BindGroup uniformBG =
utils::MakeBindGroup(device, uniformPipeline.GetBindGroupLayout(0), {{0, uniformBuffer}});
// Create the pipeline and bindgroup for the pipeline layout with a storage buffer.
utils::ComboRenderPipelineDescriptor spDesc;
spDesc.vertex.module = module;
spDesc.vertex.entryPoint = "vs";
spDesc.primitive.topology = wgpu::PrimitiveTopology::PointList;
spDesc.cFragment.module = module;
spDesc.cFragment.entryPoint = "fsStorage";
spDesc.cFragment.targetCount = 0;
wgpu::DepthStencilState* spDescDS = spDesc.EnableDepthStencil(kDepthFormat);
spDescDS->depthWriteEnabled = true;
spDescDS->depthCompare = wgpu::CompareFunction::Always;
wgpu::RenderPipeline storagePipeline = device.CreateRenderPipeline(&spDesc);
wgpu::Buffer storageBuffer =
utils::CreateBufferFromData<float>(device, wgpu::BufferUsage::Storage, {1.0});
wgpu::BindGroup storageBG =
utils::MakeBindGroup(device, storagePipeline.GetBindGroupLayout(0), {{0, storageBuffer}});
// Create a depth-only render pass.
wgpu::TextureDescriptor depthDesc;
depthDesc.size = {1, 1};
depthDesc.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
depthDesc.format = kDepthFormat;
wgpu::Texture depthTexture = device.CreateTexture(&depthDesc);
utils::ComboRenderPassDescriptor renderPassDesc({}, depthTexture.CreateView());
renderPassDesc.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
renderPassDesc.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
// Draw two points with different pipeline layouts to check Vulkan's behavior.
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc);
pass.SetViewport(0, 0, 1, 1, 0.0, 0.5);
// Writes 0.0.
pass.SetPipeline(uniformPipeline);
pass.SetBindGroup(0, uniformBG);
pass.Draw(1);
// Writes 1.0 clamped to 0.5.
pass.SetPipeline(storagePipeline);
pass.SetBindGroup(0, storageBG);
pass.Draw(1);
pass.End();
wgpu::CommandBuffer commands = encoder.Finish();
queue.Submit(1, &commands);
EXPECT_PIXEL_FLOAT_EQ(0.5f, depthTexture, 0, 0);
}
// Check that if the fragment is outside of the viewport during rasterization, it is clipped
// even if it outputs @builtin(frag_depth).
TEST_P(FragDepthTests, RasterizationClipBeforeFS) {
// TODO(dawn:1616): Metal also needs clamping of @builtin(frag_depth) to the viewport.
DAWN_SUPPRESS_TEST_IF(IsMetal());
wgpu::ShaderModule module = utils::CreateShaderModule(device, R"(
@vertex fn vs() -> @builtin(position) vec4<f32> {
return vec4<f32>(0.0, 0.0, 5.0, 1.0);
}
@fragment fn fs() -> @builtin(frag_depth) f32 {
return 0.5;
}
)");
// Create the pipeline that writes a fixed @builtin(frag_depth).
utils::ComboRenderPipelineDescriptor pDesc;
pDesc.vertex.module = module;
pDesc.vertex.entryPoint = "vs";
pDesc.primitive.topology = wgpu::PrimitiveTopology::PointList;
pDesc.cFragment.module = module;
pDesc.cFragment.entryPoint = "fs";
pDesc.cFragment.targetCount = 0;
wgpu::DepthStencilState* pDescDS = pDesc.EnableDepthStencil(kDepthFormat);
pDescDS->depthWriteEnabled = true;
pDescDS->depthCompare = wgpu::CompareFunction::Always;
wgpu::RenderPipeline uniformPipeline = device.CreateRenderPipeline(&pDesc);
// Create a depth-only render pass.
wgpu::TextureDescriptor depthDesc;
depthDesc.size = {1, 1};
depthDesc.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
depthDesc.format = kDepthFormat;
wgpu::Texture depthTexture = device.CreateTexture(&depthDesc);
utils::ComboRenderPassDescriptor renderPassDesc({}, depthTexture.CreateView());
renderPassDesc.cDepthStencilAttachmentInfo.depthClearValue = 0.0f;
renderPassDesc.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
renderPassDesc.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
// Draw a point with a depth outside of the viewport. It should get discarded.
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc);
pass.SetPipeline(uniformPipeline);
pass.Draw(1);
pass.End();
wgpu::CommandBuffer commands = encoder.Finish();
queue.Submit(1, &commands);
// The fragment should be discarded, so the depth stays at 0.0 (the depthClearValue).
EXPECT_PIXEL_FLOAT_EQ(0.0f, depthTexture, 0, 0);
}
DAWN_INSTANTIATE_TEST(FragDepthTests,
D3D12Backend(),
MetalBackend(),
OpenGLBackend(),
OpenGLESBackend(),
VulkanBackend());

View File

@@ -165,8 +165,7 @@ crbug.com/dawn/0000 webgpu:util,texture,texel_data:unorm_texel_data_in_shader:fo
################################################################################
# depth_clip_clamp failures
################################################################################
crbug.com/dawn/1125 [ ubuntu ] webgpu:api,operation,rendering,depth_clip_clamp:depth_clamp_and_clip:* [ Failure ]
crbug.com/dawn/1125 [ ubuntu ] webgpu:api,operation,rendering,depth_clip_clamp:depth_test_input_clamped:* [ Failure ]
crbug.com/dawn/1125 [ mac ] webgpu:api,operation,rendering,depth_clip_clamp:depth_clamp_and_clip:* [ Failure ]
################################################################################
# compilation_info failures