// Copyright 2019 The Dawn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "tests/DawnTest.h"

#include "utils/ComboRenderPipelineDescriptor.h"
#include "utils/WGPUHelpers.h"

constexpr uint32_t kRTSize = 400;
constexpr uint32_t kBufferElementsCount = kMinDynamicBufferOffsetAlignment / sizeof(uint32_t) + 2;
constexpr uint32_t kBufferSize = kBufferElementsCount * sizeof(uint32_t);
constexpr uint32_t kBindingSize = 8;

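// Each binding in these tests views kBindingSize (8) bytes of its buffer, i.e. one vec2<u32>.
// Bindings 3 and 4 are declared with dynamic offsets, which are supplied at SetBindGroup() time.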
class DynamicBufferOffsetTests : public DawnTest {
  protected:
    void SetUp() override {
        DawnTest::SetUp();

        // Mix dynamic and non-dynamic resources in one bind group, and use non-contiguous
        // binding numbers to cover more cases.
        std::array<uint32_t, kBufferElementsCount> uniformData = {0};
        uniformData[0] = 1;
        uniformData[1] = 2;

        mUniformBuffers[0] = utils::CreateBufferFromData(device, uniformData.data(), kBufferSize,
                                                         wgpu::BufferUsage::Uniform);

        uniformData[uniformData.size() - 2] = 5;
        uniformData[uniformData.size() - 1] = 6;
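        // These last two values land at byte offset kMinDynamicBufferOffsetAlignment
        // (kBufferElementsCount is two elements past one alignment unit), so they are only
        // visible through a non-zero dynamic offset.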

        // Dynamic uniform buffer
        mUniformBuffers[1] = utils::CreateBufferFromData(device, uniformData.data(), kBufferSize,
                                                         wgpu::BufferUsage::Uniform);

        wgpu::BufferDescriptor storageBufferDescriptor;
        storageBufferDescriptor.size = kBufferSize;
        storageBufferDescriptor.usage =
            wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::CopySrc;

        mStorageBuffers[0] = device.CreateBuffer(&storageBufferDescriptor);

        // Dynamic storage buffer
        mStorageBuffers[1] = device.CreateBuffer(&storageBufferDescriptor);

        // Default bind group layout
        mBindGroupLayouts[0] = utils::MakeBindGroupLayout(
            device, {{0, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
                      wgpu::BufferBindingType::Uniform},
                     {1, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
                      wgpu::BufferBindingType::Storage},
                     {3, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
                      wgpu::BufferBindingType::Uniform, true},
                     {4, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
                      wgpu::BufferBindingType::Storage, true}});

        // Default bind group
        mBindGroups[0] = utils::MakeBindGroup(device, mBindGroupLayouts[0],
                                              {{0, mUniformBuffers[0], 0, kBindingSize},
                                               {1, mStorageBuffers[0], 0, kBindingSize},
                                               {3, mUniformBuffers[1], 0, kBindingSize},
                                               {4, mStorageBuffers[1], 0, kBindingSize}});

        // Extra uniform buffer for inheriting test
        mUniformBuffers[2] = utils::CreateBufferFromData(device, uniformData.data(), kBufferSize,
                                                         wgpu::BufferUsage::Uniform);

        // Bind group layout for inheriting test
        mBindGroupLayouts[1] = utils::MakeBindGroupLayout(
            device, {{0, wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment,
                      wgpu::BufferBindingType::Uniform}});

        // Bind group for inheriting test
        mBindGroups[1] = utils::MakeBindGroup(device, mBindGroupLayouts[1],
                                              {{0, mUniformBuffers[2], 0, kBindingSize}});
    }

    // Objects used as resources inside the test bind groups.
    wgpu::BindGroup mBindGroups[2];
    wgpu::BindGroupLayout mBindGroupLayouts[2];
    wgpu::Buffer mUniformBuffers[3];
    wgpu::Buffer mStorageBuffers[2];
    wgpu::Texture mColorAttachment;

    wgpu::RenderPipeline CreateRenderPipeline(bool isInheritedPipeline = false) {
        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
            [[builtin(vertex_index)]] var<in> VertexIndex : u32;
            [[builtin(position)]] var<out> Position : vec4<f32>;
            [[stage(vertex)]] fn main() -> void {
                const pos : array<vec2<f32>, 3> = array<vec2<f32>, 3>(
                    vec2<f32>(-1.0, 0.0),
                    vec2<f32>(-1.0, 1.0),
                    vec2<f32>( 0.0, 1.0));
                Position = vec4<f32>(pos[VertexIndex], 0.0, 1.0);
            })");

        // Construct fragment shader source
        std::ostringstream fs;
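        // The inherited pipeline multiplies the shader result by 2 so its writes can be told
        // apart from the default pipeline's.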
        std::string multipleNumber = isInheritedPipeline ? "2" : "1";
        fs << R"(
            // TODO(crbug.com/tint/386): Use the same struct.
            [[block]] struct Buffer1 {
                value : vec2<u32>;
            };

            [[block]] struct Buffer2 {
                value : vec2<u32>;
            };

            [[block]] struct Buffer3 {
                value : vec2<u32>;
            };

            [[block]] struct Buffer4 {
                value : vec2<u32>;
            };

            [[group(0), binding(0)]] var<uniform> uBufferNotDynamic : Buffer1;
            [[group(0), binding(1)]] var<storage> sBufferNotDynamic : [[access(read_write)]] Buffer2;
            [[group(0), binding(3)]] var<uniform> uBuffer : Buffer3;
            [[group(0), binding(4)]] var<storage> sBuffer : [[access(read_write)]] Buffer4;
        )";

        if (isInheritedPipeline) {
            fs << R"(
                [[block]] struct Buffer5 {
                    value : vec2<u32>;
                };

                [[group(1), binding(0)]] var<uniform> paddingBlock : Buffer5;
            )";
        }

        fs << "[[location(0)]] var<out> fragColor : vec4<f32>;\n";
        fs << "const multipleNumber : u32 = " << multipleNumber << "u;\n";
        fs << R"(
            [[stage(fragment)]] fn main() -> void {
                sBufferNotDynamic.value = uBufferNotDynamic.value.xy;
                sBuffer.value = vec2<u32>(multipleNumber, multipleNumber) * (uBuffer.value.xy + sBufferNotDynamic.value.xy);
                fragColor = vec4<f32>(f32(uBuffer.value.x) / 255.0, f32(uBuffer.value.y) / 255.0,
                                      1.0, 1.0);
            }
        )";

        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, fs.str().c_str());

        utils::ComboRenderPipelineDescriptor2 pipelineDescriptor;
        pipelineDescriptor.vertex.module = vsModule;
        pipelineDescriptor.cFragment.module = fsModule;
        pipelineDescriptor.cTargets[0].format = wgpu::TextureFormat::RGBA8Unorm;

        wgpu::PipelineLayoutDescriptor pipelineLayoutDescriptor;
        if (isInheritedPipeline) {
            pipelineLayoutDescriptor.bindGroupLayoutCount = 2;
        } else {
            pipelineLayoutDescriptor.bindGroupLayoutCount = 1;
        }
        pipelineLayoutDescriptor.bindGroupLayouts = mBindGroupLayouts;
        pipelineDescriptor.layout = device.CreatePipelineLayout(&pipelineLayoutDescriptor);

        return device.CreateRenderPipeline2(&pipelineDescriptor);
    }

    wgpu::ComputePipeline CreateComputePipeline(bool isInheritedPipeline = false) {
        // Construct compute shader source
        std::ostringstream cs;
        std::string multipleNumber = isInheritedPipeline ? "2" : "1";
        cs << R"(
            // TODO(crbug.com/tint/386): Use the same struct.
            [[block]] struct Buffer1 {
                value : vec2<u32>;
            };

            [[block]] struct Buffer2 {
                value : vec2<u32>;
            };

            [[block]] struct Buffer3 {
                value : vec2<u32>;
            };

            [[block]] struct Buffer4 {
                value : vec2<u32>;
            };

            [[group(0), binding(0)]] var<uniform> uBufferNotDynamic : Buffer1;
            [[group(0), binding(1)]] var<storage> sBufferNotDynamic : [[access(read_write)]] Buffer2;
            [[group(0), binding(3)]] var<uniform> uBuffer : Buffer3;
            [[group(0), binding(4)]] var<storage> sBuffer : [[access(read_write)]] Buffer4;
        )";

        if (isInheritedPipeline) {
            cs << R"(
                [[block]] struct Buffer5 {
                    value : vec2<u32>;
                };

                [[group(1), binding(0)]] var<uniform> paddingBlock : Buffer5;
            )";
        }

        cs << "const multipleNumber : u32 = " << multipleNumber << "u;\n";
        cs << R"(
            [[stage(compute)]] fn main() -> void {
                sBufferNotDynamic.value = uBufferNotDynamic.value.xy;
                sBuffer.value = vec2<u32>(multipleNumber, multipleNumber) * (uBuffer.value.xy + sBufferNotDynamic.value.xy);
            }
        )";

        wgpu::ShaderModule csModule = utils::CreateShaderModule(device, cs.str().c_str());

        wgpu::ComputePipelineDescriptor csDesc;
        csDesc.computeStage.module = csModule;
        csDesc.computeStage.entryPoint = "main";

        wgpu::PipelineLayoutDescriptor pipelineLayoutDescriptor;
        if (isInheritedPipeline) {
            pipelineLayoutDescriptor.bindGroupLayoutCount = 2;
        } else {
            pipelineLayoutDescriptor.bindGroupLayoutCount = 1;
        }
        pipelineLayoutDescriptor.bindGroupLayouts = mBindGroupLayouts;
        csDesc.layout = device.CreatePipelineLayout(&pipelineLayoutDescriptor);

        return device.CreateComputePipeline(&csDesc);
    }
};

// Dynamic offsets are all zero and have no effect on the result.
TEST_P(DynamicBufferOffsetTests, BasicRenderPipeline) {
    wgpu::RenderPipeline pipeline = CreateRenderPipeline();
    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);

    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
    std::array<uint32_t, 2> offsets = {0, 0};
    wgpu::RenderPassEncoder renderPassEncoder =
        commandEncoder.BeginRenderPass(&renderPass.renderPassInfo);
    renderPassEncoder.SetPipeline(pipeline);
    renderPassEncoder.SetBindGroup(0, mBindGroups[0], offsets.size(), offsets.data());
    renderPassEncoder.Draw(3);
    renderPassEncoder.EndPass();
    wgpu::CommandBuffer commands = commandEncoder.Finish();
    queue.Submit(1, &commands);

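    // With zero offsets the dynamic uniform buffer reads {1, 2}, the same as the non-dynamic one,
    // so the shader writes 1 * ({1, 2} + {1, 2}) = {2, 4} and the pixel encodes (1, 2).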
    std::vector<uint32_t> expectedData = {2, 4};
    EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 255, 255), renderPass.color, 0, 0);
    EXPECT_BUFFER_U32_RANGE_EQ(expectedData.data(), mStorageBuffers[1], 0, expectedData.size());
}

// Have non-zero dynamic offsets.
TEST_P(DynamicBufferOffsetTests, SetDynamicOffsetsRenderPipeline) {
    wgpu::RenderPipeline pipeline = CreateRenderPipeline();
    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);

    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
    std::array<uint32_t, 2> offsets = {kMinDynamicBufferOffsetAlignment,
                                       kMinDynamicBufferOffsetAlignment};
    wgpu::RenderPassEncoder renderPassEncoder =
        commandEncoder.BeginRenderPass(&renderPass.renderPassInfo);
    renderPassEncoder.SetPipeline(pipeline);
    renderPassEncoder.SetBindGroup(0, mBindGroups[0], offsets.size(), offsets.data());
    renderPassEncoder.Draw(3);
    renderPassEncoder.EndPass();
    wgpu::CommandBuffer commands = commandEncoder.Finish();
    queue.Submit(1, &commands);

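    // The offsets move both dynamic bindings to byte offset kMinDynamicBufferOffsetAlignment, so
    // the dynamic uniform buffer reads {5, 6}: 1 * ({5, 6} + {1, 2}) = {6, 8}, written at the same
    // offset in mStorageBuffers[1], and the pixel encodes (5, 6).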
    std::vector<uint32_t> expectedData = {6, 8};
    EXPECT_PIXEL_RGBA8_EQ(RGBA8(5, 6, 255, 255), renderPass.color, 0, 0);
    EXPECT_BUFFER_U32_RANGE_EQ(expectedData.data(), mStorageBuffers[1],
                               kMinDynamicBufferOffsetAlignment, expectedData.size());
}

// Dynamic offsets are all zero and have no effect on the result.
TEST_P(DynamicBufferOffsetTests, BasicComputePipeline) {
    wgpu::ComputePipeline pipeline = CreateComputePipeline();

    std::array<uint32_t, 2> offsets = {0, 0};

    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
    wgpu::ComputePassEncoder computePassEncoder = commandEncoder.BeginComputePass();
    computePassEncoder.SetPipeline(pipeline);
    computePassEncoder.SetBindGroup(0, mBindGroups[0], offsets.size(), offsets.data());
    computePassEncoder.Dispatch(1);
    computePassEncoder.EndPass();
    wgpu::CommandBuffer commands = commandEncoder.Finish();
    queue.Submit(1, &commands);

    std::vector<uint32_t> expectedData = {2, 4};
    EXPECT_BUFFER_U32_RANGE_EQ(expectedData.data(), mStorageBuffers[1], 0, expectedData.size());
}

// Have non-zero dynamic offsets.
TEST_P(DynamicBufferOffsetTests, SetDynamicOffsetsComputePipeline) {
    wgpu::ComputePipeline pipeline = CreateComputePipeline();

    std::array<uint32_t, 2> offsets = {kMinDynamicBufferOffsetAlignment,
                                       kMinDynamicBufferOffsetAlignment};

    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
    wgpu::ComputePassEncoder computePassEncoder = commandEncoder.BeginComputePass();
    computePassEncoder.SetPipeline(pipeline);
    computePassEncoder.SetBindGroup(0, mBindGroups[0], offsets.size(), offsets.data());
    computePassEncoder.Dispatch(1);
    computePassEncoder.EndPass();
    wgpu::CommandBuffer commands = commandEncoder.Finish();
    queue.Submit(1, &commands);

    std::vector<uint32_t> expectedData = {6, 8};
    EXPECT_BUFFER_U32_RANGE_EQ(expectedData.data(), mStorageBuffers[1],
                               kMinDynamicBufferOffsetAlignment, expectedData.size());
}

// Test inheriting dynamic offsets on a render pipeline.
TEST_P(DynamicBufferOffsetTests, InheritDynamicOffsetsRenderPipeline) {
    // Use the default pipeline and set dynamic offsets.
    wgpu::RenderPipeline pipeline = CreateRenderPipeline();
    wgpu::RenderPipeline testPipeline = CreateRenderPipeline(true);

    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);

    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
    std::array<uint32_t, 2> offsets = {kMinDynamicBufferOffsetAlignment,
                                       kMinDynamicBufferOffsetAlignment};
    wgpu::RenderPassEncoder renderPassEncoder =
        commandEncoder.BeginRenderPass(&renderPass.renderPassInfo);
    renderPassEncoder.SetPipeline(pipeline);
    renderPassEncoder.SetBindGroup(0, mBindGroups[0], offsets.size(), offsets.data());
    renderPassEncoder.Draw(3);
    renderPassEncoder.SetPipeline(testPipeline);
    renderPassEncoder.SetBindGroup(1, mBindGroups[1]);
    renderPassEncoder.Draw(3);
    renderPassEncoder.EndPass();
    wgpu::CommandBuffer commands = commandEncoder.Finish();
    queue.Submit(1, &commands);

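    // The second draw keeps the dynamic offsets set for group 0 but uses the inherited pipeline
    // whose multiplier is 2: 2 * ({5, 6} + {1, 2}) = {12, 16}.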
    std::vector<uint32_t> expectedData = {12, 16};
    EXPECT_PIXEL_RGBA8_EQ(RGBA8(5, 6, 255, 255), renderPass.color, 0, 0);
    EXPECT_BUFFER_U32_RANGE_EQ(expectedData.data(), mStorageBuffers[1],
                               kMinDynamicBufferOffsetAlignment, expectedData.size());
}

// Test inheriting dynamic offsets on a compute pipeline.
// TODO(shaobo.yan@intel.com): Tried this test on a GTX1080 and could not reproduce the failure.
// Suspect it is because Dawn does not handle synchronization between the two dispatches, so this
// case is disabled on Windows. Will double-check the root cause after getting a GTX1660.
TEST_P(DynamicBufferOffsetTests, InheritDynamicOffsetsComputePipeline) {
    DAWN_SKIP_TEST_IF(IsWindows());
    wgpu::ComputePipeline pipeline = CreateComputePipeline();
    wgpu::ComputePipeline testPipeline = CreateComputePipeline(true);

    std::array<uint32_t, 2> offsets = {kMinDynamicBufferOffsetAlignment,
                                       kMinDynamicBufferOffsetAlignment};

    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
    wgpu::ComputePassEncoder computePassEncoder = commandEncoder.BeginComputePass();
    computePassEncoder.SetPipeline(pipeline);
    computePassEncoder.SetBindGroup(0, mBindGroups[0], offsets.size(), offsets.data());
    computePassEncoder.Dispatch(1);
    computePassEncoder.SetPipeline(testPipeline);
    computePassEncoder.SetBindGroup(1, mBindGroups[1]);
    computePassEncoder.Dispatch(1);
    computePassEncoder.EndPass();
    wgpu::CommandBuffer commands = commandEncoder.Finish();
    queue.Submit(1, &commands);

    std::vector<uint32_t> expectedData = {12, 16};
    EXPECT_BUFFER_U32_RANGE_EQ(expectedData.data(), mStorageBuffers[1],
                               kMinDynamicBufferOffsetAlignment, expectedData.size());
}

// Setting multiple dynamic offsets for the same bind group in one render pass.
TEST_P(DynamicBufferOffsetTests, UpdateDynamicOffsetsMultipleTimesRenderPipeline) {
    // Use the default pipeline and set dynamic offsets.
    wgpu::RenderPipeline pipeline = CreateRenderPipeline();

    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);

    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
    std::array<uint32_t, 2> offsets = {kMinDynamicBufferOffsetAlignment,
                                       kMinDynamicBufferOffsetAlignment};
    std::array<uint32_t, 2> testOffsets = {0, 0};

    wgpu::RenderPassEncoder renderPassEncoder =
        commandEncoder.BeginRenderPass(&renderPass.renderPassInfo);
    renderPassEncoder.SetPipeline(pipeline);
    renderPassEncoder.SetBindGroup(0, mBindGroups[0], offsets.size(), offsets.data());
    renderPassEncoder.Draw(3);
    renderPassEncoder.SetBindGroup(0, mBindGroups[0], testOffsets.size(), testOffsets.data());
    renderPassEncoder.Draw(3);
    renderPassEncoder.EndPass();
    wgpu::CommandBuffer commands = commandEncoder.Finish();
    queue.Submit(1, &commands);

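    // The second SetBindGroup() replaces the earlier offsets with zeros, so the final draw behaves
    // like the basic case and writes {2, 4} at offset 0.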
    std::vector<uint32_t> expectedData = {2, 4};
    EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 255, 255), renderPass.color, 0, 0);
    EXPECT_BUFFER_U32_RANGE_EQ(expectedData.data(), mStorageBuffers[1], 0, expectedData.size());
}

// Setting multiple dynamic offsets for the same bind group in one compute pass.
TEST_P(DynamicBufferOffsetTests, UpdateDynamicOffsetsMultipleTimesComputePipeline) {
    wgpu::ComputePipeline pipeline = CreateComputePipeline();

    std::array<uint32_t, 2> offsets = {kMinDynamicBufferOffsetAlignment,
                                       kMinDynamicBufferOffsetAlignment};
    std::array<uint32_t, 2> testOffsets = {0, 0};

    wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
    wgpu::ComputePassEncoder computePassEncoder = commandEncoder.BeginComputePass();
    computePassEncoder.SetPipeline(pipeline);
    computePassEncoder.SetBindGroup(0, mBindGroups[0], offsets.size(), offsets.data());
    computePassEncoder.Dispatch(1);
    computePassEncoder.SetBindGroup(0, mBindGroups[0], testOffsets.size(), testOffsets.data());
    computePassEncoder.Dispatch(1);
    computePassEncoder.EndPass();
    wgpu::CommandBuffer commands = commandEncoder.Finish();
    queue.Submit(1, &commands);

    std::vector<uint32_t> expectedData = {2, 4};
    EXPECT_BUFFER_U32_RANGE_EQ(expectedData.data(), mStorageBuffers[1], 0, expectedData.size());
}

DAWN_INSTANTIATE_TEST(DynamicBufferOffsetTests,
                      D3D12Backend(),
                      MetalBackend(),
                      OpenGLBackend(),
                      OpenGLESBackend(),
                      VulkanBackend());