Support 3D textures in ClearTexture

Also fix and enable testing for texture initialization on OpenGLES. This
CL also factors code so that opengl::Texture::ClearTexture can use the
DoTexSubImage helper.

Note: Clearing of compressed textures on GL is still unimplemented.

Bug: dawn:780
Change-Id: I5c1268ee570f2d4347d365465700dd416fbf5619
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/50121
Commit-Queue: Austin Eng <enga@chromium.org>
Reviewed-by: Stephen White <senorblanco@chromium.org>

committed by Commit Bot service account
parent 8507f7e33a
commit fd783ce627
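
The gist of the GL-side change, as a minimal sketch: once ClearTexture is routed through a
TexSubImage-style upload helper, clearing a level of a 3D texture reduces to filling a staging
buffer with the clear value and uploading it with glTexSubImage3D. The sketch below assumes a
plain OpenGL ES 3.0 context and an already-allocated RGBA8 GL_TEXTURE_3D; the function name
ClearTextureLevel3D, the clearByte parameter, and the mip-size math are illustrative only and
are not Dawn's actual ClearTexture or DoTexSubImage code.

// Illustrative sketch, not Dawn's implementation: clear one mip level of a
// GL_TEXTURE_3D to a constant byte value via a CPU-side upload.
#include <GLES3/gl3.h>
#include <algorithm>
#include <cstdint>
#include <vector>

void ClearTextureLevel3D(GLuint texture,
                         GLint level,
                         uint32_t width,
                         uint32_t height,
                         uint32_t depth,
                         uint8_t clearByte) {
    // For a true 3D texture, width, height, and depth all shrink per mip level.
    uint32_t w = std::max<uint32_t>(width >> level, 1u);
    uint32_t h = std::max<uint32_t>(height >> level, 1u);
    uint32_t d = std::max<uint32_t>(depth >> level, 1u);

    // One RGBA8 texel is 4 bytes; fill the whole level with the clear byte.
    std::vector<uint8_t> data(4u * w * h * d, clearByte);

    glBindTexture(GL_TEXTURE_3D, texture);
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glTexSubImage3D(GL_TEXTURE_3D, level, 0, 0, 0, static_cast<GLsizei>(w),
                    static_cast<GLsizei>(h), static_cast<GLsizei>(d), GL_RGBA,
                    GL_UNSIGNED_BYTE, data.data());
}

Note that the depth of a 3D texture halves with each mip level, while the layer count of a 2D
array texture does not; this is why the 3D mip-level checks in the tests below use a shrunken
mipDepth (kDepthOrArrayLayers >> 2) where the 2D array checks keep kDepthOrArrayLayers.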
@@ -989,6 +989,8 @@ TEST_P(CopyTests_T2B, Texture3DFull) {

// Test that copying a range of texture 3D depths in one texture-to-buffer-copy works.
TEST_P(CopyTests_T2B, Texture3DSubRegion) {
    DAWN_SKIP_TEST_IF(IsANGLE()); // TODO(crbug.com/angleproject/5967)

    constexpr uint32_t kWidth = 256;
    constexpr uint32_t kHeight = 128;
    constexpr uint32_t kDepth = 6u;

@@ -1458,6 +1460,8 @@ TEST_P(CopyTests_B2T, Texture3DFull) {

// Test that copying a range of texture 3D Depths in one texture-to-buffer-copy works.
TEST_P(CopyTests_B2T, Texture3DSubRegion) {
    DAWN_SKIP_TEST_IF(IsANGLE()); // TODO(crbug.com/angleproject/5967)

    constexpr uint32_t kWidth = 256;
    constexpr uint32_t kHeight = 128;
    constexpr uint32_t kDepth = 6u;

@@ -1834,6 +1838,7 @@ TEST_P(CopyTests_T2T, Texture2DArrayTo3DFull) {
// Test that copying subregion of a 3D texture in one texture-to-texture-copy works.
TEST_P(CopyTests_T2T, Texture3DSubRegion) {
    DAWN_SKIP_TEST_IF(IsD3D12()); // TODO(crbug.com/dawn/547): Implement on D3D12.
    DAWN_SKIP_TEST_IF(IsANGLE()); // TODO(crbug.com/angleproject/5967)

    constexpr uint32_t kWidth = 256;
    constexpr uint32_t kHeight = 128;

@@ -1865,6 +1870,7 @@ TEST_P(CopyTests_T2T, Texture3DTo2DArraySubRegion) {
// works.
TEST_P(CopyTests_T2T, Texture2DArrayTo3DSubRegion) {
    DAWN_SKIP_TEST_IF(IsD3D12()); // TODO(crbug.com/dawn/547): Implement on D3D12.
    DAWN_SKIP_TEST_IF(IsANGLE()); // TODO(crbug.com/angleproject/5967)

    constexpr uint32_t kWidth = 256;
    constexpr uint32_t kHeight = 128;
@@ -20,11 +20,8 @@

class NonzeroTextureCreationTests : public DawnTest {
  protected:
    void SetUp() override {
        DawnTest::SetUp();
    }

    constexpr static uint32_t kSize = 128;
    constexpr static uint32_t kDepthOrArrayLayers = 7;
};

// Test that texture clears 0xFF because toggle is enabled.
@@ -38,16 +35,51 @@ TEST_P(NonzeroTextureCreationTests, TextureCreationClears) {
    descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
    descriptor.mipLevelCount = 1;
    descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;

    // 2D
    {
        wgpu::Texture texture = device.CreateTexture(&descriptor);

        std::vector<RGBA8> expected(kSize * kSize, RGBA8(255, 255, 255, 255));
        EXPECT_TEXTURE_EQ(expected.data(), texture, {0, 0}, {kSize, kSize});
    }

    // 2D Array
    {
        descriptor.dimension = wgpu::TextureDimension::e2D;
        descriptor.size.depthOrArrayLayers = kDepthOrArrayLayers;
        wgpu::Texture texture = device.CreateTexture(&descriptor);

        std::vector<RGBA8> expected(kSize * kSize * kDepthOrArrayLayers, RGBA8(255, 255, 255, 255));
        EXPECT_TEXTURE_EQ(expected.data(), texture, {0, 0, 0}, {kSize, kSize, kDepthOrArrayLayers});
    }
}

// Test that 3D texture clears to nonzero because toggle is enabled.
TEST_P(NonzeroTextureCreationTests, Texture3DCreationClears) {
    // TODO(crbug.com/dawn/547): 3D texture copies not fully implemented on D3D12.
    // TODO(crbug.com/angleproject/5967): This texture readback hits an assert in ANGLE.
    DAWN_SKIP_TEST_IF(IsANGLE() || IsD3D12());

    wgpu::TextureDescriptor descriptor;
    descriptor.dimension = wgpu::TextureDimension::e3D;
    descriptor.size.width = kSize;
    descriptor.size.height = kSize;
    descriptor.size.depthOrArrayLayers = kDepthOrArrayLayers;
    descriptor.sampleCount = 1;
    descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
    descriptor.mipLevelCount = 1;
    descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
    wgpu::Texture texture = device.CreateTexture(&descriptor);

    RGBA8 filled(255, 255, 255, 255);
    EXPECT_PIXEL_RGBA8_EQ(filled, texture, 0, 0);
    std::vector<RGBA8> expected(kSize * kSize * kDepthOrArrayLayers, RGBA8(255, 255, 255, 255));
    EXPECT_TEXTURE_EQ(expected.data(), texture, {0, 0, 0}, {kSize, kSize, kDepthOrArrayLayers});
}

// Test that a depth texture clears 0xFF because toggle is enabled.
TEST_P(NonzeroTextureCreationTests, Depth32TextureCreationDepthClears) {
    // Copies from depth textures not supported on the OpenGL backend right now.
    DAWN_SKIP_TEST_IF(IsOpenGL());
    // Copies from depth textures not fully supported on the OpenGL backend right now.
    DAWN_SKIP_TEST_IF(IsOpenGL() || IsOpenGLES());

    wgpu::TextureDescriptor descriptor;
    descriptor.dimension = wgpu::TextureDimension::e2D;
@@ -62,9 +94,22 @@ TEST_P(NonzeroTextureCreationTests, Depth32TextureCreationDepthClears) {
    // We can only really test Depth32Float here because Depth24Plus(Stencil8)? may be in an unknown
    // format.
    // TODO(crbug.com/dawn/145): Test other formats via sampling.
    wgpu::Texture texture = device.CreateTexture(&descriptor);
    std::vector<float> expected(kSize * kSize, 1.f);
    EXPECT_TEXTURE_EQ(expected.data(), texture, {0, 0}, {kSize, kSize});

    // 2D
    {
        wgpu::Texture texture = device.CreateTexture(&descriptor);
        std::vector<float> expected(kSize * kSize, 1.f);
        EXPECT_TEXTURE_EQ(expected.data(), texture, {0, 0}, {kSize, kSize});
    }

    // 2D Array
    {
        descriptor.dimension = wgpu::TextureDimension::e2D;
        descriptor.size.depthOrArrayLayers = kDepthOrArrayLayers;
        wgpu::Texture texture = device.CreateTexture(&descriptor);
        std::vector<float> expected(kSize * kSize * kDepthOrArrayLayers, 1.f);
        EXPECT_TEXTURE_EQ(expected.data(), texture, {0, 0, 0}, {kSize, kSize, kDepthOrArrayLayers});
    }
}

// Test that non-zero mip level clears 0xFF because toggle is enabled.
@@ -80,39 +125,37 @@ TEST_P(NonzeroTextureCreationTests, MipMapClears) {
    descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
    descriptor.mipLevelCount = mipLevels;
    descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
    wgpu::Texture texture = device.CreateTexture(&descriptor);

    std::vector<RGBA8> expected;
    RGBA8 filled(255, 255, 255, 255);
    for (uint32_t i = 0; i < kSize * kSize; ++i) {
        expected.push_back(filled);
    }
    uint32_t mipSize = kSize >> 2;
    EXPECT_TEXTURE_EQ(expected.data(), texture, {0, 0}, {mipSize, mipSize}, 2);
}

// Test that non-zero array layers clears 0xFF because toggle is enabled.
TEST_P(NonzeroTextureCreationTests, ArrayLayerClears) {
    constexpr uint32_t arrayLayers = 4;

    wgpu::TextureDescriptor descriptor;
    descriptor.dimension = wgpu::TextureDimension::e2D;
    descriptor.size.width = kSize;
    descriptor.size.height = kSize;
    descriptor.size.depthOrArrayLayers = arrayLayers;
    descriptor.sampleCount = 1;
    descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
    descriptor.mipLevelCount = 1;
    descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
    wgpu::Texture texture = device.CreateTexture(&descriptor);

    std::vector<RGBA8> expected;
    RGBA8 filled(255, 255, 255, 255);
    for (uint32_t i = 0; i < kSize * kSize; ++i) {
        expected.push_back(filled);
    // 2D
    {
        wgpu::Texture texture = device.CreateTexture(&descriptor);
        uint32_t mipSize = kSize >> 2;
        std::vector<RGBA8> expected(mipSize * mipSize, RGBA8(255, 255, 255, 255));
        EXPECT_TEXTURE_EQ(expected.data(), texture, {0, 0}, {mipSize, mipSize}, 2);
    }

    EXPECT_TEXTURE_EQ(expected.data(), texture, {0, 0, 2}, {kSize, kSize});
    // 2D Array
    {
        descriptor.dimension = wgpu::TextureDimension::e2D;
        descriptor.size.depthOrArrayLayers = kDepthOrArrayLayers;
        wgpu::Texture texture = device.CreateTexture(&descriptor);
        uint32_t mipSize = kSize >> 2;
        std::vector<RGBA8> expected(mipSize * mipSize * kDepthOrArrayLayers,
                                    RGBA8(255, 255, 255, 255));
        EXPECT_TEXTURE_EQ(expected.data(), texture, {0, 0, 0},
                          {mipSize, mipSize, kDepthOrArrayLayers}, 2);
    }

    // 3D
    {
        descriptor.dimension = wgpu::TextureDimension::e3D;
        descriptor.size.depthOrArrayLayers = kDepthOrArrayLayers;
        wgpu::Texture texture = device.CreateTexture(&descriptor);
        uint32_t mipSize = kSize >> 2;
        uint32_t mipDepth = kDepthOrArrayLayers >> 2;
        std::vector<RGBA8> expected(mipSize * mipSize * mipDepth, RGBA8(255, 255, 255, 255));
        EXPECT_TEXTURE_EQ(expected.data(), texture, {0, 0, 0}, {mipSize, mipSize, mipDepth}, 2);
    }
}

// Test that nonrenderable texture formats clear 0x01 because toggle is enabled
@@ -130,167 +173,87 @@ TEST_P(NonzeroTextureCreationTests, NonrenderableTextureFormat) {
    descriptor.format = wgpu::TextureFormat::RGBA8Snorm;
    descriptor.mipLevelCount = 1;
    descriptor.usage = wgpu::TextureUsage::CopySrc;
    wgpu::Texture texture = device.CreateTexture(&descriptor);

    // Set buffer with dirty data so we know it is cleared by the lazy cleared texture copy
    uint32_t bufferSize = 4 * kSize * kSize;
    std::vector<uint8_t> data(bufferSize, 100);
    wgpu::Buffer bufferDst = utils::CreateBufferFromData(
        device, data.data(), static_cast<uint32_t>(data.size()), wgpu::BufferUsage::CopySrc);

    wgpu::ImageCopyBuffer imageCopyBuffer = utils::CreateImageCopyBuffer(bufferDst, 0, kSize * 4);
    wgpu::ImageCopyTexture imageCopyTexture = utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
    wgpu::Extent3D copySize = {kSize, kSize, 1};

    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
    encoder.CopyTextureToBuffer(&imageCopyTexture, &imageCopyBuffer, &copySize);
    wgpu::CommandBuffer commands = encoder.Finish();
    queue.Submit(1, &commands);

    uint32_t expectedBytes = IsVulkan() ? 0x7F7F7F7F : 0x01010101;
    std::vector<uint32_t> expected(bufferSize, expectedBytes);
    EXPECT_BUFFER_U32_RANGE_EQ(expected.data(), bufferDst, 0, 8);
}

// Test that textures with more than 1 array layers and nonrenderable texture formats clear to 0x01
// because toggle is enabled
TEST_P(NonzeroTextureCreationTests, NonRenderableTextureClearWithMultiArrayLayers) {
    // TODO(crbug.com/dawn/667): Work around the fact that some platforms do not support reading
    // from Snorm textures.
    DAWN_SKIP_TEST_IF(HasToggleEnabled("disable_snorm_read"));

    wgpu::TextureDescriptor descriptor;
    descriptor.dimension = wgpu::TextureDimension::e2D;
    descriptor.size.width = kSize;
    descriptor.size.height = kSize;
    descriptor.size.depthOrArrayLayers = 2;
    descriptor.sampleCount = 1;
    descriptor.format = wgpu::TextureFormat::RGBA8Snorm;
    descriptor.mipLevelCount = 1;
    descriptor.usage = wgpu::TextureUsage::CopySrc;
    wgpu::Texture texture = device.CreateTexture(&descriptor);

    // Set buffer with dirty data so we know it is cleared by the lazy cleared texture copy
    uint32_t bufferSize = 4 * kSize * kSize;
    std::vector<uint8_t> data(bufferSize, 100);
    wgpu::Buffer bufferDst = utils::CreateBufferFromData(
        device, data.data(), static_cast<uint32_t>(data.size()), wgpu::BufferUsage::CopySrc);

    wgpu::ImageCopyBuffer imageCopyBuffer = utils::CreateImageCopyBuffer(bufferDst, 0, kSize * 4);
    wgpu::ImageCopyTexture imageCopyTexture = utils::CreateImageCopyTexture(texture, 0, {0, 0, 1});
    wgpu::Extent3D copySize = {kSize, kSize, 1};

    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
    encoder.CopyTextureToBuffer(&imageCopyTexture, &imageCopyBuffer, &copySize);
    wgpu::CommandBuffer commands = encoder.Finish();
    queue.Submit(1, &commands);

    uint32_t expectedBytes = IsVulkan() ? 0x7F7F7F7F : 0x01010101;
    std::vector<uint32_t> expected(bufferSize, expectedBytes);
    EXPECT_BUFFER_U32_RANGE_EQ(expected.data(), bufferDst, 0, 8);
}

// Test that all subresources of a renderable texture are filled because the toggle is enabled.
TEST_P(NonzeroTextureCreationTests, AllSubresourcesFilled) {
    wgpu::TextureDescriptor baseDescriptor;
    baseDescriptor.dimension = wgpu::TextureDimension::e2D;
    baseDescriptor.size.width = kSize;
    baseDescriptor.size.height = kSize;
    baseDescriptor.size.depthOrArrayLayers = 1;
    baseDescriptor.sampleCount = 1;
    baseDescriptor.format = wgpu::TextureFormat::RGBA8Unorm;
    baseDescriptor.mipLevelCount = 1;
    baseDescriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;

    RGBA8 filled(255, 255, 255, 255);

    // 2D
    {
        wgpu::TextureDescriptor descriptor = baseDescriptor;
        // Some textures may be cleared with render pass load/store ops.
        // Test above the max attachment count.
        descriptor.size.depthOrArrayLayers = kMaxColorAttachments + 1;
        wgpu::Texture texture = device.CreateTexture(&descriptor);

        for (uint32_t i = 0; i < descriptor.size.depthOrArrayLayers; ++i) {
            EXPECT_TEXTURE_EQ(&filled, texture, {0, 0, i}, {1, 1}, 0);
        }
        // Set buffer with dirty data so we know it is cleared by the lazy cleared texture copy
        uint32_t bufferSize = kSize * kSize;
        std::vector<uint8_t> data(sizeof(uint32_t) * bufferSize, 100);
        wgpu::Buffer bufferDst = utils::CreateBufferFromData(
            device, data.data(), static_cast<uint32_t>(data.size()), wgpu::BufferUsage::CopySrc);

        wgpu::ImageCopyBuffer imageCopyBuffer =
            utils::CreateImageCopyBuffer(bufferDst, 0, kSize * 4);
        wgpu::ImageCopyTexture imageCopyTexture =
            utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
        wgpu::Extent3D copySize = {kSize, kSize, 1};

        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
        encoder.CopyTextureToBuffer(&imageCopyTexture, &imageCopyBuffer, &copySize);
        wgpu::CommandBuffer commands = encoder.Finish();
        queue.Submit(1, &commands);

        uint32_t expectedBytes = IsVulkan() ? 0x7F7F7F7F : 0x01010101;
        std::vector<uint32_t> expected(bufferSize, expectedBytes);
        EXPECT_BUFFER_U32_RANGE_EQ(expected.data(), bufferDst, 0, expected.size());
    }

    // 2D Array
    {
        wgpu::TextureDescriptor descriptor = baseDescriptor;
        descriptor.mipLevelCount = 3;
        descriptor.dimension = wgpu::TextureDimension::e2D;
        descriptor.size.depthOrArrayLayers = kDepthOrArrayLayers;
        wgpu::Texture texture = device.CreateTexture(&descriptor);

        for (uint32_t i = 0; i < descriptor.mipLevelCount; ++i) {
            EXPECT_TEXTURE_EQ(&filled, texture, {0, 0}, {1, 1}, i);
        }
        // Set buffer with dirty data so we know it is cleared by the lazy cleared texture copy
        uint32_t bufferSize = kSize * kSize * kDepthOrArrayLayers;
        std::vector<uint8_t> data(sizeof(uint32_t) * bufferSize, 100);
        wgpu::Buffer bufferDst = utils::CreateBufferFromData(
            device, data.data(), static_cast<uint32_t>(data.size()), wgpu::BufferUsage::CopySrc);

        wgpu::ImageCopyBuffer imageCopyBuffer =
            utils::CreateImageCopyBuffer(bufferDst, 0, kSize * 4, kSize);
        wgpu::ImageCopyTexture imageCopyTexture =
            utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
        wgpu::Extent3D copySize = {kSize, kSize, kDepthOrArrayLayers};

        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
        encoder.CopyTextureToBuffer(&imageCopyTexture, &imageCopyBuffer, &copySize);
        wgpu::CommandBuffer commands = encoder.Finish();
        queue.Submit(1, &commands);

        uint32_t expectedBytes = IsVulkan() ? 0x7F7F7F7F : 0x01010101;
        std::vector<uint32_t> expected(bufferSize, expectedBytes);
        EXPECT_BUFFER_U32_RANGE_EQ(expected.data(), bufferDst, 0, expected.size());
    }

    // 3D
    {
        wgpu::TextureDescriptor descriptor = baseDescriptor;
        // Some textures may be cleared with render pass load/store ops.
        // Test above the max attachment count.
        descriptor.size.depthOrArrayLayers = kMaxColorAttachments + 1;
        descriptor.mipLevelCount = 3;
        descriptor.dimension = wgpu::TextureDimension::e3D;
        descriptor.size.depthOrArrayLayers = kDepthOrArrayLayers;
        wgpu::Texture texture = device.CreateTexture(&descriptor);

        for (uint32_t i = 0; i < descriptor.size.depthOrArrayLayers; ++i) {
            for (uint32_t j = 0; j < descriptor.mipLevelCount; ++j) {
                EXPECT_TEXTURE_EQ(&filled, texture, {0, 0, i}, {1, 1}, j);
            }
        }
    }
}
    // Set buffer with dirty data so we know it is cleared by the lazy cleared texture copy
    uint32_t bufferSize = kSize * kSize * kDepthOrArrayLayers;
    std::vector<uint8_t> data(sizeof(uint32_t) * bufferSize, 100);
    wgpu::Buffer bufferDst = utils::CreateBufferFromData(
        device, data.data(), static_cast<uint32_t>(data.size()), wgpu::BufferUsage::CopySrc);

// Test that all subresources of a nonrenderable texture are filled because the toggle is enabled.
TEST_P(NonzeroTextureCreationTests, NonRenderableAllSubresourcesFilled) {
    wgpu::TextureDescriptor baseDescriptor;
    baseDescriptor.dimension = wgpu::TextureDimension::e2D;
    baseDescriptor.size.width = kSize;
    baseDescriptor.size.height = kSize;
    baseDescriptor.size.depthOrArrayLayers = 1;
    baseDescriptor.sampleCount = 1;
    baseDescriptor.format = wgpu::TextureFormat::RGBA8Snorm;
    baseDescriptor.mipLevelCount = 1;
    baseDescriptor.usage = wgpu::TextureUsage::CopySrc;
    wgpu::ImageCopyBuffer imageCopyBuffer =
        utils::CreateImageCopyBuffer(bufferDst, 0, kSize * 4, kSize);
    wgpu::ImageCopyTexture imageCopyTexture =
        utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
    wgpu::Extent3D copySize = {kSize, kSize, kDepthOrArrayLayers};

    RGBA8 filled = IsVulkan() ? RGBA8(127, 127, 127, 127) : RGBA8(1, 1, 1, 1);
    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
    encoder.CopyTextureToBuffer(&imageCopyTexture, &imageCopyBuffer, &copySize);
    wgpu::CommandBuffer commands = encoder.Finish();
    queue.Submit(1, &commands);

    {
        wgpu::TextureDescriptor descriptor = baseDescriptor;
        // Some textures may be cleared with render pass load/store ops.
        // Test above the max attachment count.
        descriptor.size.depthOrArrayLayers = kMaxColorAttachments + 1;
        wgpu::Texture texture = device.CreateTexture(&descriptor);

        for (uint32_t i = 0; i < descriptor.size.depthOrArrayLayers; ++i) {
            EXPECT_TEXTURE_EQ(&filled, texture, {0, 0, i}, {1, 1}, 0);
        }
    }

    {
        wgpu::TextureDescriptor descriptor = baseDescriptor;
        descriptor.mipLevelCount = 3;
        wgpu::Texture texture = device.CreateTexture(&descriptor);

        for (uint32_t i = 0; i < descriptor.mipLevelCount; ++i) {
            EXPECT_TEXTURE_EQ(&filled, texture, {0, 0}, {1, 1}, i);
        }
    }

    {
        wgpu::TextureDescriptor descriptor = baseDescriptor;
        // Some textures may be cleared with render pass load/store ops.
        // Test above the max attachment count.
        descriptor.size.depthOrArrayLayers = kMaxColorAttachments + 1;
        descriptor.mipLevelCount = 3;
        wgpu::Texture texture = device.CreateTexture(&descriptor);

        for (uint32_t i = 0; i < descriptor.size.depthOrArrayLayers; ++i) {
            for (uint32_t j = 0; j < descriptor.mipLevelCount; ++j) {
                EXPECT_TEXTURE_EQ(&filled, texture, {0, 0, i}, {1, 1}, j);
            }
        }
        uint32_t expectedBytes = IsVulkan() ? 0x7F7F7F7F : 0x01010101;
        std::vector<uint32_t> expected(bufferSize, expectedBytes);
        EXPECT_BUFFER_U32_RANGE_EQ(expected.data(), bufferDst, 0, expected.size());
    }
}

@@ -301,5 +264,7 @@ DAWN_INSTANTIATE_TEST(NonzeroTextureCreationTests,
                                    {"lazy_clear_resource_on_first_use"}),
                      OpenGLBackend({"nonzero_clear_resources_on_creation_for_testing"},
                                    {"lazy_clear_resource_on_first_use"}),
                      OpenGLESBackend({"nonzero_clear_resources_on_creation_for_testing"},
                                      {"lazy_clear_resource_on_first_use"}),
                      VulkanBackend({"nonzero_clear_resources_on_creation_for_testing"},
                                    {"lazy_clear_resource_on_first_use"}));