Support 3D textures in ClearTexture
Also fix and enable testing for texture initialization on OpenGLES.
This CL also factors code so that opengl::Texture::ClearTexture can use
the DoTexSubImage helper.

Note: Clearing of compressed textures on GL still unimplemented.

Bug: dawn:780
Change-Id: I5c1268ee570f2d4347d365465700dd416fbf5619
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/50121
Commit-Queue: Austin Eng <enga@chromium.org>
Reviewed-by: Stephen White <senorblanco@chromium.org>
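The OpenGL half of this change routes lazy clears through the same buffer-to-texture upload helper used by regular copies. As a rough illustration (plain GL, not the Dawn code in the diff below), a clear can be expressed as a pixel-unpack-buffer upload: fill a staging buffer with the clear value, bind it to GL_PIXEL_UNPACK_BUFFER, and issue glTexSubImage3D with a zero offset so texels are sourced from the bound buffer. Function name, format, and sizes here are illustrative.

```cpp
// Sketch only: clearing one level of a 3D texture by streaming a constant-filled
// pixel-unpack buffer through glTexSubImage3D, the same path a buffer-to-texture
// copy takes. Assumes an RGBA8 texture.
#include <vector>
#include <GLES3/gl3.h>

void ClearTexture3DLevel(GLuint texture, GLint level,
                         GLsizei width, GLsizei height, GLsizei depth,
                         GLubyte clearByte) {
    const GLsizei bytesPerTexel = 4;  // RGBA8
    std::vector<GLubyte> clearData(
        static_cast<size_t>(width) * height * depth * bytesPerTexel, clearByte);

    GLuint pbo = 0;
    glGenBuffers(1, &pbo);
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
    glBufferData(GL_PIXEL_UNPACK_BUFFER, clearData.size(), clearData.data(),
                 GL_STREAM_DRAW);

    glBindTexture(GL_TEXTURE_3D, texture);
    // With a pixel-unpack buffer bound, the last argument is a byte offset into
    // that buffer rather than a client pointer.
    glTexSubImage3D(GL_TEXTURE_3D, level, 0, 0, 0, width, height, depth,
                    GL_RGBA, GL_UNSIGNED_BYTE, nullptr);

    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
    glDeleteBuffers(1, &pbo);
}
```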
@@ -807,27 +807,40 @@ namespace dawn_native { namespace d3d12 {
    D3D12_RENDER_TARGET_VIEW_DESC Texture::GetRTVDescriptor(uint32_t mipLevel,
                                                            uint32_t baseArrayLayer,
                                                            uint32_t layerCount) const {
        ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
        D3D12_RENDER_TARGET_VIEW_DESC rtvDesc;
        rtvDesc.Format = GetD3D12Format();
        if (IsMultisampledTexture()) {
            ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
            ASSERT(GetNumMipLevels() == 1);
            ASSERT(layerCount == 1);
            ASSERT(baseArrayLayer == 0);
            ASSERT(mipLevel == 0);
            rtvDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2DMS;
        } else {
            // Currently we always use D3D12_TEX2D_ARRAY_RTV because we cannot specify base array
            // layer and layer count in D3D12_TEX2D_RTV. For 2D texture views, we treat them as
            // 1-layer 2D array textures. (Just like how we treat SRVs)
            // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_rtv
            // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_array
            // _rtv
            rtvDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2DARRAY;
            rtvDesc.Texture2DArray.FirstArraySlice = baseArrayLayer;
            rtvDesc.Texture2DArray.ArraySize = layerCount;
            rtvDesc.Texture2DArray.MipSlice = mipLevel;
            rtvDesc.Texture2DArray.PlaneSlice = 0;
            return rtvDesc;
        }
        switch (GetDimension()) {
            case wgpu::TextureDimension::e2D:
                // Currently we always use D3D12_TEX2D_ARRAY_RTV because we cannot specify base
                // array layer and layer count in D3D12_TEX2D_RTV. For 2D texture views, we treat
                // them as 1-layer 2D array textures. (Just like how we treat SRVs)
                // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_rtv
                // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_array
                // _rtv
                rtvDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2DARRAY;
                rtvDesc.Texture2DArray.FirstArraySlice = baseArrayLayer;
                rtvDesc.Texture2DArray.ArraySize = layerCount;
                rtvDesc.Texture2DArray.MipSlice = mipLevel;
                rtvDesc.Texture2DArray.PlaneSlice = 0;
                break;
            case wgpu::TextureDimension::e3D:
                rtvDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE3D;
                rtvDesc.Texture3D.MipSlice = mipLevel;
                rtvDesc.Texture3D.FirstWSlice = baseArrayLayer;
                rtvDesc.Texture3D.WSize = layerCount;
                break;
            case wgpu::TextureDimension::e1D:
                UNREACHABLE();
                break;
        }
        return rtvDesc;
    }

@@ -951,9 +964,13 @@ namespace dawn_native { namespace d3d12 {
        for (Aspect aspect : IterateEnumMask(range.aspects)) {
            const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(aspect).block;

            uint32_t bytesPerRow = Align((GetWidth() / blockInfo.width) * blockInfo.byteSize,
                                         kTextureBytesPerRowAlignment);
            uint64_t bufferSize = bytesPerRow * (GetHeight() / blockInfo.height);
            Extent3D largestMipSize = GetMipLevelPhysicalSize(range.baseMipLevel);

            uint32_t bytesPerRow =
                Align((largestMipSize.width / blockInfo.width) * blockInfo.byteSize,
                      kTextureBytesPerRowAlignment);
            uint64_t bufferSize = bytesPerRow * (largestMipSize.height / blockInfo.height) *
                                  largestMipSize.depthOrArrayLayers;
            DynamicUploader* uploader = device->GetDynamicUploader();
            UploadHandle uploadHandle;
            DAWN_TRY_ASSIGN(uploadHandle,

@@ -966,7 +983,7 @@ namespace dawn_native { namespace d3d12 {
                // compute d3d12 texture copy locations for texture and buffer
                Extent3D copySize = GetMipLevelPhysicalSize(level);

                uint32_t rowsPerImage = GetHeight() / blockInfo.height;
                uint32_t rowsPerImage = copySize.height / blockInfo.height;
                TextureCopySubresource copySplit = ComputeTextureCopySubresource(
                    {0, 0, 0}, copySize, blockInfo, uploadHandle.startOffset, bytesPerRow,
                    rowsPerImage);
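For readers unfamiliar with D3D12's 3D render-target views: the Texture3D fields select one mip and a contiguous range of W (depth) slices, which is what lets a single clear cover several slices. A standalone sketch, not Dawn code; it assumes the resource was created with D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET, is in the RENDER_TARGET state, and that rtvHandle points at a slot in an RTV descriptor heap.

```cpp
// Sketch: clear a range of W-slices of a 3D texture through a TEXTURE3D RTV.
#include <d3d12.h>

void ClearTexture3DSlices(ID3D12Device* device,
                          ID3D12GraphicsCommandList* commandList,
                          ID3D12Resource* texture3D,
                          DXGI_FORMAT format,
                          UINT mipLevel,
                          UINT firstWSlice,
                          UINT wSize,
                          D3D12_CPU_DESCRIPTOR_HANDLE rtvHandle) {
    D3D12_RENDER_TARGET_VIEW_DESC rtvDesc = {};
    rtvDesc.Format = format;
    rtvDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE3D;
    rtvDesc.Texture3D.MipSlice = mipLevel;
    rtvDesc.Texture3D.FirstWSlice = firstWSlice;  // analogous to baseArrayLayer above
    rtvDesc.Texture3D.WSize = wSize;              // analogous to layerCount above

    device->CreateRenderTargetView(texture3D, &rtvDesc, rtvHandle);

    const FLOAT clearColor[4] = {1.0f, 1.0f, 1.0f, 1.0f};  // nonzero "debug" clear value
    commandList->ClearRenderTargetView(rtvHandle, clearColor, 0, nullptr);
}
```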
@@ -445,6 +445,7 @@ namespace dawn_native { namespace metal {
                    continue;
                }

                ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
                switch (aspect) {
                    case Aspect::Depth:
                        descriptor.depthAttachment.texture = GetMTLTexture();

@@ -482,6 +483,8 @@ namespace dawn_native { namespace metal {
            NSRef<MTLRenderPassDescriptor> descriptor;
            uint32_t attachment = 0;

            uint32_t numZSlices = GetMipLevelVirtualSize(level).depthOrArrayLayers;

            for (uint32_t arrayLayer = range.baseArrayLayer;
                 arrayLayer < range.baseArrayLayer + range.layerCount; arrayLayer++) {
                if (clearValue == TextureBase::ClearValue::Zero &&

@@ -491,28 +494,33 @@ namespace dawn_native { namespace metal {
                    continue;
                }

                if (descriptor == nullptr) {
                    // Note that this creates a descriptor that's autoreleased so we don't
                    // use AcquireNSRef
                    descriptor = [MTLRenderPassDescriptor renderPassDescriptor];
                }
                for (uint32_t z = 0; z < numZSlices; ++z) {
                    if (descriptor == nullptr) {
                        // Note that this creates a descriptor that's autoreleased so we
                        // don't use AcquireNSRef
                        descriptor = [MTLRenderPassDescriptor renderPassDescriptor];
                    }

                [*descriptor colorAttachments][attachment].texture = GetMTLTexture();
                [*descriptor colorAttachments][attachment].loadAction = MTLLoadActionClear;
                [*descriptor colorAttachments][attachment].storeAction =
                    MTLStoreActionStore;
                [*descriptor colorAttachments][attachment].clearColor =
                    MTLClearColorMake(dClearColor, dClearColor, dClearColor, dClearColor);
                [*descriptor colorAttachments][attachment].level = level;
                [*descriptor colorAttachments][attachment].slice = arrayLayer;
                    [*descriptor colorAttachments][attachment].texture = GetMTLTexture();
                    [*descriptor colorAttachments][attachment].loadAction =
                        MTLLoadActionClear;
                    [*descriptor colorAttachments][attachment].storeAction =
                        MTLStoreActionStore;
                    [*descriptor colorAttachments][attachment].clearColor =
                        MTLClearColorMake(dClearColor, dClearColor, dClearColor,
                                          dClearColor);
                    [*descriptor colorAttachments][attachment].level = level;
                    [*descriptor colorAttachments][attachment].slice = arrayLayer;
                    [*descriptor colorAttachments][attachment].depthPlane = z;

                attachment++;
                    attachment++;

                if (attachment == kMaxColorAttachments) {
                    attachment = 0;
                    commandContext->BeginRender(descriptor.Get());
                    commandContext->EndRender();
                    descriptor = nullptr;
                    if (attachment == kMaxColorAttachments) {
                        attachment = 0;
                        commandContext->BeginRender(descriptor.Get());
                        commandContext->EndRender();
                        descriptor = nullptr;
                    }
                }
            }

@@ -538,9 +546,7 @@ namespace dawn_native { namespace metal {
                                                  (largestMipSize.height / blockInfo.height),
                                              512llu);

            // TODO(enga): Multiply by largestMipSize.depthOrArrayLayers and do a larger 3D copy to
            // clear a whole range of subresources when tracking that is improved.
            uint64_t bufferSize = largestMipBytesPerImage * 1;
            uint64_t bufferSize = largestMipBytesPerImage * largestMipSize.depthOrArrayLayers;

            if (bufferSize > std::numeric_limits<NSUInteger>::max()) {
                return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");

@@ -577,7 +583,7 @@ namespace dawn_native { namespace metal {
                          sourceBytesPerRow:largestMipBytesPerRow
                        sourceBytesPerImage:largestMipBytesPerImage
                                 sourceSize:MTLSizeMake(virtualSize.width, virtualSize.height,
                                                        1)
                                                        virtualSize.depthOrArrayLayers)
                                  toTexture:GetMTLTexture()
                           destinationSlice:arrayLayer
                           destinationLevel:level
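The Metal path above fans each (layer, z-slice) pair out to its own color attachment and flushes a render pass whenever kMaxColorAttachments are queued. A minimal, backend-agnostic sketch of that control flow, with hypothetical callbacks standing in for the Metal descriptor setup and BeginRender/EndRender:

```cpp
// Hypothetical helper, not Dawn code: batch per-slice clear attachments and
// flush a render pass every kMaxColorAttachments, plus one final partial flush.
#include <cstdint>
#include <functional>

constexpr uint32_t kMaxColorAttachments = 8;  // WebGPU's limit

void ForEachSliceBatched(uint32_t layerCount,
                         uint32_t zSliceCount,
                         const std::function<void(uint32_t attachmentIndex,
                                                  uint32_t layer,
                                                  uint32_t z)>& queueAttachment,
                         const std::function<void()>& flushRenderPass) {
    uint32_t attachment = 0;
    for (uint32_t layer = 0; layer < layerCount; ++layer) {
        for (uint32_t z = 0; z < zSliceCount; ++z) {
            queueAttachment(attachment++, layer, z);
            if (attachment == kMaxColorAttachments) {
                flushRenderPass();  // BeginRender/EndRender in the Metal code above
                attachment = 0;
            }
        }
    }
    if (attachment > 0) {
        flushRenderPass();  // flush the final partial batch
    }
}
```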
@@ -617,6 +617,13 @@ namespace dawn_native { namespace opengl {
            ASSERT(dst.aspect == Aspect::Color);

            buffer->EnsureDataInitialized();
            SubresourceRange range = GetSubresourcesAffectedByCopy(dst, copy->copySize);
            if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), copy->copySize,
                                              dst.mipLevel)) {
                dst.texture->SetIsSubresourceContentInitialized(true, range);
            } else {
                ToBackend(dst.texture)->EnsureSubresourceContentInitialized(range);
            }

            gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, buffer->GetHandle());

@@ -1274,12 +1281,6 @@ namespace dawn_native { namespace opengl {
                          const Extent3D& copySize) {
            Texture* texture = ToBackend(destination.texture.Get());
            ASSERT(texture->GetDimension() != wgpu::TextureDimension::e1D);
            SubresourceRange range = GetSubresourcesAffectedByCopy(destination, copySize);
            if (IsCompleteSubresourceCopiedTo(texture, copySize, destination.mipLevel)) {
                texture->SetIsSubresourceContentInitialized(true, range);
            } else {
                texture->EnsureSubresourceContentInitialized(range);
            }

            const GLFormat& format = texture->GetGLFormat();
            GLenum target = texture->GetGLTarget();

@@ -62,6 +62,14 @@ namespace dawn_native { namespace opengl {
        textureCopy.origin = destination.origin;
        textureCopy.aspect =
            SelectFormatAspects(destination.texture->GetFormat(), destination.aspect);

        SubresourceRange range = GetSubresourcesAffectedByCopy(textureCopy, writeSizePixel);
        if (IsCompleteSubresourceCopiedTo(destination.texture, writeSizePixel,
                                          destination.mipLevel)) {
            destination.texture->SetIsSubresourceContentInitialized(true, range);
        } else {
            ToBackend(destination.texture)->EnsureSubresourceContentInitialized(range);
        }
        DoTexSubImage(ToBackend(GetDevice())->gl, textureCopy, data, dataLayout, writeSizePixel);
        return {};
    }
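All three call sites above apply the same rule before a write that may not cover a whole subresource. A trivial sketch of that predicate (hypothetical helper, not the Dawn API):

```cpp
// If the copy overwrites every texel of the destination subresource, it can be
// marked initialized and the lazy clear skipped; otherwise it must be cleared
// first so the texels the copy does not touch are defined.
bool CopyCoversWholeSubresource(uint32_t mipWidth, uint32_t mipHeight, uint32_t mipDepth,
                                uint32_t copyWidth, uint32_t copyHeight, uint32_t copyDepth) {
    return copyWidth == mipWidth && copyHeight == mipHeight && copyDepth == mipDepth;
}
```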
@@ -19,6 +19,7 @@
#include "common/Math.h"
#include "dawn_native/EnumMaskIterator.h"
#include "dawn_native/opengl/BufferGL.h"
#include "dawn_native/opengl/CommandBufferGL.h"
#include "dawn_native/opengl/DeviceGL.h"
#include "dawn_native/opengl/UtilsGL.h"

@@ -247,6 +248,17 @@ namespace dawn_native { namespace opengl {
            gl.GenFramebuffers(1, &framebuffer);
            gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer);

            GLenum attachment;
            if (range.aspects == (Aspect::Depth | Aspect::Stencil)) {
                attachment = GL_DEPTH_STENCIL_ATTACHMENT;
            } else if (range.aspects == Aspect::Depth) {
                attachment = GL_DEPTH_ATTACHMENT;
            } else if (range.aspects == Aspect::Stencil) {
                attachment = GL_STENCIL_ATTACHMENT;
            } else {
                UNREACHABLE();
            }

            for (uint32_t level = range.baseMipLevel;
                 level < range.baseMipLevel + range.levelCount; ++level) {
                switch (GetDimension()) {

@@ -268,9 +280,9 @@ namespace dawn_native { namespace opengl {
                            continue;
                        }

                        gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER,
                                                GL_DEPTH_STENCIL_ATTACHMENT, GetGLTarget(),
                                                GetHandle(), static_cast<GLint>(level));
                        gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, attachment,
                                                GetGLTarget(), GetHandle(),
                                                static_cast<GLint>(level));
                        DoClear(aspectsToClear);
                    } else {
                        for (uint32_t layer = range.baseArrayLayer;

@@ -292,9 +304,8 @@ namespace dawn_native { namespace opengl {
                            }

                            gl.FramebufferTextureLayer(
                                GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT,
                                GetHandle(), static_cast<GLint>(level),
                                static_cast<GLint>(layer));
                                GL_DRAW_FRAMEBUFFER, attachment, GetHandle(),
                                static_cast<GLint>(level), static_cast<GLint>(layer));
                            DoClear(aspectsToClear);
                        }
                    }
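Selecting GL_DEPTH_ATTACHMENT, GL_STENCIL_ATTACHMENT, or GL_DEPTH_STENCIL_ATTACHMENT has a matching choice of clear call. A plain-GL sketch (not Dawn's DoClear) of clearing whichever aspects are attached to the currently bound draw framebuffer:

```cpp
#include <GLES3/gl3.h>

void ClearDepthStencilAspects(bool clearDepth, bool clearStencil,
                              GLfloat depthValue, GLint stencilValue) {
    glDisable(GL_SCISSOR_TEST);   // lazy clears must cover the whole attachment
    glDepthMask(GL_TRUE);         // clears are gated by the current write masks
    glStencilMask(0xFF);
    if (clearDepth && clearStencil) {
        glClearBufferfi(GL_DEPTH_STENCIL, 0, depthValue, stencilValue);
    } else if (clearDepth) {
        glClearBufferfv(GL_DEPTH, 0, &depthValue);
    } else if (clearStencil) {
        glClearBufferiv(GL_STENCIL, 0, &stencilValue);
    }
    glEnable(GL_SCISSOR_TEST);
}
```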
@@ -310,13 +321,30 @@ namespace dawn_native { namespace opengl {
        } else {
            ASSERT(range.aspects == Aspect::Color);

            // For gl.ClearBufferiv/uiv calls
            constexpr std::array<GLuint, 4> kClearColorDataUint0 = {0u, 0u, 0u, 0u};
            constexpr std::array<GLuint, 4> kClearColorDataUint1 = {1u, 1u, 1u, 1u};
            std::array<GLuint, 4> clearColorData;
            clearColorData.fill((clearValue == TextureBase::ClearValue::Zero) ? 0u : 1u);

            // For gl.ClearBufferfv calls
            constexpr std::array<GLfloat, 4> kClearColorDataFloat0 = {0.f, 0.f, 0.f, 0.f};
            constexpr std::array<GLfloat, 4> kClearColorDataFloat1 = {1.f, 1.f, 1.f, 1.f};
            std::array<GLfloat, 4> fClearColorData;
            fClearColorData.fill((clearValue == TextureBase::ClearValue::Zero) ? 0.f : 1.f);

            static constexpr uint32_t MAX_TEXEL_SIZE = 16;
            const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(Aspect::Color).block;
            ASSERT(blockInfo.byteSize <= MAX_TEXEL_SIZE);

            std::array<GLbyte, MAX_TEXEL_SIZE> clearColorData;
            clearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0 : 255;
            clearColorData.fill(clearColor);
            // For gl.ClearTexSubImage calls
            constexpr std::array<GLbyte, MAX_TEXEL_SIZE> kClearColorDataBytes0 = {
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
            constexpr std::array<GLbyte, MAX_TEXEL_SIZE> kClearColorDataBytes255 = {
                -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1};

            wgpu::TextureComponentType baseType =
                GetFormat().GetAspectInfo(Aspect::Color).baseType;

            const GLFormat& glFormat = GetGLFormat();
            for (uint32_t level = range.baseMipLevel;

@@ -333,27 +361,84 @@ namespace dawn_native { namespace opengl {
                    if (gl.IsAtLeastGL(4, 4)) {
                        gl.ClearTexSubImage(mHandle, static_cast<GLint>(level), 0, 0,
                                            static_cast<GLint>(layer), mipSize.width,
                                            mipSize.height, 1, glFormat.format, glFormat.type,
                                            clearColorData.data());
                    } else {
                        GLuint framebuffer = 0;
                        gl.GenFramebuffers(1, &framebuffer);
                        gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer);
                        if (GetArrayLayers() == 1 &&
                            GetDimension() == wgpu::TextureDimension::e2D) {
                            gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
                                                    GetGLTarget(), GetHandle(), level);
                        } else {
                            gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER,
                                                       GL_COLOR_ATTACHMENT0, GetHandle(), level,
                                                       layer);
                        }
                        gl.Disable(GL_SCISSOR_TEST);
                        gl.ClearBufferiv(GL_COLOR, 0,
                                         reinterpret_cast<const GLint*>(clearColorData.data()));
                        gl.Enable(GL_SCISSOR_TEST);
                        gl.DeleteFramebuffers(1, &framebuffer);
                                            mipSize.height, mipSize.depthOrArrayLayers,
                                            glFormat.format, glFormat.type,
                                            clearValue == TextureBase::ClearValue::Zero
                                                ? kClearColorDataBytes0.data()
                                                : kClearColorDataBytes255.data());
                        continue;
                    }

                    GLuint framebuffer = 0;
                    gl.GenFramebuffers(1, &framebuffer);
                    gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer);

                    GLenum attachment = GL_COLOR_ATTACHMENT0;
                    gl.DrawBuffers(1, &attachment);

                    gl.Disable(GL_SCISSOR_TEST);
                    gl.ColorMask(true, true, true, true);

                    auto DoClear = [&]() {
                        switch (baseType) {
                            case wgpu::TextureComponentType::Float: {
                                gl.ClearBufferfv(GL_COLOR, 0,
                                                 clearValue == TextureBase::ClearValue::Zero
                                                     ? kClearColorDataFloat0.data()
                                                     : kClearColorDataFloat1.data());
                                break;
                            }
                            case wgpu::TextureComponentType::Uint: {
                                gl.ClearBufferuiv(GL_COLOR, 0,
                                                  clearValue == TextureBase::ClearValue::Zero
                                                      ? kClearColorDataUint0.data()
                                                      : kClearColorDataUint1.data());
                                break;
                            }
                            case wgpu::TextureComponentType::Sint: {
                                gl.ClearBufferiv(GL_COLOR, 0,
                                                 reinterpret_cast<const GLint*>(
                                                     clearValue == TextureBase::ClearValue::Zero
                                                         ? kClearColorDataUint0.data()
                                                         : kClearColorDataUint1.data()));
                                break;
                            }

                            case wgpu::TextureComponentType::DepthComparison:
                                UNREACHABLE();
                        }
                    };

                    if (GetArrayLayers() == 1) {
                        switch (GetDimension()) {
                            case wgpu::TextureDimension::e1D:
                                UNREACHABLE();
                            case wgpu::TextureDimension::e2D:
                                gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, attachment,
                                                        GetGLTarget(), GetHandle(), level);
                                DoClear();
                                break;
                            case wgpu::TextureDimension::e3D:
                                uint32_t depth =
                                    GetMipLevelVirtualSize(level).depthOrArrayLayers;
                                for (GLint z = 0; z < static_cast<GLint>(depth); ++z) {
                                    gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, attachment,
                                                               GetHandle(), level, z);
                                    DoClear();
                                }
                                break;
                        }

                    } else {
                        ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
                        gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, attachment, GetHandle(),
                                                   level, layer);
                        DoClear();
                    }

                    gl.Enable(GL_SCISSOR_TEST);
                    gl.DeleteFramebuffers(1, &framebuffer);
                    gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
                }
            }
        }
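When ARB_clear_texture-level functionality is available (desktop GL 4.4+), one call clears a whole mip level of a 3D texture, including every depth slice, which is the fast path taken above. A standalone sketch assuming an RGBA8 texture and a loader that exposes glClearTexSubImage:

```cpp
#include <glad/glad.h>  // assumption: any loader exposing GL 4.4 / ARB_clear_texture

void ClearLevelToWhite(GLuint texture, GLint level,
                       GLsizei width, GLsizei height, GLsizei depth) {
    const GLubyte white[4] = {255, 255, 255, 255};  // one RGBA8 texel worth of clear data
    glClearTexSubImage(texture, level,
                       0, 0, 0,               // x/y/z offset
                       width, height, depth,  // depth covers all slices at once
                       GL_RGBA, GL_UNSIGNED_BYTE, white);
}
```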
@@ -363,20 +448,27 @@ namespace dawn_native { namespace opengl {
            // create temp buffer with clear color to copy to the texture image
            const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(Aspect::Color).block;
            ASSERT(kTextureBytesPerRowAlignment % blockInfo.byteSize == 0);
            uint32_t bytesPerRow = Align((GetWidth() / blockInfo.width) * blockInfo.byteSize,
                                         kTextureBytesPerRowAlignment);

            Extent3D largestMipSize = GetMipLevelPhysicalSize(range.baseMipLevel);
            uint32_t bytesPerRow =
                Align((largestMipSize.width / blockInfo.width) * blockInfo.byteSize, 4);

            // Make sure that we are not rounding
            ASSERT(bytesPerRow % blockInfo.byteSize == 0);
            ASSERT(GetHeight() % blockInfo.height == 0);
            ASSERT(largestMipSize.height % blockInfo.height == 0);

            uint64_t bufferSize64 = static_cast<uint64_t>(bytesPerRow) *
                                    (largestMipSize.height / blockInfo.height) *
                                    largestMipSize.depthOrArrayLayers;
            if (bufferSize64 > std::numeric_limits<size_t>::max()) {
                return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
            }
            size_t bufferSize = static_cast<size_t>(bufferSize64);

            dawn_native::BufferDescriptor descriptor = {};
            descriptor.mappedAtCreation = true;
            descriptor.usage = wgpu::BufferUsage::CopySrc;
            descriptor.size = bytesPerRow * (GetHeight() / blockInfo.height);
            if (descriptor.size > std::numeric_limits<uint32_t>::max()) {
                return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
            }
            descriptor.size = bufferSize;

            // We don't count the lazy clear of srcBuffer because it is an internal buffer.
            // TODO(natlee@microsoft.com): use Dynamic Uploader here for temp buffer

@@ -384,57 +476,38 @@ namespace dawn_native { namespace opengl {
            DAWN_TRY_ASSIGN(srcBuffer, Buffer::CreateInternalBuffer(device, &descriptor, false));

            // Fill the buffer with clear color
            memset(srcBuffer->GetMappedRange(0, descriptor.size), clearColor, descriptor.size);
            memset(srcBuffer->GetMappedRange(0, bufferSize), clearColor, bufferSize);
            srcBuffer->Unmap();

            // Bind buffer and texture, and make the buffer to texture copy
            gl.PixelStorei(GL_UNPACK_ROW_LENGTH,
                           (bytesPerRow / blockInfo.byteSize) * blockInfo.width);
            gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0);
            gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, srcBuffer->GetHandle());
            for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
                 ++level) {
                gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, srcBuffer->GetHandle());
                gl.ActiveTexture(GL_TEXTURE0);
                gl.BindTexture(GetGLTarget(), GetHandle());
                TextureCopy textureCopy;
                textureCopy.texture = this;
                textureCopy.mipLevel = level;
                textureCopy.origin = {};
                textureCopy.aspect = Aspect::Color;

                Extent3D size = GetMipLevelPhysicalSize(level);
                switch (GetDimension()) {
                    case wgpu::TextureDimension::e2D:
                        if (GetArrayLayers() == 1) {
                            if (clearValue == TextureBase::ClearValue::Zero &&
                                IsSubresourceContentInitialized(
                                    SubresourceRange::SingleMipAndLayer(level, 0, Aspect::Color))) {
                                // Skip lazy clears if already initialized.
                                continue;
                            }
                            gl.TexSubImage2D(GetGLTarget(), static_cast<GLint>(level), 0, 0,
                                             size.width, size.height, GetGLFormat().format,
                                             GetGLFormat().type, 0);
                        } else {
                            for (uint32_t layer = range.baseArrayLayer;
                                 layer < range.baseArrayLayer + range.layerCount; ++layer) {
                                if (clearValue == TextureBase::ClearValue::Zero &&
                                    IsSubresourceContentInitialized(
                                        SubresourceRange::SingleMipAndLayer(level, layer,
                                                                            Aspect::Color))) {
                                    // Skip lazy clears if already initialized.
                                    continue;
                                }
                                gl.TexSubImage3D(GetGLTarget(), static_cast<GLint>(level), 0, 0,
                                                 static_cast<GLint>(layer), size.width, size.height,
                                                 1, GetGLFormat().format, GetGLFormat().type, 0);
                            }
                        }
                        break;
                TextureDataLayout dataLayout;
                dataLayout.offset = 0;
                dataLayout.bytesPerRow = bytesPerRow;
                dataLayout.rowsPerImage = largestMipSize.height;

                    case wgpu::TextureDimension::e1D:
                    case wgpu::TextureDimension::e3D:
                        UNREACHABLE();
                Extent3D mipSize = GetMipLevelPhysicalSize(level);

                for (uint32_t layer = range.baseArrayLayer;
                     layer < range.baseArrayLayer + range.layerCount; ++layer) {
                    if (clearValue == TextureBase::ClearValue::Zero &&
                        IsSubresourceContentInitialized(
                            SubresourceRange::SingleMipAndLayer(level, layer, Aspect::Color))) {
                        // Skip lazy clears if already initialized.
                        continue;
                    }

                    textureCopy.origin.z = layer;
                    DoTexSubImage(ToBackend(GetDevice())->gl, textureCopy, 0, dataLayout, mipSize);
                }
            }
            gl.PixelStorei(GL_UNPACK_ROW_LENGTH, 0);
            gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0);

            gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
        }
        if (clearValue == TextureBase::ClearValue::Zero) {
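DoTexSubImage relies on the caller describing the staging data with bytesPerRow/rowsPerImage; in GL those map onto unpack state measured in pixels and rows, not bytes. A small sketch of that mapping (plain GL, illustrative parameter names):

```cpp
#include <GLES3/gl3.h>

// Translate a linear data layout into GL unpack state before a (sub)image upload.
void SetUnpackLayout(uint32_t bytesPerRow, uint32_t bytesPerTexel, uint32_t rowsPerImage) {
    // ROW_LENGTH is expressed in pixels (or blocks), so divide the byte stride out.
    glPixelStorei(GL_UNPACK_ROW_LENGTH,
                  static_cast<GLint>(bytesPerRow / bytesPerTexel));
    // IMAGE_HEIGHT is the number of rows per 2D slice of a 3D/array upload.
    glPixelStorei(GL_UNPACK_IMAGE_HEIGHT, static_cast<GLint>(rowsPerImage));
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);  // avoid implicit 4-byte row padding
}
```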
@@ -1027,9 +1027,13 @@ namespace dawn_native { namespace vulkan {
            ASSERT(range.aspects == Aspect::Color);
            const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(range.aspects).block;

            uint32_t bytesPerRow = Align((GetWidth() / blockInfo.width) * blockInfo.byteSize,
                                         device->GetOptimalBytesPerRowAlignment());
            uint64_t bufferSize = bytesPerRow * (GetHeight() / blockInfo.height);
            Extent3D largestMipSize = GetMipLevelPhysicalSize(range.baseMipLevel);

            uint32_t bytesPerRow =
                Align((largestMipSize.width / blockInfo.width) * blockInfo.byteSize,
                      device->GetOptimalBytesPerRowAlignment());
            uint64_t bufferSize = bytesPerRow * (largestMipSize.height / blockInfo.height) *
                                  largestMipSize.depthOrArrayLayers;
            DynamicUploader* uploader = device->GetDynamicUploader();
            UploadHandle uploadHandle;
            DAWN_TRY_ASSIGN(uploadHandle,

@@ -1040,6 +1044,7 @@ namespace dawn_native { namespace vulkan {
            std::vector<VkBufferImageCopy> regions;
            for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
                 ++level) {
                Extent3D copySize = GetMipLevelPhysicalSize(level);
                imageRange.baseMipLevel = level;
                for (uint32_t layer = range.baseArrayLayer;
                     layer < range.baseArrayLayer + range.layerCount; ++layer) {

@@ -1052,7 +1057,7 @@ namespace dawn_native { namespace vulkan {

                    TextureDataLayout dataLayout;
                    dataLayout.offset = uploadHandle.startOffset;
                    dataLayout.rowsPerImage = GetHeight() / blockInfo.height;
                    dataLayout.rowsPerImage = copySize.height / blockInfo.height;
                    dataLayout.bytesPerRow = bytesPerRow;
                    TextureCopy textureCopy;
                    textureCopy.aspect = range.aspects;

@@ -1060,8 +1065,8 @@ namespace dawn_native { namespace vulkan {
                    textureCopy.origin = {0, 0, layer};
                    textureCopy.texture = this;

                    regions.push_back(ComputeBufferImageCopyRegion(dataLayout, textureCopy,
                                                                   GetMipLevelPhysicalSize(level)));
                    regions.push_back(
                        ComputeBufferImageCopyRegion(dataLayout, textureCopy, copySize));
                }
            }
            device->fn.CmdCopyBufferToImage(
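The Vulkan path above clears by copying from an upload buffer, which also works for formats that cannot be rendered to. For comparison, a color image that allows transfer writes could instead be cleared directly; a standalone sketch using vkCmdClearColorImage (not the approach Dawn takes here):

```cpp
#include <vulkan/vulkan.h>

// Assumes the image is already in VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL (or GENERAL)
// and was created with VK_IMAGE_USAGE_TRANSFER_DST_BIT.
void ClearWholeImageToOnes(VkCommandBuffer cmd, VkImage image,
                           uint32_t levelCount, uint32_t layerCount) {
    VkClearColorValue clearColor = {};
    clearColor.float32[0] = 1.0f;
    clearColor.float32[1] = 1.0f;
    clearColor.float32[2] = 1.0f;
    clearColor.float32[3] = 1.0f;

    VkImageSubresourceRange range = {};
    range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    range.baseMipLevel = 0;
    range.levelCount = levelCount;
    range.baseArrayLayer = 0;
    range.layerCount = layerCount;

    vkCmdClearColorImage(cmd, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                         &clearColor, 1, &range);
}
```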
@@ -989,6 +989,8 @@ TEST_P(CopyTests_T2B, Texture3DFull) {

// Test that copying a range of texture 3D depths in one texture-to-buffer-copy works.
TEST_P(CopyTests_T2B, Texture3DSubRegion) {
    DAWN_SKIP_TEST_IF(IsANGLE());  // TODO(crbug.com/angleproject/5967)

    constexpr uint32_t kWidth = 256;
    constexpr uint32_t kHeight = 128;
    constexpr uint32_t kDepth = 6u;

@@ -1458,6 +1460,8 @@ TEST_P(CopyTests_B2T, Texture3DFull) {

// Test that copying a range of texture 3D Depths in one texture-to-buffer-copy works.
TEST_P(CopyTests_B2T, Texture3DSubRegion) {
    DAWN_SKIP_TEST_IF(IsANGLE());  // TODO(crbug.com/angleproject/5967)

    constexpr uint32_t kWidth = 256;
    constexpr uint32_t kHeight = 128;
    constexpr uint32_t kDepth = 6u;

@@ -1834,6 +1838,7 @@ TEST_P(CopyTests_T2T, Texture2DArrayTo3DFull) {
// Test that copying subregion of a 3D texture in one texture-to-texture-copy works.
TEST_P(CopyTests_T2T, Texture3DSubRegion) {
    DAWN_SKIP_TEST_IF(IsD3D12());  // TODO(crbug.com/dawn/547): Implement on D3D12.
    DAWN_SKIP_TEST_IF(IsANGLE());  // TODO(crbug.com/angleproject/5967)

    constexpr uint32_t kWidth = 256;
    constexpr uint32_t kHeight = 128;

@@ -1865,6 +1870,7 @@ TEST_P(CopyTests_T2T, Texture3DTo2DArraySubRegion) {
// works.
TEST_P(CopyTests_T2T, Texture2DArrayTo3DSubRegion) {
    DAWN_SKIP_TEST_IF(IsD3D12());  // TODO(crbug.com/dawn/547): Implement on D3D12.
    DAWN_SKIP_TEST_IF(IsANGLE());  // TODO(crbug.com/angleproject/5967)

    constexpr uint32_t kWidth = 256;
    constexpr uint32_t kHeight = 128;
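The new tests exercise copies that span several z-slices in one command. A simplified sketch of such a copy through the public WebGPU C++ API; the texture, readback buffer (CopyDst usage), and sizes are placeholders:

```cpp
#include <webgpu/webgpu_cpp.h>

// Copies two z-slices (starting at z == 3) of a 3D RGBA8 texture into a buffer.
void CopyTexture3DSubRegionToBuffer(const wgpu::Device& device,
                                    const wgpu::Queue& queue,
                                    const wgpu::Texture& texture3D,
                                    const wgpu::Buffer& readbackBuffer) {
    wgpu::Extent3D copySize = {64, 32, 2};  // width, height, number of depth slices

    wgpu::ImageCopyTexture src = {};
    src.texture = texture3D;
    src.mipLevel = 0;
    src.origin = {0, 0, 3};  // start at z-slice 3

    wgpu::ImageCopyBuffer dst = {};
    dst.buffer = readbackBuffer;
    dst.layout.offset = 0;
    dst.layout.bytesPerRow = 256;  // >= 64 * 4 bytes and a multiple of 256
    dst.layout.rowsPerImage = 32;  // rows per z-slice

    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
    encoder.CopyTextureToBuffer(&src, &dst, &copySize);
    wgpu::CommandBuffer commands = encoder.Finish();
    queue.Submit(1, &commands);
}
```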
@@ -20,11 +20,8 @@

class NonzeroTextureCreationTests : public DawnTest {
  protected:
    void SetUp() override {
        DawnTest::SetUp();
    }

    constexpr static uint32_t kSize = 128;
    constexpr static uint32_t kDepthOrArrayLayers = 7;
};

// Test that texture clears 0xFF because toggle is enabled.

@@ -38,16 +35,51 @@ TEST_P(NonzeroTextureCreationTests, TextureCreationClears) {
    descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
    descriptor.mipLevelCount = 1;
    descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;

    // 2D
    {
        wgpu::Texture texture = device.CreateTexture(&descriptor);

        std::vector<RGBA8> expected(kSize * kSize, RGBA8(255, 255, 255, 255));
        EXPECT_TEXTURE_EQ(expected.data(), texture, {0, 0}, {kSize, kSize});
    }

    // 2D Array
    {
        descriptor.dimension = wgpu::TextureDimension::e2D;
        descriptor.size.depthOrArrayLayers = kDepthOrArrayLayers;
        wgpu::Texture texture = device.CreateTexture(&descriptor);

        std::vector<RGBA8> expected(kSize * kSize * kDepthOrArrayLayers, RGBA8(255, 255, 255, 255));
        EXPECT_TEXTURE_EQ(expected.data(), texture, {0, 0, 0}, {kSize, kSize, kDepthOrArrayLayers});
    }
}

// Test that 3D texture clears to nonzero because toggle is enabled.
TEST_P(NonzeroTextureCreationTests, Texture3DCreationClears) {
    // TODO(crbug.com/dawn/547): 3D texture copies not fully implemented on D3D12.
    // TODO(crbug.com/angleproject/5967): This texture readback hits an assert in ANGLE.
    DAWN_SKIP_TEST_IF(IsANGLE() || IsD3D12());

    wgpu::TextureDescriptor descriptor;
    descriptor.dimension = wgpu::TextureDimension::e3D;
    descriptor.size.width = kSize;
    descriptor.size.height = kSize;
    descriptor.size.depthOrArrayLayers = kDepthOrArrayLayers;
    descriptor.sampleCount = 1;
    descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
    descriptor.mipLevelCount = 1;
    descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
    wgpu::Texture texture = device.CreateTexture(&descriptor);

    RGBA8 filled(255, 255, 255, 255);
    EXPECT_PIXEL_RGBA8_EQ(filled, texture, 0, 0);
    std::vector<RGBA8> expected(kSize * kSize * kDepthOrArrayLayers, RGBA8(255, 255, 255, 255));
    EXPECT_TEXTURE_EQ(expected.data(), texture, {0, 0, 0}, {kSize, kSize, kDepthOrArrayLayers});
}

// Test that a depth texture clears 0xFF because toggle is enabled.
TEST_P(NonzeroTextureCreationTests, Depth32TextureCreationDepthClears) {
    // Copies from depth textures not supported on the OpenGL backend right now.
    DAWN_SKIP_TEST_IF(IsOpenGL());
    // Copies from depth textures not fully supported on the OpenGL backend right now.
    DAWN_SKIP_TEST_IF(IsOpenGL() || IsOpenGLES());

    wgpu::TextureDescriptor descriptor;
    descriptor.dimension = wgpu::TextureDimension::e2D;

@@ -62,9 +94,22 @@ TEST_P(NonzeroTextureCreationTests, Depth32TextureCreationDepthClears) {
    // We can only really test Depth32Float here because Depth24Plus(Stencil8)? may be in an unknown
    // format.
    // TODO(crbug.com/dawn/145): Test other formats via sampling.
    wgpu::Texture texture = device.CreateTexture(&descriptor);
    std::vector<float> expected(kSize * kSize, 1.f);
    EXPECT_TEXTURE_EQ(expected.data(), texture, {0, 0}, {kSize, kSize});

    // 2D
    {
        wgpu::Texture texture = device.CreateTexture(&descriptor);
        std::vector<float> expected(kSize * kSize, 1.f);
        EXPECT_TEXTURE_EQ(expected.data(), texture, {0, 0}, {kSize, kSize});
    }

    // 2D Array
    {
        descriptor.dimension = wgpu::TextureDimension::e2D;
        descriptor.size.depthOrArrayLayers = kDepthOrArrayLayers;
        wgpu::Texture texture = device.CreateTexture(&descriptor);
        std::vector<float> expected(kSize * kSize * kDepthOrArrayLayers, 1.f);
        EXPECT_TEXTURE_EQ(expected.data(), texture, {0, 0, 0}, {kSize, kSize, kDepthOrArrayLayers});
    }
}

// Test that non-zero mip level clears 0xFF because toggle is enabled.
@@ -80,39 +125,37 @@ TEST_P(NonzeroTextureCreationTests, MipMapClears) {
    descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
    descriptor.mipLevelCount = mipLevels;
    descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
    wgpu::Texture texture = device.CreateTexture(&descriptor);

    std::vector<RGBA8> expected;
    RGBA8 filled(255, 255, 255, 255);
    for (uint32_t i = 0; i < kSize * kSize; ++i) {
        expected.push_back(filled);
    }
    uint32_t mipSize = kSize >> 2;
    EXPECT_TEXTURE_EQ(expected.data(), texture, {0, 0}, {mipSize, mipSize}, 2);
}

// Test that non-zero array layers clears 0xFF because toggle is enabled.
TEST_P(NonzeroTextureCreationTests, ArrayLayerClears) {
    constexpr uint32_t arrayLayers = 4;

    wgpu::TextureDescriptor descriptor;
    descriptor.dimension = wgpu::TextureDimension::e2D;
    descriptor.size.width = kSize;
    descriptor.size.height = kSize;
    descriptor.size.depthOrArrayLayers = arrayLayers;
    descriptor.sampleCount = 1;
    descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
    descriptor.mipLevelCount = 1;
    descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
    wgpu::Texture texture = device.CreateTexture(&descriptor);

    std::vector<RGBA8> expected;
    RGBA8 filled(255, 255, 255, 255);
    for (uint32_t i = 0; i < kSize * kSize; ++i) {
        expected.push_back(filled);
    // 2D
    {
        wgpu::Texture texture = device.CreateTexture(&descriptor);
        uint32_t mipSize = kSize >> 2;
        std::vector<RGBA8> expected(mipSize * mipSize, RGBA8(255, 255, 255, 255));
        EXPECT_TEXTURE_EQ(expected.data(), texture, {0, 0}, {mipSize, mipSize}, 2);
    }

    EXPECT_TEXTURE_EQ(expected.data(), texture, {0, 0, 2}, {kSize, kSize});
    // 2D Array
    {
        descriptor.dimension = wgpu::TextureDimension::e2D;
        descriptor.size.depthOrArrayLayers = kDepthOrArrayLayers;
        wgpu::Texture texture = device.CreateTexture(&descriptor);
        uint32_t mipSize = kSize >> 2;
        std::vector<RGBA8> expected(mipSize * mipSize * kDepthOrArrayLayers,
                                    RGBA8(255, 255, 255, 255));
        EXPECT_TEXTURE_EQ(expected.data(), texture, {0, 0, 0},
                          {mipSize, mipSize, kDepthOrArrayLayers}, 2);
    }

    // 3D
    {
        descriptor.dimension = wgpu::TextureDimension::e3D;
        descriptor.size.depthOrArrayLayers = kDepthOrArrayLayers;
        wgpu::Texture texture = device.CreateTexture(&descriptor);
        uint32_t mipSize = kSize >> 2;
        uint32_t mipDepth = kDepthOrArrayLayers >> 2;
        std::vector<RGBA8> expected(mipSize * mipSize * mipDepth, RGBA8(255, 255, 255, 255));
        EXPECT_TEXTURE_EQ(expected.data(), texture, {0, 0, 0}, {mipSize, mipSize, mipDepth}, 2);
    }
}

// Test that nonrenderable texture formats clear 0x01 because toggle is enabled
@@ -130,167 +173,87 @@ TEST_P(NonzeroTextureCreationTests, NonrenderableTextureFormat) {
    descriptor.format = wgpu::TextureFormat::RGBA8Snorm;
    descriptor.mipLevelCount = 1;
    descriptor.usage = wgpu::TextureUsage::CopySrc;
    wgpu::Texture texture = device.CreateTexture(&descriptor);

    // Set buffer with dirty data so we know it is cleared by the lazy cleared texture copy
    uint32_t bufferSize = 4 * kSize * kSize;
    std::vector<uint8_t> data(bufferSize, 100);
    wgpu::Buffer bufferDst = utils::CreateBufferFromData(
        device, data.data(), static_cast<uint32_t>(data.size()), wgpu::BufferUsage::CopySrc);

    wgpu::ImageCopyBuffer imageCopyBuffer = utils::CreateImageCopyBuffer(bufferDst, 0, kSize * 4);
    wgpu::ImageCopyTexture imageCopyTexture = utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
    wgpu::Extent3D copySize = {kSize, kSize, 1};

    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
    encoder.CopyTextureToBuffer(&imageCopyTexture, &imageCopyBuffer, &copySize);
    wgpu::CommandBuffer commands = encoder.Finish();
    queue.Submit(1, &commands);

    uint32_t expectedBytes = IsVulkan() ? 0x7F7F7F7F : 0x01010101;
    std::vector<uint32_t> expected(bufferSize, expectedBytes);
    EXPECT_BUFFER_U32_RANGE_EQ(expected.data(), bufferDst, 0, 8);
}

// Test that textures with more than 1 array layers and nonrenderable texture formats clear to 0x01
// because toggle is enabled
TEST_P(NonzeroTextureCreationTests, NonRenderableTextureClearWithMultiArrayLayers) {
    // TODO(crbug.com/dawn/667): Work around the fact that some platforms do not support reading
    // from Snorm textures.
    DAWN_SKIP_TEST_IF(HasToggleEnabled("disable_snorm_read"));

    wgpu::TextureDescriptor descriptor;
    descriptor.dimension = wgpu::TextureDimension::e2D;
    descriptor.size.width = kSize;
    descriptor.size.height = kSize;
    descriptor.size.depthOrArrayLayers = 2;
    descriptor.sampleCount = 1;
    descriptor.format = wgpu::TextureFormat::RGBA8Snorm;
    descriptor.mipLevelCount = 1;
    descriptor.usage = wgpu::TextureUsage::CopySrc;
    wgpu::Texture texture = device.CreateTexture(&descriptor);

    // Set buffer with dirty data so we know it is cleared by the lazy cleared texture copy
    uint32_t bufferSize = 4 * kSize * kSize;
    std::vector<uint8_t> data(bufferSize, 100);
    wgpu::Buffer bufferDst = utils::CreateBufferFromData(
        device, data.data(), static_cast<uint32_t>(data.size()), wgpu::BufferUsage::CopySrc);

    wgpu::ImageCopyBuffer imageCopyBuffer = utils::CreateImageCopyBuffer(bufferDst, 0, kSize * 4);
    wgpu::ImageCopyTexture imageCopyTexture = utils::CreateImageCopyTexture(texture, 0, {0, 0, 1});
    wgpu::Extent3D copySize = {kSize, kSize, 1};

    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
    encoder.CopyTextureToBuffer(&imageCopyTexture, &imageCopyBuffer, &copySize);
    wgpu::CommandBuffer commands = encoder.Finish();
    queue.Submit(1, &commands);

    uint32_t expectedBytes = IsVulkan() ? 0x7F7F7F7F : 0x01010101;
    std::vector<uint32_t> expected(bufferSize, expectedBytes);
    EXPECT_BUFFER_U32_RANGE_EQ(expected.data(), bufferDst, 0, 8);
}

// Test that all subresources of a renderable texture are filled because the toggle is enabled.
TEST_P(NonzeroTextureCreationTests, AllSubresourcesFilled) {
    wgpu::TextureDescriptor baseDescriptor;
    baseDescriptor.dimension = wgpu::TextureDimension::e2D;
    baseDescriptor.size.width = kSize;
    baseDescriptor.size.height = kSize;
    baseDescriptor.size.depthOrArrayLayers = 1;
    baseDescriptor.sampleCount = 1;
    baseDescriptor.format = wgpu::TextureFormat::RGBA8Unorm;
    baseDescriptor.mipLevelCount = 1;
    baseDescriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;

    RGBA8 filled(255, 255, 255, 255);

    // 2D
    {
        wgpu::TextureDescriptor descriptor = baseDescriptor;
        // Some textures may be cleared with render pass load/store ops.
        // Test above the max attachment count.
        descriptor.size.depthOrArrayLayers = kMaxColorAttachments + 1;
        wgpu::Texture texture = device.CreateTexture(&descriptor);

        for (uint32_t i = 0; i < descriptor.size.depthOrArrayLayers; ++i) {
            EXPECT_TEXTURE_EQ(&filled, texture, {0, 0, i}, {1, 1}, 0);
        }
        // Set buffer with dirty data so we know it is cleared by the lazy cleared texture copy
        uint32_t bufferSize = kSize * kSize;
        std::vector<uint8_t> data(sizeof(uint32_t) * bufferSize, 100);
        wgpu::Buffer bufferDst = utils::CreateBufferFromData(
            device, data.data(), static_cast<uint32_t>(data.size()), wgpu::BufferUsage::CopySrc);

        wgpu::ImageCopyBuffer imageCopyBuffer =
            utils::CreateImageCopyBuffer(bufferDst, 0, kSize * 4);
        wgpu::ImageCopyTexture imageCopyTexture =
            utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
        wgpu::Extent3D copySize = {kSize, kSize, 1};

        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
        encoder.CopyTextureToBuffer(&imageCopyTexture, &imageCopyBuffer, &copySize);
        wgpu::CommandBuffer commands = encoder.Finish();
        queue.Submit(1, &commands);

        uint32_t expectedBytes = IsVulkan() ? 0x7F7F7F7F : 0x01010101;
        std::vector<uint32_t> expected(bufferSize, expectedBytes);
        EXPECT_BUFFER_U32_RANGE_EQ(expected.data(), bufferDst, 0, expected.size());
    }

    // 2D Array
    {
        wgpu::TextureDescriptor descriptor = baseDescriptor;
        descriptor.mipLevelCount = 3;
        descriptor.dimension = wgpu::TextureDimension::e2D;
        descriptor.size.depthOrArrayLayers = kDepthOrArrayLayers;
        wgpu::Texture texture = device.CreateTexture(&descriptor);

        for (uint32_t i = 0; i < descriptor.mipLevelCount; ++i) {
            EXPECT_TEXTURE_EQ(&filled, texture, {0, 0}, {1, 1}, i);
        }
        // Set buffer with dirty data so we know it is cleared by the lazy cleared texture copy
        uint32_t bufferSize = kSize * kSize * kDepthOrArrayLayers;
        std::vector<uint8_t> data(sizeof(uint32_t) * bufferSize, 100);
        wgpu::Buffer bufferDst = utils::CreateBufferFromData(
            device, data.data(), static_cast<uint32_t>(data.size()), wgpu::BufferUsage::CopySrc);

        wgpu::ImageCopyBuffer imageCopyBuffer =
            utils::CreateImageCopyBuffer(bufferDst, 0, kSize * 4, kSize);
        wgpu::ImageCopyTexture imageCopyTexture =
            utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
        wgpu::Extent3D copySize = {kSize, kSize, kDepthOrArrayLayers};

        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
        encoder.CopyTextureToBuffer(&imageCopyTexture, &imageCopyBuffer, &copySize);
        wgpu::CommandBuffer commands = encoder.Finish();
        queue.Submit(1, &commands);

        uint32_t expectedBytes = IsVulkan() ? 0x7F7F7F7F : 0x01010101;
        std::vector<uint32_t> expected(bufferSize, expectedBytes);
        EXPECT_BUFFER_U32_RANGE_EQ(expected.data(), bufferDst, 0, expected.size());
    }

    // 3D
    {
        wgpu::TextureDescriptor descriptor = baseDescriptor;
        // Some textures may be cleared with render pass load/store ops.
        // Test above the max attachment count.
        descriptor.size.depthOrArrayLayers = kMaxColorAttachments + 1;
        descriptor.mipLevelCount = 3;
        descriptor.dimension = wgpu::TextureDimension::e3D;
        descriptor.size.depthOrArrayLayers = kDepthOrArrayLayers;
        wgpu::Texture texture = device.CreateTexture(&descriptor);

        for (uint32_t i = 0; i < descriptor.size.depthOrArrayLayers; ++i) {
            for (uint32_t j = 0; j < descriptor.mipLevelCount; ++j) {
                EXPECT_TEXTURE_EQ(&filled, texture, {0, 0, i}, {1, 1}, j);
            }
        }
    }
}
        // Set buffer with dirty data so we know it is cleared by the lazy cleared texture copy
        uint32_t bufferSize = kSize * kSize * kDepthOrArrayLayers;
        std::vector<uint8_t> data(sizeof(uint32_t) * bufferSize, 100);
        wgpu::Buffer bufferDst = utils::CreateBufferFromData(
            device, data.data(), static_cast<uint32_t>(data.size()), wgpu::BufferUsage::CopySrc);

// Test that all subresources of a nonrenderable texture are filled because the toggle is enabled.
TEST_P(NonzeroTextureCreationTests, NonRenderableAllSubresourcesFilled) {
    wgpu::TextureDescriptor baseDescriptor;
    baseDescriptor.dimension = wgpu::TextureDimension::e2D;
    baseDescriptor.size.width = kSize;
    baseDescriptor.size.height = kSize;
    baseDescriptor.size.depthOrArrayLayers = 1;
    baseDescriptor.sampleCount = 1;
    baseDescriptor.format = wgpu::TextureFormat::RGBA8Snorm;
    baseDescriptor.mipLevelCount = 1;
    baseDescriptor.usage = wgpu::TextureUsage::CopySrc;
        wgpu::ImageCopyBuffer imageCopyBuffer =
            utils::CreateImageCopyBuffer(bufferDst, 0, kSize * 4, kSize);
        wgpu::ImageCopyTexture imageCopyTexture =
            utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
        wgpu::Extent3D copySize = {kSize, kSize, kDepthOrArrayLayers};

    RGBA8 filled = IsVulkan() ? RGBA8(127, 127, 127, 127) : RGBA8(1, 1, 1, 1);
        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
        encoder.CopyTextureToBuffer(&imageCopyTexture, &imageCopyBuffer, &copySize);
        wgpu::CommandBuffer commands = encoder.Finish();
        queue.Submit(1, &commands);

    {
        wgpu::TextureDescriptor descriptor = baseDescriptor;
        // Some textures may be cleared with render pass load/store ops.
        // Test above the max attachment count.
        descriptor.size.depthOrArrayLayers = kMaxColorAttachments + 1;
        wgpu::Texture texture = device.CreateTexture(&descriptor);

        for (uint32_t i = 0; i < descriptor.size.depthOrArrayLayers; ++i) {
            EXPECT_TEXTURE_EQ(&filled, texture, {0, 0, i}, {1, 1}, 0);
        }
    }

    {
        wgpu::TextureDescriptor descriptor = baseDescriptor;
        descriptor.mipLevelCount = 3;
        wgpu::Texture texture = device.CreateTexture(&descriptor);

        for (uint32_t i = 0; i < descriptor.mipLevelCount; ++i) {
            EXPECT_TEXTURE_EQ(&filled, texture, {0, 0}, {1, 1}, i);
        }
    }

    {
        wgpu::TextureDescriptor descriptor = baseDescriptor;
        // Some textures may be cleared with render pass load/store ops.
        // Test above the max attachment count.
        descriptor.size.depthOrArrayLayers = kMaxColorAttachments + 1;
        descriptor.mipLevelCount = 3;
        wgpu::Texture texture = device.CreateTexture(&descriptor);

        for (uint32_t i = 0; i < descriptor.size.depthOrArrayLayers; ++i) {
            for (uint32_t j = 0; j < descriptor.mipLevelCount; ++j) {
                EXPECT_TEXTURE_EQ(&filled, texture, {0, 0, i}, {1, 1}, j);
            }
        }
        uint32_t expectedBytes = IsVulkan() ? 0x7F7F7F7F : 0x01010101;
        std::vector<uint32_t> expected(bufferSize, expectedBytes);
        EXPECT_BUFFER_U32_RANGE_EQ(expected.data(), bufferDst, 0, expected.size());
    }
}
@@ -301,5 +264,7 @@ DAWN_INSTANTIATE_TEST(NonzeroTextureCreationTests,
                                    {"lazy_clear_resource_on_first_use"}),
                      OpenGLBackend({"nonzero_clear_resources_on_creation_for_testing"},
                                    {"lazy_clear_resource_on_first_use"}),
                      OpenGLESBackend({"nonzero_clear_resources_on_creation_for_testing"},
                                      {"lazy_clear_resource_on_first_use"}),
                      VulkanBackend({"nonzero_clear_resources_on_creation_for_testing"},
                                    {"lazy_clear_resource_on_first_use"}));