d3d11: enable some CopyTests

To make the B2B, B2T and T2B tests work, this CL implements several
missing functions:
 - Texture::Clear() is needed for initializing texture data
 - Texture::Read() is needed for reading back data from 2D and 3D textures
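
A rough usage sketch of the new callback-based readback (illustrative only, not
part of the diff; `mappedDst` is a hypothetical stand-in for a mapped destination
buffer, the other names mirror the d3d11 CommandBuffer change below):

    // Texture::Read() copies into an internal staging texture, maps it, and
    // hands each contiguous run of texel data to the caller via ReadCallback.
    Texture::ReadCallback callback = [&](const uint8_t* data, size_t offset,
                                         size_t size) -> MaybeError {
        memcpy(mappedDst + offset, data, size);
        return {};
    };
    DAWN_TRY(texture->Read(commandContext, subresources, origin, copySize,
                           dstBytesPerRow, dstRowsPerImage, callback));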

Bug: dawn:1740,dawn:1768
Change-Id: Ib9e354c82fcdc8cc4e86b1699fa652cfc4a50721
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/128621
Reviewed-by: Corentin Wallez <cwallez@chromium.org>
Kokoro: Kokoro <noreply+kokoro@google.com>
Commit-Queue: Peng Huang <penghuang@chromium.org>
Peng Huang 2023-04-28 17:22:03 +00:00 committed by Dawn LUCI CQ
parent ad571cfb7c
commit e5a4cfb935
8 changed files with 305 additions and 151 deletions

View File

@@ -750,6 +750,17 @@ bool TextureBase::IsMultisampledTexture() const {
     return mSampleCount > 1;
 }
 
+bool TextureBase::CoverFullSubresource(const Extent3D& size) const {
+    switch (GetDimension()) {
+        case wgpu::TextureDimension::e1D:
+            return size.width == GetSize().width;
+        case wgpu::TextureDimension::e2D:
+            return size.width == GetSize().width && size.height == GetSize().height;
+        case wgpu::TextureDimension::e3D:
+            return size == GetSize();
+    }
+}
+
 Extent3D TextureBase::GetMipLevelSingleSubresourceVirtualSize(uint32_t level) const {
     Extent3D extent = {std::max(mSize.width >> level, 1u), 1u, 1u};
     if (mDimension == wgpu::TextureDimension::e1D) {

View File

@@ -79,6 +79,9 @@ class TextureBase : public ApiObjectBase {
     bool IsMultisampledTexture() const;
 
+    // Returns true if the size covers the whole subresource.
+    bool CoverFullSubresource(const Extent3D& size) const;
+
     // For a texture with non-block-compressed texture format, its physical size is always equal
     // to its virtual size. For a texture with block compressed texture format, the physical
     // size is the one with paddings if necessary, which is always a multiple of the block size

View File

@@ -401,9 +401,7 @@ MaybeError Buffer::Write(CommandRecordingContext* commandContext,
                          uint64_t offset,
                          const void* data,
                          size_t size) {
-    if (size == 0) {
-        return {};
-    }
+    DAWN_ASSERT(size != 0);
 
     MarkUsedInPendingCommands();
     // Map the buffer if it is possible, so EnsureDataInitializedAsDestination() and WriteInternal()

View File

@@ -232,8 +232,6 @@ Ref<CommandBuffer> CommandBuffer::Create(CommandEncoder* encoder,
 MaybeError CommandBuffer::Execute() {
     CommandRecordingContext* commandContext = ToBackend(GetDevice())->GetPendingCommandContext();
-    ID3D11DeviceContext1* d3d11DeviceContext1 = commandContext->GetD3D11DeviceContext1();
 
     auto LazyClearSyncScope = [commandContext](const SyncScopeResourceUsage& scope) -> MaybeError {
         for (size_t i = 0; i < scope.textures.size(); i++) {
             Texture* texture = ToBackend(scope.textures[i]);
@@ -366,82 +364,31 @@ MaybeError CommandBuffer::Execute() {
             auto& dst = copy->destination;
 
             SubresourceRange subresources = GetSubresourcesAffectedByCopy(src, copy->copySize);
 
-            DAWN_TRY(ToBackend(src.texture)
-                         ->EnsureSubresourceContentInitialized(commandContext, subresources));
-
-            TextureDescriptor desc = {};
-            desc.label = "CopyTextureToBufferStaging";
-            desc.dimension = src.texture->GetDimension();
-            desc.size.width = copy->copySize.width;
-            desc.size.height = copy->copySize.height;
-            desc.size.depthOrArrayLayers = copy->copySize.depthOrArrayLayers;
-            desc.format = src.texture->GetFormat().format;
-            desc.mipLevelCount = 1;
-            desc.sampleCount = 1;
-
-            Ref<Texture> stagingTexture;
-            DAWN_TRY_ASSIGN(stagingTexture,
-                            Texture::CreateStaging(ToBackend(GetDevice()), &desc));
-
-            CopyTextureToTextureCmd copyTextureToBufferCmd;
-            copyTextureToBufferCmd.source = src;
-            copyTextureToBufferCmd.destination.texture = stagingTexture.Get();
-            copyTextureToBufferCmd.destination.origin = {0, 0, 0};
-            copyTextureToBufferCmd.destination.mipLevel = 0;
-            copyTextureToBufferCmd.destination.aspect = src.aspect;
-            copyTextureToBufferCmd.copySize = copy->copySize;
-
-            DAWN_TRY(Texture::Copy(commandContext, &copyTextureToBufferCmd));
-
             Buffer* buffer = ToBackend(dst.buffer.Get());
             Buffer::ScopedMap scopedDstMap;
             DAWN_TRY_ASSIGN(scopedDstMap, Buffer::ScopedMap::Create(buffer));
 
-            DAWN_TRY(buffer->EnsureDataInitializedAsDestination(commandContext, copy));
-
-            for (uint32_t z = 0; z < copy->copySize.depthOrArrayLayers; ++z) {
-                // Copy the staging texture to the buffer.
-                // The Map() will block until the GPU is done with the texture.
-                // TODO(dawn:1705): avoid blocking the CPU.
-                D3D11_MAPPED_SUBRESOURCE mappedResource;
-                DAWN_TRY(
-                    CheckHRESULT(d3d11DeviceContext1->Map(stagingTexture->GetD3D11Resource(), z,
-                                                          D3D11_MAP_READ, 0, &mappedResource),
-                                 "D3D11 map staging texture"));
-                uint8_t* pSrcData = reinterpret_cast<uint8_t*>(mappedResource.pData);
-                const TexelBlockInfo& blockInfo =
-                    ToBackend(src.texture)->GetFormat().GetAspectInfo(src.aspect).block;
-                uint32_t bytesPerRow = blockInfo.byteSize * copy->copySize.width;
-                if (scopedDstMap.GetMappedData()) {
-                    uint8_t* pDstData = scopedDstMap.GetMappedData() + dst.offset +
-                                        dst.bytesPerRow * dst.rowsPerImage * z;
-                    for (uint32_t y = 0; y < copy->copySize.height; ++y) {
-                        memcpy(pDstData, pSrcData, bytesPerRow);
-                        pDstData += dst.bytesPerRow;
-                        pSrcData += mappedResource.RowPitch;
-                    }
-                } else {
-                    uint64_t dstOffset = dst.offset + dst.bytesPerRow * dst.rowsPerImage * z;
-                    if (dst.bytesPerRow == bytesPerRow &&
-                        mappedResource.RowPitch == bytesPerRow) {
-                        // If there is no padding in the rows, we can upload the whole image in
-                        // one buffer->Write() call.
-                        DAWN_TRY(buffer->Write(commandContext, dstOffset, pSrcData,
-                                               dst.bytesPerRow * copy->copySize.height));
-                    } else {
-                        // Otherwise, we need to upload each row separately.
-                        for (uint32_t y = 0; y < copy->copySize.height; ++y) {
-                            DAWN_TRY(buffer->Write(commandContext, dstOffset, pSrcData,
-                                                   bytesPerRow));
-                            dstOffset += dst.bytesPerRow;
-                            pSrcData += mappedResource.RowPitch;
-                        }
-                    }
-                }
-                d3d11DeviceContext1->Unmap(stagingTexture->GetD3D11Resource(), z);
-            }
+            Texture::ReadCallback callback;
+            if (scopedDstMap.GetMappedData()) {
+                callback = [&](const uint8_t* data, uint64_t offset,
+                               uint64_t size) -> MaybeError {
+                    memcpy(scopedDstMap.GetMappedData() + dst.offset + offset, data, size);
+                    return {};
+                };
+            } else {
+                callback = [&](const uint8_t* data, uint64_t offset,
+                               uint64_t size) -> MaybeError {
+                    DAWN_TRY(ToBackend(dst.buffer)
+                                 ->Write(commandContext, dst.offset + offset, data, size));
+                    return {};
+                };
+            }
+
+            DAWN_TRY(ToBackend(src.texture)
+                         ->Read(commandContext, subresources, src.origin, copy->copySize,
+                                dst.bytesPerRow, dst.rowsPerImage, callback));
 
             dst.buffer->MarkUsedInPendingCommands();
             break;
         }

View File

@@ -50,6 +50,11 @@ MaybeError Queue::WriteBufferImpl(BufferBase* buffer,
                                   uint64_t bufferOffset,
                                   const void* data,
                                   size_t size) {
+    if (size == 0) {
+        // skip the empty write
+        return {};
+    }
+
     CommandRecordingContext* commandContext = ToBackend(GetDevice())->GetPendingCommandContext();
     return ToBackend(buffer)->Write(commandContext, bufferOffset, data, size);
 }

View File

@@ -49,6 +49,18 @@ UINT D3D11TextureBindFlags(wgpu::TextureUsage usage, const Format& format) {
     return bindFlags;
 }
 
+Aspect D3D11Aspect(Aspect aspect) {
+    // https://learn.microsoft.com/en-us/windows/win32/direct3d12/subresources
+    // Planar formats existed in Direct3D 11, but individual planes could not be addressed
+    // individually.
+    if (IsSubset(aspect, Aspect::Depth | Aspect::Stencil)) {
+        return Aspect::CombinedDepthStencil;
+    }
+
+    ASSERT(HasOneBit(aspect));
+    return aspect;
+}
+
 } // namespace
 
 // static
@@ -104,9 +116,9 @@ T Texture::GetD3D11TextureDesc() const {
     }
 
     desc.MipLevels = static_cast<UINT16>(GetNumMipLevels());
-    desc.Format = d3d::DXGITextureFormat(GetFormat().format);
+    desc.Format = GetD3D11Format();
     desc.Usage = mIsStaging ? D3D11_USAGE_STAGING : D3D11_USAGE_DEFAULT;
-    desc.BindFlags = D3D11TextureBindFlags(GetUsage(), GetFormat());
+    desc.BindFlags = D3D11TextureBindFlags(GetInternalUsage(), GetFormat());
     constexpr UINT kCPUReadWriteFlags = D3D11_CPU_ACCESS_READ | D3D11_CPU_ACCESS_WRITE;
     desc.CPUAccessFlags = mIsStaging ? kCPUReadWriteFlags : 0;
@@ -116,6 +128,12 @@ T Texture::GetD3D11TextureDesc() const {
 MaybeError Texture::InitializeAsInternalTexture() {
     Device* device = ToBackend(GetDevice());
 
+    if (GetFormat().isRenderable && !mIsStaging) {
+        // If the texture format is renderable, we need to add the render attachment usage
+        // internally, so the texture can be cleared with GPU.
+        AddInternalUsage(wgpu::TextureUsage::RenderAttachment);
+    }
+
     switch (GetDimension()) {
         case wgpu::TextureDimension::e1D: {
             D3D11_TEXTURE1D_DESC desc = GetD3D11TextureDesc<D3D11_TEXTURE1D_DESC>();
@@ -146,7 +164,8 @@ MaybeError Texture::InitializeAsInternalTexture() {
         }
     }
 
-    if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
+    // Staging texture is used internally, so we don't need to clear it.
+    if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting) && !mIsStaging) {
         CommandRecordingContext* commandContext = device->GetPendingCommandContext();
         DAWN_TRY(Clear(commandContext, GetAllSubresources(), TextureBase::ClearValue::NonZero));
     }
@@ -184,28 +203,6 @@ ID3D11Resource* Texture::GetD3D11Resource() const {
     return mD3d11Resource.Get();
 }
 
-DXGI_FORMAT Texture::GetD3D11CopyableSubresourceFormat(Aspect aspect) const {
-    // TODO(dawn:1705): share the code with D3D12
-    ASSERT(GetFormat().aspects & aspect);
-
-    switch (GetFormat().format) {
-        case wgpu::TextureFormat::Depth24PlusStencil8:
-        case wgpu::TextureFormat::Depth32FloatStencil8:
-        case wgpu::TextureFormat::Stencil8:
-            switch (aspect) {
-                case Aspect::Depth:
-                    return DXGI_FORMAT_R32_FLOAT;
-                case Aspect::Stencil:
-                    return DXGI_FORMAT_R8_UINT;
-                default:
-                    UNREACHABLE();
-            }
-        default:
-            ASSERT(HasOneBit(GetFormat().aspects));
-            return GetD3D11Format();
-    }
-}
-
 D3D11_RENDER_TARGET_VIEW_DESC Texture::GetRTVDescriptor(const Format& format,
                                                         const SubresourceRange& range) const {
     D3D11_RENDER_TARGET_VIEW_DESC rtvDesc;
@@ -278,12 +275,16 @@ D3D11_DEPTH_STENCIL_VIEW_DESC Texture::GetDSVDescriptor(const SubresourceRange&
 MaybeError Texture::Clear(CommandRecordingContext* commandContext,
                           const SubresourceRange& range,
                           TextureBase::ClearValue clearValue) {
-    // TODO(dawn:1740): Clear non-renderable texture.
-    if ((GetUsage() & wgpu::TextureUsage::RenderAttachment) == 0) {
-        return DAWN_UNIMPLEMENTED_ERROR("Clearing non-renderable textures is not implemented");
+    bool isRenderable = GetInternalUsage() & wgpu::TextureUsage::RenderAttachment;
+
+    if (!isRenderable) {
+        return DAWN_UNIMPLEMENTED_ERROR("Clearing non-renderable textures");
     }
 
-    TextureViewDescriptor desc;
+    ID3D11DeviceContext* d3d11DeviceContext = commandContext->GetD3D11DeviceContext();
+
+    TextureViewDescriptor desc = {};
+    desc.label = "ClearTextureView";
     desc.format = GetFormat().format;
     switch (GetDimension()) {
         case wgpu::TextureDimension::e1D:
@@ -296,11 +297,6 @@ MaybeError Texture::Clear(CommandRecordingContext* commandContext,
             desc.dimension = wgpu::TextureViewDimension::e3D;
             break;
     }
 
-    // TODO(dawn:1740): support clearing multiple layers.
-    if (range.levelCount != 1 || range.layerCount != 1) {
-        return DAWN_UNIMPLEMENTED_ERROR("Clearing multiple layers is not implemented");
-    }
-
     desc.baseMipLevel = range.baseMipLevel;
     desc.mipLevelCount = range.levelCount;
     desc.baseArrayLayer = range.baseArrayLayer;
@@ -308,14 +304,36 @@ MaybeError Texture::Clear(CommandRecordingContext* commandContext,
     desc.aspect = wgpu::TextureAspect::All;
 
     Ref<TextureView> view = TextureView::Create(this, &desc);
 
-    ComPtr<ID3D11RenderTargetView> d3d11RTV;
-    DAWN_TRY_ASSIGN(d3d11RTV, view->CreateD3D11RenderTargetView());
-
-    static constexpr std::array<float, 4> zero = {0.0f, 0.0f, 0.0f, 0.0f};
-    static constexpr std::array<float, 4> nonZero = {1.0f, 1.0f, 1.0f, 1.0f};
-
-    commandContext->GetD3D11DeviceContext()->ClearRenderTargetView(
-        d3d11RTV.Get(), clearValue == TextureBase::ClearValue::Zero ? zero.data() : nonZero.data());
+    if (GetFormat().HasDepthOrStencil()) {
+        ComPtr<ID3D11DepthStencilView> d3d11DSV;
+        DAWN_TRY_ASSIGN(d3d11DSV, view->CreateD3D11DepthStencilView(/*depthReadOnly=*/false,
+                                                                    /*stencilReadOnly=*/false));
+        UINT clearFlags = 0;
+        if (GetFormat().HasDepth() && range.aspects & Aspect::Depth) {
+            clearFlags |= D3D11_CLEAR_DEPTH;
+        }
+        if (GetFormat().HasStencil() && range.aspects & Aspect::Stencil) {
+            clearFlags |= D3D11_CLEAR_STENCIL;
+        }
+        d3d11DeviceContext->ClearDepthStencilView(
+            d3d11DSV.Get(), clearFlags, clearValue == TextureBase::ClearValue::Zero ? 0.0f : 1.0f,
+            0);
+    } else {
+        static constexpr std::array<float, 4> kZero = {0.0f, 0.0f, 0.0f, 0.0f};
+        static constexpr std::array<float, 4> kNonZero = {1.0f, 1.0f, 1.0f, 1.0f};
+        ComPtr<ID3D11RenderTargetView> d3d11RTV;
+        DAWN_TRY_ASSIGN(d3d11RTV, view->CreateD3D11RenderTargetView());
+        d3d11DeviceContext->ClearRenderTargetView(
+            d3d11RTV.Get(),
+            clearValue == TextureBase::ClearValue::Zero ? kZero.data() : kNonZero.data());
+    }
+
+    if (clearValue == TextureBase::ClearValue::Zero) {
+        SetIsSubresourceContentInitialized(true, range);
+        GetDevice()->IncrementLazyClearCountForTesting();
+    }
 
     return {};
 }
@@ -348,11 +366,18 @@ MaybeError Texture::Write(CommandRecordingContext* commandContext,
                           const uint8_t* data,
                           uint32_t bytesPerRow,
                           uint32_t rowsPerImage) {
-    DAWN_ASSERT(size.width != 0 && size.height != 0 && size.depthOrArrayLayers != 0);
+    ASSERT(size.width != 0 && size.height != 0 && size.depthOrArrayLayers != 0);
+
+    if (GetFormat().HasDepth() && GetFormat().HasStencil()) {
+        return DAWN_UNIMPLEMENTED_ERROR("Write combined depth/stencil textures");
+    }
+
     if (IsCompleteSubresourceCopiedTo(this, size, subresources.baseMipLevel)) {
         SetIsSubresourceContentInitialized(true, subresources);
     } else {
+        // Dawn validation should have ensured that full subresources write for depth/stencil
+        // textures.
+        ASSERT(!GetFormat().HasDepthOrStencil());
         DAWN_TRY(EnsureSubresourceContentInitialized(commandContext, subresources));
     }
@@ -366,17 +391,20 @@ MaybeError Texture::Write(CommandRecordingContext* commandContext,
         dstBox.front = origin.z;
         dstBox.back = origin.z + size.depthOrArrayLayers;
         uint32_t subresource =
-            GetSubresourceIndex(subresources.baseMipLevel, origin.z, subresources.aspects);
+            GetSubresourceIndex(subresources.baseMipLevel, 0, D3D11Aspect(subresources.aspects));
         commandContext->GetD3D11DeviceContext1()->UpdateSubresource(GetD3D11Resource(), subresource,
-                                                                    &dstBox, data, bytesPerRow, 0);
+                                                                    &dstBox, data, bytesPerRow,
+                                                                    bytesPerRow * rowsPerImage);
     } else {
         dstBox.front = 0;
         dstBox.back = 1;
-        for (uint32_t z = origin.z; z < size.depthOrArrayLayers; ++z) {
+        for (uint32_t layer = 0; layer < subresources.layerCount; ++layer) {
             uint32_t subresource =
-                GetSubresourceIndex(subresources.baseMipLevel, z, subresources.aspects);
+                GetSubresourceIndex(subresources.baseMipLevel, subresources.baseArrayLayer + layer,
+                                    D3D11Aspect(subresources.aspects));
+            D3D11_BOX* pDstBox = GetFormat().HasDepthOrStencil() ? nullptr : &dstBox;
             commandContext->GetD3D11DeviceContext1()->UpdateSubresource(
-                GetD3D11Resource(), subresource, &dstBox, data, bytesPerRow, 0);
+                GetD3D11Resource(), subresource, pDstBox, data, bytesPerRow, 0);
             data += rowsPerImage * bytesPerRow;
         }
     }
@@ -384,6 +412,124 @@ MaybeError Texture::Write(CommandRecordingContext* commandContext,
     return {};
 }
 
+MaybeError Texture::ReadStaging(CommandRecordingContext* commandContext,
+                                const SubresourceRange& subresources,
+                                const Origin3D& origin,
+                                Extent3D size,
+                                uint32_t dstBytesPerRow,
+                                uint32_t dstRowsPerImage,
+                                Texture::ReadCallback callback) {
+    ASSERT(size.width != 0 && size.height != 0 && size.depthOrArrayLayers != 0);
+    ASSERT(mIsStaging);
+    ASSERT(subresources.baseArrayLayer == 0);
+    ASSERT(origin.z == 0);
+
+    ID3D11DeviceContext1* d3d11DeviceContext1 = commandContext->GetD3D11DeviceContext1();
+    const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(subresources.aspects).block;
+
+    if (GetDimension() == wgpu::TextureDimension::e2D) {
+        for (uint32_t layer = 0; layer < subresources.layerCount; ++layer) {
+            // Copy the staging texture to the buffer.
+            // The Map() will block until the GPU is done with the texture.
+            // TODO(dawn:1705): avoid blocking the CPU.
+            D3D11_MAPPED_SUBRESOURCE mappedResource;
+            DAWN_TRY(CheckHRESULT(d3d11DeviceContext1->Map(GetD3D11Resource(), layer,
+                                                           D3D11_MAP_READ, 0, &mappedResource),
+                                  "D3D11 map staging texture"));
+
+            uint8_t* pSrcData = static_cast<uint8_t*>(mappedResource.pData);
+            uint32_t bytesPerRow = blockInfo.byteSize * size.width;
+            uint64_t dstOffset = dstBytesPerRow * dstRowsPerImage * layer;
+            if (dstBytesPerRow == bytesPerRow && mappedResource.RowPitch == bytesPerRow) {
+                // If there is no padding in the rows, we can upload the whole image
+                // in one read.
+                DAWN_TRY(callback(pSrcData, dstOffset, dstBytesPerRow * size.height));
+            } else {
+                // Otherwise, we need to read each row separately.
+                for (uint32_t y = 0; y < size.height; ++y) {
+                    DAWN_TRY(callback(pSrcData, dstOffset, bytesPerRow));
+                    dstOffset += dstBytesPerRow;
+                    pSrcData += mappedResource.RowPitch;
+                }
+            }
+            d3d11DeviceContext1->Unmap(GetD3D11Resource(), layer);
+        }
+        return {};
+    }
+
+    // 3D textures are copied one slice at a time.
+    // Copy the staging texture to the buffer.
+    // The Map() will block until the GPU is done with the texture.
+    // TODO(dawn:1705): avoid blocking the CPU.
+    D3D11_MAPPED_SUBRESOURCE mappedResource;
+    DAWN_TRY(CheckHRESULT(
+        d3d11DeviceContext1->Map(GetD3D11Resource(), 0, D3D11_MAP_READ, 0, &mappedResource),
+        "D3D11 map staging texture"));
+
+    uint32_t bytesPerRow = blockInfo.byteSize * size.width;
+    for (uint32_t z = 0; z < size.depthOrArrayLayers; ++z) {
+        uint64_t dstOffset = dstBytesPerRow * dstRowsPerImage * z;
+        uint8_t* pSrcData =
+            static_cast<uint8_t*>(mappedResource.pData) + z * mappedResource.DepthPitch;
+        if (dstBytesPerRow == bytesPerRow && mappedResource.RowPitch == bytesPerRow) {
+            // If there is no padding in the rows, we can upload the whole image
+            // in one read.
+            DAWN_TRY(callback(pSrcData, dstOffset, bytesPerRow * size.height));
+        } else {
+            // Otherwise, we need to read each row separately.
+            for (uint32_t y = 0; y < size.height; ++y) {
+                DAWN_TRY(callback(pSrcData, dstOffset, bytesPerRow));
+                dstOffset += dstBytesPerRow;
+                pSrcData += mappedResource.RowPitch;
+            }
+        }
+    }
+    d3d11DeviceContext1->Unmap(GetD3D11Resource(), 0);
+
+    return {};
+}
+
+MaybeError Texture::Read(CommandRecordingContext* commandContext,
+                         const SubresourceRange& subresources,
+                         const Origin3D& origin,
+                         Extent3D size,
+                         uint32_t dstBytesPerRow,
+                         uint32_t dstRowsPerImage,
+                         Texture::ReadCallback callback) {
+    ASSERT(size.width != 0 && size.height != 0 && size.depthOrArrayLayers != 0);
+    ASSERT(!mIsStaging);
+
+    DAWN_TRY(EnsureSubresourceContentInitialized(commandContext, subresources));
+
+    TextureDescriptor desc = {};
+    desc.label = "CopyTextureToBufferStaging";
+    desc.dimension = GetDimension();
+    desc.size = size;
+    desc.format = GetFormat().format;
+    desc.mipLevelCount = subresources.levelCount;
+    desc.sampleCount = GetSampleCount();
+
+    Ref<Texture> stagingTexture;
+    DAWN_TRY_ASSIGN(stagingTexture, CreateStaging(ToBackend(GetDevice()), &desc));
+
+    CopyTextureToTextureCmd copyCmd;
+    copyCmd.source.texture = this;
+    copyCmd.source.origin = origin;
+    copyCmd.source.mipLevel = subresources.baseMipLevel;
+    copyCmd.source.aspect = subresources.aspects;
+    copyCmd.destination.texture = stagingTexture.Get();
+    copyCmd.destination.origin = {0, 0, 0};
+    copyCmd.destination.mipLevel = 0;
+    copyCmd.destination.aspect = subresources.aspects;
+    copyCmd.copySize = size;
+
+    DAWN_TRY(Texture::Copy(commandContext, &copyCmd));
+
+    SubresourceRange stagingSubresources = SubresourceRange::MakeFull(
+        subresources.aspects, subresources.layerCount, subresources.levelCount);
+
+    return stagingTexture->ReadStaging(commandContext, stagingSubresources, {0, 0, 0}, size,
+                                       dstBytesPerRow, dstRowsPerImage, callback);
+}
+
 // static
 MaybeError Texture::Copy(CommandRecordingContext* commandContext, CopyTextureToTextureCmd* copy) {
     ASSERT(copy->copySize.width != 0 && copy->copySize.height != 0 &&
@@ -392,39 +538,63 @@ MaybeError Texture::Copy(CommandRecordingContext* commandContext, CopyTextureToT
     auto& src = copy->source;
     auto& dst = copy->destination;
 
-    SubresourceRange subresources = GetSubresourcesAffectedByCopy(src, copy->copySize);
-    DAWN_TRY(
-        ToBackend(src.texture)->EnsureSubresourceContentInitialized(commandContext, subresources));
-
-    subresources = GetSubresourcesAffectedByCopy(dst, copy->copySize);
-    if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), copy->copySize, dst.mipLevel)) {
-        dst.texture->SetIsSubresourceContentInitialized(true, subresources);
-    } else {
-        // Partial update subresource of a depth/stencil texture is not allowed.
-        DAWN_ASSERT(!dst.texture->GetFormat().HasDepthOrStencil());
-        DAWN_TRY(ToBackend(dst.texture)
-                     ->EnsureSubresourceContentInitialized(commandContext, subresources));
-    }
-
-    bool isWholeTextureCopy =
-        src.texture->GetSize() == copy->copySize && dst.texture->GetSize() == copy->copySize;
-    // Partial update subresource of a depth/stencil texture is not allowed.
-    DAWN_ASSERT(isWholeTextureCopy || !dst.texture->GetFormat().HasDepthOrStencil());
+    ASSERT(src.aspect == dst.aspect);
+
+    // TODO(dawn:1705): support copy between textures with different dimensions.
+    if (src.texture->GetDimension() != dst.texture->GetDimension()) {
+        return DAWN_UNIMPLEMENTED_ERROR("Copy between textures with different dimensions");
+    }
+
+    SubresourceRange srcSubresources = GetSubresourcesAffectedByCopy(src, copy->copySize);
+    DAWN_TRY(ToBackend(src.texture)
+                 ->EnsureSubresourceContentInitialized(commandContext, srcSubresources));
+
+    SubresourceRange dstSubresources = GetSubresourcesAffectedByCopy(dst, copy->copySize);
+    if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), copy->copySize, dst.mipLevel)) {
+        dst.texture->SetIsSubresourceContentInitialized(true, dstSubresources);
+    } else {
+        // Partial update subresource of a depth/stencil texture is not allowed.
+        ASSERT(!dst.texture->GetFormat().HasDepthOrStencil());
+        DAWN_TRY(ToBackend(dst.texture)
+                     ->EnsureSubresourceContentInitialized(commandContext, dstSubresources));
+    }
 
     D3D11_BOX srcBox;
     srcBox.left = src.origin.x;
     srcBox.right = src.origin.x + copy->copySize.width;
     srcBox.top = src.origin.y;
     srcBox.bottom = src.origin.y + copy->copySize.height;
-    srcBox.front = 0;
-    srcBox.back = 1;
-
-    uint32_t subresource = src.texture->GetSubresourceIndex(src.mipLevel, src.origin.z, src.aspect);
-
-    commandContext->GetD3D11DeviceContext1()->CopySubresourceRegion(
-        ToBackend(dst.texture)->GetD3D11Resource(), dst.mipLevel, dst.origin.x, dst.origin.y,
-        dst.origin.z, ToBackend(src.texture)->GetD3D11Resource(), subresource,
-        isWholeTextureCopy ? nullptr : &srcBox);
+    switch (src.texture->GetDimension()) {
+        case wgpu::TextureDimension::e2D:
+            srcBox.front = 0;
+            srcBox.back = 1;
+            break;
+        case wgpu::TextureDimension::e3D:
+            srcBox.front = src.origin.z;
+            srcBox.back = src.origin.z + copy->copySize.depthOrArrayLayers;
+            break;
+        default:
+            // TODO(dawn:1705): support 1d texture.
+            UNREACHABLE();
+    }
+
+    bool isWholeSubresource = src.texture->CoverFullSubresource(copy->copySize);
+    // Partial update subresource of a depth/stencil texture is not allowed.
+    ASSERT(isWholeSubresource || !src.texture->GetFormat().HasDepthOrStencil());
+
+    for (uint32_t layer = 0; layer < srcSubresources.layerCount; ++layer) {
+        uint32_t srcSubresource =
+            src.texture->GetSubresourceIndex(src.mipLevel, srcSubresources.baseArrayLayer + layer,
+                                             D3D11Aspect(srcSubresources.aspects));
+        uint32_t dstSubresource =
+            dst.texture->GetSubresourceIndex(dst.mipLevel, dstSubresources.baseArrayLayer + layer,
+                                             D3D11Aspect(dstSubresources.aspects));
+        commandContext->GetD3D11DeviceContext1()->CopySubresourceRegion(
+            ToBackend(dst.texture)->GetD3D11Resource(), dstSubresource, dst.origin.x, dst.origin.y,
+            0, ToBackend(src.texture)->GetD3D11Resource(), srcSubresource,
+            isWholeSubresource ? nullptr : &srcBox);
+    }
 
     return {};
 }
@@ -444,7 +614,7 @@ DXGI_FORMAT TextureView::GetD3D11Format() const {
 ResultOrError<ComPtr<ID3D11ShaderResourceView>> TextureView::CreateD3D11ShaderResourceView() const {
     Device* device = ToBackend(GetDevice());
     D3D11_SHADER_RESOURCE_VIEW_DESC srvDesc;
-    srvDesc.Format = d3d::DXGITextureFormat(GetFormat().format);
+    srvDesc.Format = GetD3D11Format();
     const Format& textureFormat = GetTexture()->GetFormat();
 
     // TODO(dawn:1705): share below code with D3D12?

View File

@@ -37,14 +37,10 @@ class Texture final : public TextureBase {
     static ResultOrError<Ref<Texture>> Create(Device* device,
                                               const TextureDescriptor* descriptor,
                                               ComPtr<ID3D11Resource> d3d11Texture);
-    static ResultOrError<Ref<Texture>> CreateStaging(Device* device,
-                                                     const TextureDescriptor* descriptor);
 
     DXGI_FORMAT GetD3D11Format() const;
     ID3D11Resource* GetD3D11Resource() const;
-    DXGI_FORMAT GetD3D11CopyableSubresourceFormat(Aspect aspect) const;
 
     D3D11_RENDER_TARGET_VIEW_DESC GetRTVDescriptor(const Format& format,
                                                    const SubresourceRange& range) const;
     D3D11_DEPTH_STENCIL_VIEW_DESC GetDSVDescriptor(const SubresourceRange& range,
@@ -61,9 +57,20 @@ class Texture final : public TextureBase {
                      const uint8_t* data,
                      uint32_t bytesPerRow,
                      uint32_t rowsPerImage);
+    using ReadCallback = std::function<MaybeError(const uint8_t* data, size_t offset, size_t size)>;
+    MaybeError Read(CommandRecordingContext* commandContext,
+                    const SubresourceRange& subresources,
+                    const Origin3D& origin,
+                    Extent3D size,
+                    uint32_t bytesPerRow,
+                    uint32_t rowsPerImage,
+                    ReadCallback callback);
     static MaybeError Copy(CommandRecordingContext* commandContext, CopyTextureToTextureCmd* copy);
 
   private:
+    static ResultOrError<Ref<Texture>> CreateStaging(Device* device,
+                                                     const TextureDescriptor* descriptor);
+
     Texture(Device* device,
             const TextureDescriptor* descriptor,
             TextureState state,
@@ -85,6 +92,13 @@ class Texture final : public TextureBase {
     MaybeError Clear(CommandRecordingContext* commandContext,
                      const SubresourceRange& range,
                      TextureBase::ClearValue clearValue);
+    MaybeError ReadStaging(CommandRecordingContext* commandContext,
+                           const SubresourceRange& subresources,
+                           const Origin3D& origin,
+                           Extent3D size,
+                           uint32_t bytesPerRow,
+                           uint32_t rowsPerImage,
+                           ReadCallback callback);
 
     const bool mIsStaging = false;
     ComPtr<ID3D11Resource> mD3d11Resource;

View File

@@ -1353,6 +1353,7 @@ TEST_P(CopyTests_T2B, Texture3DMipUnaligned) {
 }
 
 DAWN_INSTANTIATE_TEST(CopyTests_T2B,
+                      D3D11Backend(),
                       D3D12Backend(),
                       MetalBackend(),
                       OpenGLBackend(),
@@ -1946,6 +1947,7 @@ TEST_P(CopyTests_B2T, Texture3DMipUnaligned) {
 }
 
 DAWN_INSTANTIATE_TEST(CopyTests_B2T,
+                      D3D11Backend(),
                       D3D12Backend(),
                       MetalBackend(),
                       OpenGLBackend(),
@@ -2444,6 +2446,7 @@ TEST_P(CopyTests_T2T, Texture3DMipUnaligned) {
     }
 }
 
+// TODO(dawn:1705): enable this test for D3D11
 DAWN_INSTANTIATE_TEST_P(
     CopyTests_T2T,
     {D3D12Backend(),
@@ -2470,8 +2473,8 @@ TEST_P(CopyTests_Formats, SrgbCompatibility) {
 }
 
 DAWN_INSTANTIATE_TEST_P(CopyTests_Formats,
-                        {D3D12Backend(), MetalBackend(), OpenGLBackend(), OpenGLESBackend(),
-                         VulkanBackend()},
+                        {D3D11Backend(), D3D12Backend(), MetalBackend(), OpenGLBackend(),
+                         OpenGLESBackend(), VulkanBackend()},
                         {wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureFormat::RGBA8UnormSrgb,
                          wgpu::TextureFormat::BGRA8Unorm, wgpu::TextureFormat::BGRA8UnormSrgb});
@@ -2502,6 +2505,7 @@ TEST_P(CopyTests_B2B, ZeroSizedCopy) {
 }
 
 DAWN_INSTANTIATE_TEST(CopyTests_B2B,
+                      D3D11Backend(),
                       D3D12Backend(),
                       MetalBackend(),
                       OpenGLBackend(),
@@ -2530,6 +2534,7 @@ TEST_P(ClearBufferTests, ZeroSizedClear) {
 }
 
 DAWN_INSTANTIATE_TEST(ClearBufferTests,
+                      D3D11Backend(),
                       D3D12Backend(),
                       MetalBackend(),
                       OpenGLBackend(),
@@ -2712,8 +2717,9 @@ TEST_P(CopyToDepthStencilTextureAfterDestroyingBigBufferTests, DoTest) {
 DAWN_INSTANTIATE_TEST_P(
     CopyToDepthStencilTextureAfterDestroyingBigBufferTests,
-    {D3D12Backend(), D3D12Backend({"d3d12_force_clear_copyable_depth_stencil_texture_on_creation"}),
-     MetalBackend(), OpenGLBackend(), OpenGLESBackend(), VulkanBackend()},
+    {D3D11Backend(), D3D12Backend(),
+     D3D12Backend({"d3d12_force_clear_copyable_depth_stencil_texture_on_creation"}), MetalBackend(),
+     OpenGLBackend(), OpenGLESBackend(), VulkanBackend()},
     {wgpu::TextureFormat::Depth16Unorm, wgpu::TextureFormat::Stencil8},
     {InitializationMethod::CopyBufferToTexture, InitializationMethod::WriteTexture,
      InitializationMethod::CopyTextureToTexture},