Update arg names for GPUComputePassEncoder.dispatch()
Slightly silly, since it has no effect on API use, but it is nice to keep the arg names in sync with the spec.

Bug: dawn:1270
Change-Id: I1f8cfabefb3a721691c092815cbb66c959980b5e
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/78245
Auto-Submit: Brandon Jones <bajones@chromium.org>
Reviewed-by: Corentin Wallez <cwallez@chromium.org>
Commit-Queue: Corentin Wallez <cwallez@chromium.org>
This commit is contained in:
parent
153d1cfece
commit
913e158429
|
@ -799,9 +799,9 @@
|
||||||
{
|
{
|
||||||
"name": "dispatch",
|
"name": "dispatch",
|
||||||
"args": [
|
"args": [
|
||||||
{"name": "x", "type": "uint32_t"},
|
{"name": "workgroupCountX", "type": "uint32_t"},
|
||||||
{"name": "y", "type": "uint32_t", "default": "1"},
|
{"name": "workgroupCountY", "type": "uint32_t", "default": "1"},
|
||||||
{"name": "z", "type": "uint32_t", "default": "1"}
|
{"name": "workgroupCountZ", "type": "uint32_t", "default": "1"}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|
|
@ -159,7 +159,9 @@ namespace dawn::native {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void ComputePassEncoder::APIDispatch(uint32_t x, uint32_t y, uint32_t z) {
|
void ComputePassEncoder::APIDispatch(uint32_t workgroupCountX,
|
||||||
|
uint32_t workgroupCountY,
|
||||||
|
uint32_t workgroupCountZ) {
|
||||||
mEncodingContext->TryEncode(
|
mEncodingContext->TryEncode(
|
||||||
this,
|
this,
|
||||||
[&](CommandAllocator* allocator) -> MaybeError {
|
[&](CommandAllocator* allocator) -> MaybeError {
|
||||||
|
@ -169,20 +171,20 @@ namespace dawn::native {
|
||||||
uint32_t workgroupsPerDimension =
|
uint32_t workgroupsPerDimension =
|
||||||
GetDevice()->GetLimits().v1.maxComputeWorkgroupsPerDimension;
|
GetDevice()->GetLimits().v1.maxComputeWorkgroupsPerDimension;
|
||||||
|
|
||||||
DAWN_INVALID_IF(
|
DAWN_INVALID_IF(workgroupCountX > workgroupsPerDimension,
|
||||||
x > workgroupsPerDimension,
|
"Dispatch workgroup count X (%u) exceeds max compute "
|
||||||
"Dispatch size X (%u) exceeds max compute workgroups per dimension (%u).",
|
"workgroups per dimension (%u).",
|
||||||
x, workgroupsPerDimension);
|
workgroupCountX, workgroupsPerDimension);
|
||||||
|
|
||||||
DAWN_INVALID_IF(
|
DAWN_INVALID_IF(workgroupCountY > workgroupsPerDimension,
|
||||||
y > workgroupsPerDimension,
|
"Dispatch workgroup count Y (%u) exceeds max compute "
|
||||||
"Dispatch size Y (%u) exceeds max compute workgroups per dimension (%u).",
|
"workgroups per dimension (%u).",
|
||||||
y, workgroupsPerDimension);
|
workgroupCountY, workgroupsPerDimension);
|
||||||
|
|
||||||
DAWN_INVALID_IF(
|
DAWN_INVALID_IF(workgroupCountZ > workgroupsPerDimension,
|
||||||
z > workgroupsPerDimension,
|
"Dispatch workgroup count Z (%u) exceeds max compute "
|
||||||
"Dispatch size Z (%u) exceeds max compute workgroups per dimension (%u).",
|
"workgroups per dimension (%u).",
|
||||||
z, workgroupsPerDimension);
|
workgroupCountZ, workgroupsPerDimension);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Record the synchronization scope for Dispatch, which is just the current
|
// Record the synchronization scope for Dispatch, which is just the current
|
||||||
|
@ -190,13 +192,14 @@ namespace dawn::native {
|
||||||
AddDispatchSyncScope();
|
AddDispatchSyncScope();
|
||||||
|
|
||||||
DispatchCmd* dispatch = allocator->Allocate<DispatchCmd>(Command::Dispatch);
|
DispatchCmd* dispatch = allocator->Allocate<DispatchCmd>(Command::Dispatch);
|
||||||
dispatch->x = x;
|
dispatch->x = workgroupCountX;
|
||||||
dispatch->y = y;
|
dispatch->y = workgroupCountY;
|
||||||
dispatch->z = z;
|
dispatch->z = workgroupCountZ;
|
||||||
|
|
||||||
return {};
|
return {};
|
||||||
},
|
},
|
||||||
"encoding %s.Dispatch(%u, %u, %u).", this, x, y, z);
|
"encoding %s.Dispatch(%u, %u, %u).", this, workgroupCountX, workgroupCountY,
|
||||||
|
workgroupCountZ);
|
||||||
}
|
}
|
||||||
|
|
||||||
ResultOrError<std::pair<Ref<BufferBase>, uint64_t>>
|
ResultOrError<std::pair<Ref<BufferBase>, uint64_t>>
|
||||||
|
|
|
@ -40,7 +40,9 @@ namespace dawn::native {
|
||||||
|
|
||||||
void APIEndPass();
|
void APIEndPass();
|
||||||
|
|
||||||
void APIDispatch(uint32_t x, uint32_t y = 1, uint32_t z = 1);
|
void APIDispatch(uint32_t workgroupCountX,
|
||||||
|
uint32_t workgroupCountY = 1,
|
||||||
|
uint32_t workgroupCountZ = 1);
|
||||||
void APIDispatchIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset);
|
void APIDispatchIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset);
|
||||||
void APISetPipeline(ComputePipelineBase* pipeline);
|
void APISetPipeline(ComputePipelineBase* pipeline);
|
||||||
|
|
||||||
|
|
|
@ -37,10 +37,10 @@ namespace wgpu::binding {
|
||||||
}
|
}
|
||||||
|
|
||||||
void GPUComputePassEncoder::dispatch(Napi::Env,
|
void GPUComputePassEncoder::dispatch(Napi::Env,
|
||||||
interop::GPUSize32 x,
|
interop::GPUSize32 workgroupCountX,
|
||||||
interop::GPUSize32 y,
|
interop::GPUSize32 workgroupCountY,
|
||||||
interop::GPUSize32 z) {
|
interop::GPUSize32 workgroupCountZ) {
|
||||||
enc_.Dispatch(x, y, z);
|
enc_.Dispatch(workgroupCountX, workgroupCountY, workgroupCountZ);
|
||||||
}
|
}
|
||||||
|
|
||||||
void GPUComputePassEncoder::dispatchIndirect(
|
void GPUComputePassEncoder::dispatchIndirect(
|
||||||
|
|
|
@ -37,9 +37,9 @@ namespace wgpu::binding {
|
||||||
void setPipeline(Napi::Env,
|
void setPipeline(Napi::Env,
|
||||||
interop::Interface<interop::GPUComputePipeline> pipeline) override;
|
interop::Interface<interop::GPUComputePipeline> pipeline) override;
|
||||||
void dispatch(Napi::Env,
|
void dispatch(Napi::Env,
|
||||||
interop::GPUSize32 x,
|
interop::GPUSize32 workgroupCountX,
|
||||||
interop::GPUSize32 y,
|
interop::GPUSize32 workgroupCountY,
|
||||||
interop::GPUSize32 z) override;
|
interop::GPUSize32 workgroupCountZ) override;
|
||||||
void dispatchIndirect(Napi::Env,
|
void dispatchIndirect(Napi::Env,
|
||||||
interop::Interface<interop::GPUBuffer> indirectBuffer,
|
interop::Interface<interop::GPUBuffer> indirectBuffer,
|
||||||
interop::GPUSize64 indirectOffset) override;
|
interop::GPUSize64 indirectOffset) override;
|
||||||
|
|
Loading…
Reference in New Issue