Fix inclusive language presubmit

The current presubmit has the file filter inverted, so it would only
attempt to match the files that were meant to be filtered out. The file
name also has to be retrieved with `LocalPath()`; otherwise the filter
compares a Python object to a string and never matches.
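For illustration, here is a minimal self-contained sketch of the corrected
logic, mirroring the PRESUBMIT.py hunks below. `FakeAffectedFile`,
`FILTER_LIST`, and the `__main__` driver are illustrative stand-ins, not
part of the change; the real check receives affected files from the
presubmit API and uses `NONINCLUSIVE_REGEX_LIST`.

```python
import re


class FakeAffectedFile(object):
    """Illustrative stand-in for the presubmit API's affected-file object."""

    def __init__(self, path, lines):
        self._path = path
        self._lines = lines

    def LocalPath(self):
        return self._path

    def NewContents(self):
        return self._lines


# A couple of entries from the exclusion list in PRESUBMIT.py.
FILTER_LIST = [
    "PRESUBMIT.py",  # Non-inclusive language check data
    "docs/tint/spirv-input-output-variables.md",  # External URL
]


def non_inclusive_file_filter(f):
    # Before the fix this was `return f in FILTER_LIST`, which compares an
    # object against strings (always False) and, even if it had matched,
    # would have kept only the files that were supposed to be skipped.
    return f.LocalPath() not in FILTER_LIST


def check_non_inclusive_language(files, regex_list):
    # Walk the new contents with an explicit line counter, as the diff
    # changes the check from ChangedContents() to NewContents().
    matches = []
    for f in filter(non_inclusive_file_filter, files):
        line_num = 0
        for line in f.NewContents():
            line_num += 1
            for reg in regex_list:
                if reg.search(line):
                    matches.append("%s:%d" % (f.LocalPath(), line_num))
    return matches


if __name__ == "__main__":
    files = [
        FakeAffectedFile("PRESUBMIT.py", ["NONINCLUSIVE = ['dummy']"]),
        FakeAffectedFile("src/foo.cpp", ["// a dummy value"]),
    ]
    # "dummy" is only an example term here; the real list lives in
    # NONINCLUSIVE_REGEX_LIST inside PRESUBMIT.py.
    print(check_non_inclusive_language(files, [re.compile(r"\bdummy\b")]))
```

Running the sketch prints only the src/foo.cpp hit; PRESUBMIT.py itself is
skipped by the filter.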

Bug: dawn:1339
Change-Id: Ie7712dee60f6b9df2cb78c9feab11769f7ea1f02
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/87080
Reviewed-by: Corentin Wallez <cwallez@chromium.org>
Commit-Queue: Dan Sinclair <dsinclair@chromium.org>
Auto-Submit: Dan Sinclair <dsinclair@chromium.org>
dan sinclair 2022-04-19 22:25:45 +00:00 committed by Dawn LUCI CQ
parent 6a3373e419
commit fb5a492787
88 changed files with 574 additions and 534 deletions

View File

@ -14,7 +14,7 @@
cmake_minimum_required(VERSION 3.10.2) cmake_minimum_required(VERSION 3.10.2)
# When upgrading to CMake 3.11 we can remove DAWN_DUMMY_FILE because source-less add_library # When upgrading to CMake 3.11 we can remove DAWN_PLACEHOLDER_FILE because source-less add_library
# becomes available. # becomes available.
# When upgrading to CMake 3.12 we should add CONFIGURE_DEPENDS to DawnGenerator to rerun CMake in # When upgrading to CMake 3.12 we should add CONFIGURE_DEPENDS to DawnGenerator to rerun CMake in
# case any of the generator files changes. We should also remove the CACHE "" FORCE stuff to # case any of the generator files changes. We should also remove the CACHE "" FORCE stuff to
@ -46,7 +46,7 @@ set(DAWN_SRC_DIR "${Dawn_SOURCE_DIR}/src")
set(DAWN_INCLUDE_DIR "${Dawn_SOURCE_DIR}/include") set(DAWN_INCLUDE_DIR "${Dawn_SOURCE_DIR}/include")
set(DAWN_TEMPLATE_DIR "${DAWN_GENERATOR_DIR}/templates") set(DAWN_TEMPLATE_DIR "${DAWN_GENERATOR_DIR}/templates")
set(DAWN_DUMMY_FILE "${DAWN_SRC_DIR}/Dummy.cpp") set(DAWN_PLACEHOLDER_FILE "${DAWN_SRC_DIR}/Placeholder.cpp")
################################################################################ ################################################################################
# Configuration options # Configuration options

View File

@ -80,7 +80,9 @@ def _CheckNonInclusiveLanguage(input_api, output_api, source_file_filter=None):
matches = [] matches = []
for f in input_api.AffectedFiles(include_deletes=False, for f in input_api.AffectedFiles(include_deletes=False,
file_filter=source_file_filter): file_filter=source_file_filter):
for line_num, line in f.ChangedContents(): line_num = 0
for line in f.NewContents():
line_num += 1
for reg in NONINCLUSIVE_REGEX_LIST: for reg in NONINCLUSIVE_REGEX_LIST:
match = reg.search(line) match = reg.search(line)
if match: if match:
@ -99,11 +101,29 @@ def _CheckNonInclusiveLanguage(input_api, output_api, source_file_filter=None):
def _NonInclusiveFileFilter(file): def _NonInclusiveFileFilter(file):
filter_list = [ filter_list = [
"Doxyfile", # References to main pages
"PRESUBMIT.py", # Non-inclusive language check data "PRESUBMIT.py", # Non-inclusive language check data
"PRESUBMIT.py.tint", # Non-inclusive language check data
"docs/dawn/debug_markers.md", # External URL
"docs/dawn/infra.md", # Infra settings
"docs/tint/spirv-input-output-variables.md", # External URL "docs/tint/spirv-input-output-variables.md", # External URL
"test/tint/samples/compute_boids.wgsl ", # External URL "infra/config/global/generated/cr-buildbucket.cfg", # Infra settings
"infra/config/global/main.star", # Infra settings
"infra/kokoro/windows/build.bat", # External URL
"src/dawn/common/GPUInfo.cpp", # External URL
"src/dawn/native/metal/BackendMTL.mm", # OSX Constant
"src/dawn/native/vulkan/SamplerVk.cpp", # External URL
"src/dawn/native/vulkan/TextureVk.cpp", # External URL
"src/dawn/node/tools/src/cmd/run-cts/main.go", # Terminal type name
"src/dawn/samples/ComputeBoids.cpp", # External URL
"src/dawn/tests/end2end/DepthBiasTests.cpp", # External URL
"test/tint/samples/compute_boids.wgsl", # External URL
"third_party/khronos/KHR/khrplatform.h", # Third party file
"tools/roll-all", # Branch name
"tools/src/container/key.go", # External URL
"tools/src/go.sum", # External URL
] ]
return file in filter_list return file.LocalPath() not in filter_list
def _DoCommonChecks(input_api, output_api): def _DoCommonChecks(input_api, output_api):

View File

@ -3,7 +3,7 @@
# Dawn, a WebGPU implementation # Dawn, a WebGPU implementation
Dawn is an open-source and cross-platform implementation of the work-in-progress [WebGPU](https://webgpu.dev) standard. Dawn is an open-source and cross-platform implementation of the work-in-progress [WebGPU](https://webgpu.dev) standard.
More precisely it implements [`webgpu.h`](https://github.com/webgpu-native/webgpu-headers/blob/master/webgpu.h) that is a one-to-one mapping with the WebGPU IDL. More precisely it implements [`webgpu.h`](https://github.com/webgpu-native/webgpu-headers/blob/main/webgpu.h) that is a one-to-one mapping with the WebGPU IDL.
Dawn is meant to be integrated as part of a larger system and is the underlying implementation of WebGPU in Chromium. Dawn is meant to be integrated as part of a larger system and is the underlying implementation of WebGPU in Chromium.
Dawn provides several WebGPU building blocks: Dawn provides several WebGPU building blocks:

View File

@ -3,7 +3,7 @@
# Dawn, a WebGPU implementation # Dawn, a WebGPU implementation
Dawn is an open-source and cross-platform implementation of the work-in-progress [WebGPU](https://webgpu.dev) standard. Dawn is an open-source and cross-platform implementation of the work-in-progress [WebGPU](https://webgpu.dev) standard.
More precisely it implements [`webgpu.h`](https://github.com/webgpu-native/webgpu-headers/blob/master/webgpu.h) that is a one-to-one mapping with the WebGPU IDL. More precisely it implements [`webgpu.h`](https://github.com/webgpu-native/webgpu-headers/blob/main/webgpu.h) that is a one-to-one mapping with the WebGPU IDL.
Dawn is meant to be integrated as part of a larger system and is the underlying implementation of WebGPU in Chromium. Dawn is meant to be integrated as part of a larger system and is the underlying implementation of WebGPU in Chromium.
Dawn provides several WebGPU building blocks: Dawn provides several WebGPU building blocks:

View File

@ -2,7 +2,7 @@
Dawn relies on a lot of code generation to produce boilerplate code, especially webgpu.h-related code. They start by reading some JSON files (and sometimes XML too), process the data into an in-memory representation that's then used by some [Jinja2](https://jinja.palletsprojects.com/) templates to generate the code. This is similar to the model/view separation in Web development. Dawn relies on a lot of code generation to produce boilerplate code, especially webgpu.h-related code. They start by reading some JSON files (and sometimes XML too), process the data into an in-memory representation that's then used by some [Jinja2](https://jinja.palletsprojects.com/) templates to generate the code. This is similar to the model/view separation in Web development.
Generators are based on [generator_lib.py](../generator/generator_lib.py) which provides facilities for integrating in build systems and using Jinja2. Templates can be found in [`generator/templates`](../generator/templates) and the generated files are in `out/<Debug/Release/foo>/gen/src` when building Dawn in standalone. Generated files can also be found in [Chromium's code search](https://source.chromium.org/chromium/chromium/src/+/master:out/Debug/gen/third_party/dawn/src/). Generators are based on [generator_lib.py](../generator/generator_lib.py) which provides facilities for integrating in build systems and using Jinja2. Templates can be found in [`generator/templates`](../generator/templates) and the generated files are in `out/<Debug/Release/foo>/gen/src` when building Dawn in standalone. Generated files can also be found in [Chromium's code search](https://source.chromium.org/chromium/chromium/src/+/main:out/Debug/gen/third_party/dawn/src/).
## Dawn "JSON API" generators ## Dawn "JSON API" generators

View File

@ -13,6 +13,6 @@ The `dawn_wire_server_and_vulkan_backend_fuzzer` is like `dawn_wire_server_and_f
Using a seed corpus significantly improves the efficiency of fuzzing. Dawn's fuzzers use interesting testcases discovered in previous fuzzing runs to seed future runs. Fuzzing can be further improved by using Dawn tests as a example of API usage which allows the fuzzer to quickly discover and use new API entrypoints and usage patterns. Using a seed corpus significantly improves the efficiency of fuzzing. Dawn's fuzzers use interesting testcases discovered in previous fuzzing runs to seed future runs. Fuzzing can be further improved by using Dawn tests as a example of API usage which allows the fuzzer to quickly discover and use new API entrypoints and usage patterns.
Dawn has a CI builder [cron-linux-clang-rel-x64](https://ci.chromium.org/p/dawn/builders/ci/cron-linux-clang-rel-x64) which runs on a periodic schedule. This bot runs the `dawn_end2end_tests` and `dawn_unittests` using the wire and writes out traces of the commands. This can manually be done by running: `<test_binary> --use-wire --wire-trace-dir=tmp_dir`. The output directory will contain one trace for each test, where the traces are prepended with `0xFFFFFFFFFFFFFFFF`. The header is the callsite index at which the error injector should inject an error. If the fuzzer doesn't support error injection it will skip the header. [cron-linux-clang-rel-x64] then hashes the output files to produce unique names and uploads them to the fuzzer corpus directories. Dawn has a CI builder [cron-linux-clang-rel-x64](https://ci.chromium.org/p/dawn/builders/ci/cron-linux-clang-rel-x64) which runs on a periodic schedule. This bot runs the `dawn_end2end_tests` and `dawn_unittests` using the wire and writes out traces of the commands. This can manually be done by running: `<test_binary> --use-wire --wire-trace-dir=tmp_dir`. The output directory will contain one trace for each test, where the traces are prepended with `0xFFFFFFFFFFFFFFFF`. The header is the callsite index at which the error injector should inject an error. If the fuzzer doesn't support error injection it will skip the header. [cron-linux-clang-rel-x64] then hashes the output files to produce unique names and uploads them to the fuzzer corpus directories.
Please see the `dawn.py`[https://source.chromium.org/chromium/chromium/tools/build/+/master:recipes/recipes/dawn.py] recipe for specific details. Please see the `dawn.py`[https://source.chromium.org/chromium/chromium/tools/build/+/main:recipes/recipes/dawn.py] recipe for specific details.
Regenerating the seed corpus keeps it up to date when Dawn's API or wire protocol changes. Regenerating the seed corpus keeps it up to date when Dawn's API or wire protocol changes.

View File

@ -6,7 +6,7 @@ Dawn uses Chromium's continuous integration (CI) infrastructure to continually r
- [Dawn Try Builders](https://ci.chromium.org/p/dawn/g/try/builders) - [Dawn Try Builders](https://ci.chromium.org/p/dawn/g/try/builders)
- [chromium.dawn Waterfall](https://ci.chromium.org/p/chromium/g/chromium.dawn/console) - [chromium.dawn Waterfall](https://ci.chromium.org/p/chromium/g/chromium.dawn/console)
For additional information on GPU testing in Chromium, please see [[chromium/src]//docs/gpu/gpu_testing_bot_details.md](https://chromium.googlesource.com/chromium/src.git/+/master/docs/gpu/gpu_testing_bot_details.md). For additional information on GPU testing in Chromium, please see [[chromium/src]//docs/gpu/gpu_testing_bot_details.md](https://chromium.googlesource.com/chromium/src.git/+/main/docs/gpu/gpu_testing_bot_details.md).
## Dawn CI/Try Builders ## Dawn CI/Try Builders
Dawn builders are specified in [[dawn]//infra/config/global/cr-buildbucket.cfg](../infra/config/global/cr-buildbucket.cfg). This file contains a few mixins such as `clang`, `no_clang`, `x64`, `x86`, `debug`, `release` which are used to specify the bot dimensions and build properties (builder_mixins.recipe.properties). At the time of writing, we have the following builders: Dawn builders are specified in [[dawn]//infra/config/global/cr-buildbucket.cfg](../infra/config/global/cr-buildbucket.cfg). This file contains a few mixins such as `clang`, `no_clang`, `x64`, `x86`, `debug`, `release` which are used to specify the bot dimensions and build properties (builder_mixins.recipe.properties). At the time of writing, we have the following builders:

View File

@ -30,7 +30,7 @@ A Chromium checkout is required for the highest optimization flags. It is possib
- `recording_time`: The time to convert Dawn commands to native commands. - `recording_time`: The time to convert Dawn commands to native commands.
Metrics are reported according to the format specified at Metrics are reported according to the format specified at
[[chromium]//build/scripts/slave/performance_log_processor.py](https://cs.chromium.org/chromium/build/scripts/slave/performance_log_processor.py) [[chromium]//build/recipes/performance_log_processor.py](https://cs.chromium.org/chromium/build/recipes/performance_log_processor.py)
### Dumping Trace Files ### Dumping Trace Files

View File

@ -16,7 +16,7 @@
//* This generator is used to produce part of Emscripten's struct_info.json, //* This generator is used to produce part of Emscripten's struct_info.json,
//* which is a list of struct fields that it uses to generate field offset //* which is a list of struct fields that it uses to generate field offset
//* information for its own code generators. //* information for its own code generators.
//* https://github.com/emscripten-core/emscripten/blob/master/src/struct_info.json //* https://github.com/emscripten-core/emscripten/blob/main/src/struct_info.json
//* //*
{ {
{% set api = metadata.api.lower() %} {% set api = metadata.api.lower() %}

View File

@ -15,7 +15,7 @@
//* //*
//* This generator is used to produce the number-to-string mappings for //* This generator is used to produce the number-to-string mappings for
//* Emscripten's library_webgpu.js. //* Emscripten's library_webgpu.js.
//* https://github.com/emscripten-core/emscripten/blob/master/src/library_webgpu.js //* https://github.com/emscripten-core/emscripten/blob/main/src/library_webgpu.js
//* //*
{% for type in by_category["enum"] if not type.json_data.get("emscripten_no_enum_table") %} {% for type in by_category["enum"] if not type.json_data.get("emscripten_no_enum_table") %}
{{type.name.CamelCase()}}: {% if type.contiguousFromZero -%} {{type.name.CamelCase()}}: {% if type.contiguousFromZero -%}

View File

@ -25,12 +25,11 @@ import zipfile
def CheckedJoin(output, path): def CheckedJoin(output, path):
""" """
CheckedJoin returns os.path.join(output, path). It does sanity checks to CheckedJoin returns os.path.join(output, path). It checks that the resulting
ensure the resulting path is under output, but shouldn't be used on untrusted path is under output, but shouldn't be used on untrusted input.
input. """
"""
path = os.path.normpath(path) path = os.path.normpath(path)
if os.path.isabs(path) or path.startswith('.'): if os.path.isabs(path) or path.startswith("."):
raise ValueError(path) raise ValueError(path)
return os.path.join(output, path) return os.path.join(output, path)
@ -51,22 +50,22 @@ class SymlinkEntry(object):
def IterateZip(path): def IterateZip(path):
""" """
IterateZip opens the zip file at path and returns a generator of entry objects IterateZip opens the zip file at path and returns a generator of entry objects
for each file in it. for each file in it.
""" """
with zipfile.ZipFile(path, 'r') as zip_file: with zipfile.ZipFile(path, "r") as zip_file:
for info in zip_file.infolist(): for info in zip_file.infolist():
if info.filename.endswith('/'): if info.filename.endswith("/"):
continue continue
yield FileEntry(info.filename, None, zip_file.open(info)) yield FileEntry(info.filename, None, zip_file.open(info))
def IterateTar(path, compression): def IterateTar(path, compression):
""" """
IterateTar opens the tar.gz or tar.bz2 file at path and returns a generator of IterateTar opens the tar.gz or tar.bz2 file at path and returns a generator of
entry objects for each file in it. entry objects for each file in it.
""" """
with tarfile.open(path, 'r:' + compression) as tar_file: with tarfile.open(path, "r:" + compression) as tar_file:
for info in tar_file: for info in tar_file:
if info.isdir(): if info.isdir():
pass pass
@ -80,11 +79,13 @@ def IterateTar(path, compression):
def main(args): def main(args):
parser = optparse.OptionParser(usage='Usage: %prog ARCHIVE OUTPUT') parser = optparse.OptionParser(usage="Usage: %prog ARCHIVE OUTPUT")
parser.add_option('--no-prefix', parser.add_option(
dest='no_prefix', "--no-prefix",
action='store_true', dest="no_prefix",
help='Do not remove a prefix from paths in the archive.') action="store_true",
help="Do not remove a prefix from paths in the archive.",
)
options, args = parser.parse_args(args) options, args = parser.parse_args(args)
if len(args) != 2: if len(args) != 2:
@ -97,7 +98,7 @@ def main(args):
# Skip archives that weren't downloaded. # Skip archives that weren't downloaded.
return 0 return 0
with open(archive, 'rb') as f: with open(archive, "rb") as f:
sha256 = hashlib.sha256() sha256 = hashlib.sha256()
while True: while True:
chunk = f.read(1024 * 1024) chunk = f.read(1024 * 1024)
@ -113,12 +114,12 @@ def main(args):
print("Already up-to-date.") print("Already up-to-date.")
return 0 return 0
if archive.endswith('.zip'): if archive.endswith(".zip"):
entries = IterateZip(archive) entries = IterateZip(archive)
elif archive.endswith('.tar.gz'): elif archive.endswith(".tar.gz"):
entries = IterateTar(archive, 'gz') entries = IterateTar(archive, "gz")
elif archive.endswith('.tar.bz2'): elif archive.endswith(".tar.bz2"):
entries = IterateTar(archive, 'bz2') entries = IterateTar(archive, "bz2")
else: else:
raise ValueError(archive) raise ValueError(archive)
@ -132,11 +133,11 @@ def main(args):
num_extracted = 0 num_extracted = 0
for entry in entries: for entry in entries:
# Even on Windows, zip files must always use forward slashes. # Even on Windows, zip files must always use forward slashes.
if '\\' in entry.path or entry.path.startswith('/'): if "\\" in entry.path or entry.path.startswith("/"):
raise ValueError(entry.path) raise ValueError(entry.path)
if not options.no_prefix: if not options.no_prefix:
new_prefix, rest = entry.path.split('/', 1) new_prefix, rest = entry.path.split("/", 1)
# Ensure the archive is consistent. # Ensure the archive is consistent.
if prefix is None: if prefix is None:
@ -151,12 +152,12 @@ def main(args):
if not os.path.isdir(os.path.dirname(fixed_path)): if not os.path.isdir(os.path.dirname(fixed_path)):
os.makedirs(os.path.dirname(fixed_path)) os.makedirs(os.path.dirname(fixed_path))
if isinstance(entry, FileEntry): if isinstance(entry, FileEntry):
with open(fixed_path, 'wb') as out: with open(fixed_path, "wb") as out:
shutil.copyfileobj(entry.fileobj, out) shutil.copyfileobj(entry.fileobj, out)
elif isinstance(entry, SymlinkEntry): elif isinstance(entry, SymlinkEntry):
os.symlink(entry.target, fixed_path) os.symlink(entry.target, fixed_path)
else: else:
raise TypeError('unknown entry type') raise TypeError("unknown entry type")
# Fix up permissions if needbe. # Fix up permissions if needbe.
# TODO(davidben): To be extra tidy, this should only track the execute bit # TODO(davidben): To be extra tidy, this should only track the execute bit
@ -171,12 +172,12 @@ def main(args):
finally: finally:
entries.close() entries.close()
with open(stamp_path, 'w') as f: with open(stamp_path, "w") as f:
f.write(digest) f.write(digest)
print("Done. Extracted %d files." % (num_extracted, )) print("Done. Extracted %d files." % (num_extracted, ))
return 0 return 0
if __name__ == '__main__': if __name__ == "__main__":
sys.exit(main(sys.argv[1:])) sys.exit(main(sys.argv[1:]))

View File

@ -63,13 +63,13 @@ DawnJSONGenerator(
# Headers only INTERFACE library with generated headers don't work in CMake # Headers only INTERFACE library with generated headers don't work in CMake
# because the GENERATED property is local to a directory. Instead we make a # because the GENERATED property is local to a directory. Instead we make a
# STATIC library with a Dummy cpp file. # STATIC library with a placeholder cpp file.
# #
# INTERFACE libraries can only have INTERFACE sources so the sources get added # INTERFACE libraries can only have INTERFACE sources so the sources get added
# to the dependant's list of sources. If these dependents are in another # to the dependant's list of sources. If these dependents are in another
# directory, they don't see the GENERATED property and fail to configure # directory, they don't see the GENERATED property and fail to configure
# because the file doesn't exist on disk. # because the file doesn't exist on disk.
add_library(dawn_headers STATIC ${DAWN_DUMMY_FILE}) add_library(dawn_headers STATIC ${DAWN_PLACEHOLDER_FILE})
common_compile_options(dawn_headers) common_compile_options(dawn_headers)
target_sources(dawn_headers PRIVATE target_sources(dawn_headers PRIVATE
"${DAWN_INCLUDE_DIR}/dawn/dawn_wsi.h" "${DAWN_INCLUDE_DIR}/dawn/dawn_wsi.h"
@ -89,7 +89,7 @@ DawnJSONGenerator(
# This headers only library needs to be a STATIC library, see comment for # This headers only library needs to be a STATIC library, see comment for
# dawn_headers above. # dawn_headers above.
add_library(dawncpp_headers STATIC ${DAWN_DUMMY_FILE}) add_library(dawncpp_headers STATIC ${DAWN_PLACEHOLDER_FILE})
common_compile_options(dawncpp_headers) common_compile_options(dawncpp_headers)
target_sources(dawncpp_headers PRIVATE target_sources(dawncpp_headers PRIVATE
"${DAWN_INCLUDE_DIR}/dawn/EnumClassBitmasks.h" "${DAWN_INCLUDE_DIR}/dawn/EnumClassBitmasks.h"
@ -107,7 +107,7 @@ DawnJSONGenerator(
RESULT_VARIABLE "DAWNCPP_GEN_SOURCES" RESULT_VARIABLE "DAWNCPP_GEN_SOURCES"
) )
add_library(dawncpp STATIC ${DAWN_DUMMY_FILE}) add_library(dawncpp STATIC ${DAWN_PLACEHOLDER_FILE})
common_compile_options(dawncpp) common_compile_options(dawncpp)
target_sources(dawncpp PRIVATE ${DAWNCPP_GEN_SOURCES}) target_sources(dawncpp PRIVATE ${DAWNCPP_GEN_SOURCES})
target_link_libraries(dawncpp PUBLIC dawncpp_headers) target_link_libraries(dawncpp PUBLIC dawncpp_headers)
@ -122,7 +122,7 @@ DawnJSONGenerator(
RESULT_VARIABLE "DAWNPROC_GEN_SOURCES" RESULT_VARIABLE "DAWNPROC_GEN_SOURCES"
) )
add_library(dawn_proc ${DAWN_DUMMY_FILE}) add_library(dawn_proc ${DAWN_PLACEHOLDER_FILE})
common_compile_options(dawn_proc) common_compile_options(dawn_proc)
target_compile_definitions(dawn_proc PRIVATE "WGPU_IMPLEMENTATION") target_compile_definitions(dawn_proc PRIVATE "WGPU_IMPLEMENTATION")
if(BUILD_SHARED_LIBS) if(BUILD_SHARED_LIBS)

View File

@ -20,7 +20,7 @@ DawnGenerator(
RESULT_VARIABLE "DAWN_VERSION_AUTOGEN_SOURCES" RESULT_VARIABLE "DAWN_VERSION_AUTOGEN_SOURCES"
) )
add_library(dawn_common STATIC ${DAWN_DUMMY_FILE}) add_library(dawn_common STATIC ${DAWN_PLACEHOLDER_FILE})
common_compile_options(dawn_common) common_compile_options(dawn_common)
target_sources(dawn_common PRIVATE target_sources(dawn_common PRIVATE
${DAWN_VERSION_AUTOGEN_SOURCES} ${DAWN_VERSION_AUTOGEN_SOURCES}

View File

@ -18,7 +18,7 @@
// DAWN_PP_GET_HEAD: get the first element of a __VA_ARGS__ without triggering empty // DAWN_PP_GET_HEAD: get the first element of a __VA_ARGS__ without triggering empty
// __VA_ARGS__ warnings. // __VA_ARGS__ warnings.
#define DAWN_INTERNAL_PP_GET_HEAD(firstParam, ...) firstParam #define DAWN_INTERNAL_PP_GET_HEAD(firstParam, ...) firstParam
#define DAWN_PP_GET_HEAD(...) DAWN_INTERNAL_PP_GET_HEAD(__VA_ARGS__, dummyArg) #define DAWN_PP_GET_HEAD(...) DAWN_INTERNAL_PP_GET_HEAD(__VA_ARGS__, placeholderArg)
// DAWN_PP_CONCATENATE: Concatenate tokens, first expanding the arguments passed in. // DAWN_PP_CONCATENATE: Concatenate tokens, first expanding the arguments passed in.
#define DAWN_PP_CONCATENATE(arg1, arg2) DAWN_PP_CONCATENATE_1(arg1, arg2) #define DAWN_PP_CONCATENATE(arg1, arg2) DAWN_PP_CONCATENATE_1(arg1, arg2)

View File

@ -53,7 +53,7 @@ T NativeNonDispatachableHandleFromU64(uint64_t u64) {
# error "Unsupported platform" # error "Unsupported platform"
#endif #endif
// Define a dummy Vulkan handle for use before we include vulkan.h // Define a placeholder Vulkan handle for use before we include vulkan.h
DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(VkSomeHandle) DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(VkSomeHandle)
// Find out the alignment of native handles. Logically we would use alignof(VkSomeHandleNative) so // Find out the alignment of native handles. Logically we would use alignof(VkSomeHandleNative) so

View File

@ -18,7 +18,7 @@ DawnJSONGenerator(
RESULT_VARIABLE "DAWN_NATIVE_UTILS_GEN_SOURCES" RESULT_VARIABLE "DAWN_NATIVE_UTILS_GEN_SOURCES"
) )
add_library(dawn_native ${DAWN_DUMMY_FILE}) add_library(dawn_native ${DAWN_PLACEHOLDER_FILE})
common_compile_options(dawn_native) common_compile_options(dawn_native)
target_compile_definitions(dawn_native PRIVATE "DAWN_NATIVE_IMPLEMENTATION") target_compile_definitions(dawn_native PRIVATE "DAWN_NATIVE_IMPLEMENTATION")
@ -555,7 +555,7 @@ DawnJSONGenerator(
RESULT_VARIABLE "WEBGPU_DAWN_NATIVE_PROC_GEN" RESULT_VARIABLE "WEBGPU_DAWN_NATIVE_PROC_GEN"
) )
add_library(webgpu_dawn ${DAWN_DUMMY_FILE}) add_library(webgpu_dawn ${DAWN_PLACEHOLDER_FILE})
common_compile_options(webgpu_dawn) common_compile_options(webgpu_dawn)
target_link_libraries(webgpu_dawn PRIVATE dawn_native) target_link_libraries(webgpu_dawn PRIVATE dawn_native)
target_compile_definitions(webgpu_dawn PRIVATE "WGPU_IMPLEMENTATION") target_compile_definitions(webgpu_dawn PRIVATE "WGPU_IMPLEMENTATION")

View File

@ -167,7 +167,7 @@ namespace dawn::native {
} }
bool CommandAllocator::IsEmpty() const { bool CommandAllocator::IsEmpty() const {
return mCurrentPtr == reinterpret_cast<const uint8_t*>(&mDummyEnum[0]); return mCurrentPtr == reinterpret_cast<const uint8_t*>(&mPlaceholderEnum[0]);
} }
CommandBlocks&& CommandAllocator::AcquireBlocks() { CommandBlocks&& CommandAllocator::AcquireBlocks() {
@ -221,8 +221,8 @@ namespace dawn::native {
} }
void CommandAllocator::ResetPointers() { void CommandAllocator::ResetPointers() {
mCurrentPtr = reinterpret_cast<uint8_t*>(&mDummyEnum[0]); mCurrentPtr = reinterpret_cast<uint8_t*>(&mPlaceholderEnum[0]);
mEndPtr = reinterpret_cast<uint8_t*>(&mDummyEnum[1]); mEndPtr = reinterpret_cast<uint8_t*>(&mPlaceholderEnum[1]);
} }
} // namespace dawn::native } // namespace dawn::native

View File

@ -259,7 +259,7 @@ namespace dawn::native {
// Data used for the block range at initialization so that the first call to Allocate sees // Data used for the block range at initialization so that the first call to Allocate sees
// there is not enough space and calls GetNewBlock. This avoids having to special case the // there is not enough space and calls GetNewBlock. This avoids having to special case the
// initialization in Allocate. // initialization in Allocate.
uint32_t mDummyEnum[1] = {0}; uint32_t mPlaceholderEnum[1] = {0};
// Pointers to the current range of allocation in the block. Guaranteed to allow for at // Pointers to the current range of allocation in the block. Guaranteed to allow for at
// least one uint32_t if not nullptr, so that the special kEndOfBlock command id can always // least one uint32_t if not nullptr, so that the special kEndOfBlock command id can always

View File

@ -160,9 +160,9 @@ namespace dawn::native {
if (descriptor.layout == nullptr) { if (descriptor.layout == nullptr) {
// Ref will keep the pipeline layout alive until the end of the function where // Ref will keep the pipeline layout alive until the end of the function where
// the pipeline will take another reference. // the pipeline will take another reference.
DAWN_TRY_ASSIGN(layoutRef, DAWN_TRY_ASSIGN(layoutRef, PipelineLayoutBase::CreateDefault(
PipelineLayoutBase::CreateDefault( device, GetRenderStagesAndSetPlaceholderShader(
device, GetRenderStagesAndSetDummyShader(device, &descriptor))); device, &descriptor)));
outDescriptor->layout = layoutRef.Get(); outDescriptor->layout = layoutRef.Get();
} }
@ -267,8 +267,8 @@ namespace dawn::native {
DAWN_TRY_ASSIGN(mEmptyBindGroupLayout, CreateEmptyBindGroupLayout()); DAWN_TRY_ASSIGN(mEmptyBindGroupLayout, CreateEmptyBindGroupLayout());
// If dummy fragment shader module is needed, initialize it // If placeholder fragment shader module is needed, initialize it
if (IsToggleEnabled(Toggle::UseDummyFragmentInVertexOnlyPipeline)) { if (IsToggleEnabled(Toggle::UsePlaceholderFragmentInVertexOnlyPipeline)) {
// The empty fragment shader, used as a work around for vertex-only render pipeline // The empty fragment shader, used as a work around for vertex-only render pipeline
constexpr char kEmptyFragmentShader[] = R"( constexpr char kEmptyFragmentShader[] = R"(
@stage(fragment) fn fs_empty_main() {} @stage(fragment) fn fs_empty_main() {}
@ -278,7 +278,7 @@ namespace dawn::native {
wgslDesc.source = kEmptyFragmentShader; wgslDesc.source = kEmptyFragmentShader;
descriptor.nextInChain = &wgslDesc; descriptor.nextInChain = &wgslDesc;
DAWN_TRY_ASSIGN(mInternalPipelineStore->dummyFragmentShader, DAWN_TRY_ASSIGN(mInternalPipelineStore->placeholderFragmentShader,
CreateShaderModule(&descriptor)); CreateShaderModule(&descriptor));
} }
@ -414,7 +414,7 @@ namespace dawn::native {
mPersistentCache = nullptr; mPersistentCache = nullptr;
mEmptyBindGroupLayout = nullptr; mEmptyBindGroupLayout = nullptr;
mInternalPipelineStore = nullptr; mInternalPipelineStore = nullptr;
mExternalTextureDummyView = nullptr; mExternalTexturePlaceholderView = nullptr;
AssumeCommandsComplete(); AssumeCommandsComplete();
@ -811,17 +811,17 @@ namespace dawn::native {
} }
ResultOrError<Ref<TextureViewBase>> ResultOrError<Ref<TextureViewBase>>
DeviceBase::GetOrCreateDummyTextureViewForExternalTexture() { DeviceBase::GetOrCreatePlaceholderTextureViewForExternalTexture() {
if (!mExternalTextureDummyView.Get()) { if (!mExternalTexturePlaceholderView.Get()) {
Ref<TextureBase> externalTextureDummy; Ref<TextureBase> externalTexturePlaceholder;
TextureDescriptor textureDesc; TextureDescriptor textureDesc;
textureDesc.dimension = wgpu::TextureDimension::e2D; textureDesc.dimension = wgpu::TextureDimension::e2D;
textureDesc.format = wgpu::TextureFormat::RGBA8Unorm; textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
textureDesc.label = "Dawn_External_Texture_Dummy_Texture"; textureDesc.label = "Dawn_External_Texture_Placeholder_Texture";
textureDesc.size = {1, 1, 1}; textureDesc.size = {1, 1, 1};
textureDesc.usage = wgpu::TextureUsage::TextureBinding; textureDesc.usage = wgpu::TextureUsage::TextureBinding;
DAWN_TRY_ASSIGN(externalTextureDummy, CreateTexture(&textureDesc)); DAWN_TRY_ASSIGN(externalTexturePlaceholder, CreateTexture(&textureDesc));
TextureViewDescriptor textureViewDesc; TextureViewDescriptor textureViewDesc;
textureViewDesc.arrayLayerCount = 1; textureViewDesc.arrayLayerCount = 1;
@ -829,14 +829,14 @@ namespace dawn::native {
textureViewDesc.baseArrayLayer = 0; textureViewDesc.baseArrayLayer = 0;
textureViewDesc.dimension = wgpu::TextureViewDimension::e2D; textureViewDesc.dimension = wgpu::TextureViewDimension::e2D;
textureViewDesc.format = wgpu::TextureFormat::RGBA8Unorm; textureViewDesc.format = wgpu::TextureFormat::RGBA8Unorm;
textureViewDesc.label = "Dawn_External_Texture_Dummy_Texture_View"; textureViewDesc.label = "Dawn_External_Texture_Placeholder_Texture_View";
textureViewDesc.mipLevelCount = 1; textureViewDesc.mipLevelCount = 1;
DAWN_TRY_ASSIGN(mExternalTextureDummyView, DAWN_TRY_ASSIGN(mExternalTexturePlaceholderView,
CreateTextureView(externalTextureDummy.Get(), &textureViewDesc)); CreateTextureView(externalTexturePlaceholder.Get(), &textureViewDesc));
} }
return mExternalTextureDummyView; return mExternalTexturePlaceholderView;
} }
ResultOrError<Ref<PipelineLayoutBase>> DeviceBase::GetOrCreatePipelineLayout( ResultOrError<Ref<PipelineLayoutBase>> DeviceBase::GetOrCreatePipelineLayout(

View File

@ -172,7 +172,7 @@ namespace dawn::native {
void UncacheComputePipeline(ComputePipelineBase* obj); void UncacheComputePipeline(ComputePipelineBase* obj);
ResultOrError<Ref<TextureViewBase>> GetOrCreateDummyTextureViewForExternalTexture(); ResultOrError<Ref<TextureViewBase>> GetOrCreatePlaceholderTextureViewForExternalTexture();
ResultOrError<Ref<PipelineLayoutBase>> GetOrCreatePipelineLayout( ResultOrError<Ref<PipelineLayoutBase>> GetOrCreatePipelineLayout(
const PipelineLayoutDescriptor* descriptor); const PipelineLayoutDescriptor* descriptor);
@ -512,7 +512,7 @@ namespace dawn::native {
Ref<BindGroupLayoutBase> mEmptyBindGroupLayout; Ref<BindGroupLayoutBase> mEmptyBindGroupLayout;
Ref<TextureViewBase> mExternalTextureDummyView; Ref<TextureViewBase> mExternalTexturePlaceholderView;
std::unique_ptr<DynamicUploader> mDynamicUploader; std::unique_ptr<DynamicUploader> mDynamicUploader;
std::unique_ptr<AsyncTaskManager> mAsyncTaskManager; std::unique_ptr<AsyncTaskManager> mAsyncTaskManager;

View File

@ -130,7 +130,7 @@ namespace dawn::native {
mTextureViews[1] = descriptor->plane1; mTextureViews[1] = descriptor->plane1;
} else { } else {
DAWN_TRY_ASSIGN(mTextureViews[1], DAWN_TRY_ASSIGN(mTextureViews[1],
device->GetOrCreateDummyTextureViewForExternalTexture()); device->GetOrCreatePlaceholderTextureViewForExternalTexture());
} }
// We must create a buffer to store parameters needed by a shader that operates on this // We must create a buffer to store parameters needed by a shader that operates on this

View File

@ -66,7 +66,7 @@ namespace dawn::native {
ExternalTextureBase(DeviceBase* device, ObjectBase::ErrorTag tag); ExternalTextureBase(DeviceBase* device, ObjectBase::ErrorTag tag);
MaybeError Initialize(DeviceBase* device, const ExternalTextureDescriptor* descriptor); MaybeError Initialize(DeviceBase* device, const ExternalTextureDescriptor* descriptor);
Ref<TextureBase> mDummyTexture; Ref<TextureBase> mPlaceholderTexture;
Ref<BufferBase> mParamsBuffer; Ref<BufferBase> mParamsBuffer;
std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat> mTextureViews; std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat> mTextureViews;

View File

@ -41,7 +41,7 @@ namespace dawn::native {
Ref<ComputePipelineBase> timestampComputePipeline; Ref<ComputePipelineBase> timestampComputePipeline;
Ref<ShaderModuleBase> timestampCS; Ref<ShaderModuleBase> timestampCS;
Ref<ShaderModuleBase> dummyFragmentShader; Ref<ShaderModuleBase> placeholderFragmentShader;
// A scratch buffer suitable for use as a copy destination and storage binding. // A scratch buffer suitable for use as a copy destination and storage binding.
ScratchBuffer scratchStorage; ScratchBuffer scratchStorage;

View File

@ -325,8 +325,7 @@ namespace dawn::native {
DAWN_TRY_ASSIGN(result, device->GetOrCreatePipelineLayout(&desc)); DAWN_TRY_ASSIGN(result, device->GetOrCreatePipelineLayout(&desc));
ASSERT(!result->IsError()); ASSERT(!result->IsError());
// Sanity check in debug that the pipeline layout is compatible with the current // Check in debug that the pipeline layout is compatible with the current pipeline.
// pipeline.
for (const StageAndDescriptor& stage : stages) { for (const StageAndDescriptor& stage : stages) {
const EntryPointMetadata& metadata = stage.module->GetEntryPoint(stage.entryPoint); const EntryPointMetadata& metadata = stage.module->GetEntryPoint(stage.entryPoint);
ASSERT(ValidateCompatibilityWithPipelineLayout(device, metadata, result.Get()) ASSERT(ValidateCompatibilityWithPipelineLayout(device, metadata, result.Get())

View File

@ -473,7 +473,7 @@ namespace dawn::native {
return {}; return {};
} }
std::vector<StageAndDescriptor> GetRenderStagesAndSetDummyShader( std::vector<StageAndDescriptor> GetRenderStagesAndSetPlaceholderShader(
DeviceBase* device, DeviceBase* device,
const RenderPipelineDescriptor* descriptor) { const RenderPipelineDescriptor* descriptor) {
std::vector<StageAndDescriptor> stages; std::vector<StageAndDescriptor> stages;
@ -484,13 +484,13 @@ namespace dawn::native {
stages.push_back({SingleShaderStage::Fragment, descriptor->fragment->module, stages.push_back({SingleShaderStage::Fragment, descriptor->fragment->module,
descriptor->fragment->entryPoint, descriptor->fragment->constantCount, descriptor->fragment->entryPoint, descriptor->fragment->constantCount,
descriptor->fragment->constants}); descriptor->fragment->constants});
} else if (device->IsToggleEnabled(Toggle::UseDummyFragmentInVertexOnlyPipeline)) { } else if (device->IsToggleEnabled(Toggle::UsePlaceholderFragmentInVertexOnlyPipeline)) {
InternalPipelineStore* store = device->GetInternalPipelineStore(); InternalPipelineStore* store = device->GetInternalPipelineStore();
// The dummy fragment shader module should already be initialized // The placeholder fragment shader module should already be initialized
DAWN_ASSERT(store->dummyFragmentShader != nullptr); DAWN_ASSERT(store->placeholderFragmentShader != nullptr);
ShaderModuleBase* dummyFragmentShader = store->dummyFragmentShader.Get(); ShaderModuleBase* placeholderFragmentShader = store->placeholderFragmentShader.Get();
stages.push_back( stages.push_back({SingleShaderStage::Fragment, placeholderFragmentShader,
{SingleShaderStage::Fragment, dummyFragmentShader, "fs_empty_main", 0, nullptr}); "fs_empty_main", 0, nullptr});
} }
return stages; return stages;
} }
@ -513,7 +513,7 @@ namespace dawn::native {
: PipelineBase(device, : PipelineBase(device,
descriptor->layout, descriptor->layout,
descriptor->label, descriptor->label,
GetRenderStagesAndSetDummyShader(device, descriptor)), GetRenderStagesAndSetPlaceholderShader(device, descriptor)),
mAttachmentState(device->GetOrCreateAttachmentState(descriptor)) { mAttachmentState(device->GetOrCreateAttachmentState(descriptor)) {
mVertexBufferCount = descriptor->vertex.bufferCount; mVertexBufferCount = descriptor->vertex.bufferCount;
const VertexBufferLayout* buffers = descriptor->vertex.buffers; const VertexBufferLayout* buffers = descriptor->vertex.buffers;

View File

@ -33,7 +33,7 @@ namespace dawn::native {
MaybeError ValidateRenderPipelineDescriptor(DeviceBase* device, MaybeError ValidateRenderPipelineDescriptor(DeviceBase* device,
const RenderPipelineDescriptor* descriptor); const RenderPipelineDescriptor* descriptor);
std::vector<StageAndDescriptor> GetRenderStagesAndSetDummyShader( std::vector<StageAndDescriptor> GetRenderStagesAndSetPlaceholderShader(
DeviceBase* device, DeviceBase* device,
const RenderPipelineDescriptor* descriptor); const RenderPipelineDescriptor* descriptor);

View File

@ -91,7 +91,7 @@ namespace dawn::native {
// The implementation of functions in this file can have a lot of control flow and corner cases // The implementation of functions in this file can have a lot of control flow and corner cases
// so each modification should come with extensive tests and ensure 100% code coverage of the // so each modification should come with extensive tests and ensure 100% code coverage of the
// modified functions. See instructions at // modified functions. See instructions at
// https://chromium.googlesource.com/chromium/src/+/master/docs/testing/code_coverage.md#local-coverage-script // https://chromium.googlesource.com/chromium/src/+/main/docs/testing/code_coverage.md#local-coverage-script
// to run the test with code coverage. A command line that worked in the past (with the right // to run the test with code coverage. A command line that worked in the past (with the right
// GN args for the out/coverage directory in a Chromium checkout) is: // GN args for the out/coverage directory in a Chromium checkout) is:
// //

View File

@ -232,11 +232,11 @@ namespace dawn::native {
"Disables mipmaps for r8unorm and rg8unorm textures, which are known on some drivers " "Disables mipmaps for r8unorm and rg8unorm textures, which are known on some drivers "
"to not clear correctly.", "to not clear correctly.",
"https://crbug.com/dawn/1071"}}, "https://crbug.com/dawn/1071"}},
{Toggle::UseDummyFragmentInVertexOnlyPipeline, {Toggle::UsePlaceholderFragmentInVertexOnlyPipeline,
{"use_dummy_fragment_in_vertex_only_pipeline", {"use_placeholder_fragment_in_vertex_only_pipeline",
"Use a dummy empty fragment shader in vertex only render pipeline. This toggle must " "Use a placeholder empty fragment shader in vertex only render pipeline. This toggle "
"be enabled for OpenGL ES backend, and serves as a workaround by default enabled on " "must be enabled for OpenGL ES backend, and serves as a workaround by default "
"some Metal devices with Intel GPU to ensure the depth result is correct.", "enabled on some Metal devices with Intel GPU to ensure the depth result is correct.",
"https://crbug.com/dawn/136"}}, "https://crbug.com/dawn/136"}},
{Toggle::FxcOptimizations, {Toggle::FxcOptimizations,
{"fxc_optimizations", {"fxc_optimizations",
@ -260,7 +260,7 @@ namespace dawn::native {
"VK_KHR_zero_initialize_workgroup_memory is supported.", "VK_KHR_zero_initialize_workgroup_memory is supported.",
"https://crbug.com/dawn/1302"}}, "https://crbug.com/dawn/1302"}},
// Dummy comment to separate the }} so it is clearer what to copy-paste to add a toggle. // Comment to separate the }} so it is clearer what to copy-paste to add a toggle.
}}; }};
} // anonymous namespace } // anonymous namespace

View File

@ -61,7 +61,7 @@ namespace dawn::native {
DisableSymbolRenaming, DisableSymbolRenaming,
UseUserDefinedLabelsInBackend, UseUserDefinedLabelsInBackend,
DisableR8RG8Mipmaps, DisableR8RG8Mipmaps,
UseDummyFragmentInVertexOnlyPipeline, UsePlaceholderFragmentInVertexOnlyPipeline,
FxcOptimizations, FxcOptimizations,
RecordDetailedTimingInTraceEvents, RecordDetailedTimingInTraceEvents,
DisableTimestampQueryConversion, DisableTimestampQueryConversion,

View File

@ -217,13 +217,14 @@ namespace dawn::native::metal {
} }
// On some Intel GPU vertex only render pipeline get wrong depth result if no fragment // On some Intel GPU vertex only render pipeline get wrong depth result if no fragment
// shader provided. Create a dummy fragment shader module to work around this issue. // shader provided. Create a placeholder fragment shader module to work around this issue.
if (gpu_info::IsIntel(vendorId)) { if (gpu_info::IsIntel(vendorId)) {
bool useDummyFragmentShader = true; bool usePlaceholderFragmentShader = true;
if (gpu_info::IsSkylake(deviceId)) { if (gpu_info::IsSkylake(deviceId)) {
useDummyFragmentShader = false; usePlaceholderFragmentShader = false;
} }
SetToggle(Toggle::UseDummyFragmentInVertexOnlyPipeline, useDummyFragmentShader); SetToggle(Toggle::UsePlaceholderFragmentInVertexOnlyPipeline,
usePlaceholderFragmentShader);
} }
} }

View File

@ -99,8 +99,8 @@ namespace dawn::native::opengl {
SetToggle(Toggle::DisableDepthStencilRead, !supportsDepthStencilRead); SetToggle(Toggle::DisableDepthStencilRead, !supportsDepthStencilRead);
SetToggle(Toggle::DisableSampleVariables, !supportsSampleVariables); SetToggle(Toggle::DisableSampleVariables, !supportsSampleVariables);
SetToggle(Toggle::FlushBeforeClientWaitSync, gl.GetVersion().IsES()); SetToggle(Toggle::FlushBeforeClientWaitSync, gl.GetVersion().IsES());
// For OpenGL ES, we must use dummy fragment shader for vertex-only render pipeline. // For OpenGL ES, we must use a placeholder fragment shader for vertex-only render pipeline.
SetToggle(Toggle::UseDummyFragmentInVertexOnlyPipeline, gl.GetVersion().IsES()); SetToggle(Toggle::UsePlaceholderFragmentInVertexOnlyPipeline, gl.GetVersion().IsES());
} }
const GLFormat& Device::GetGLFormat(const Format& format) { const GLFormat& Device::GetGLFormat(const Format& format) {

View File

@ -87,26 +87,26 @@ namespace dawn::native::opengl {
// Create an OpenGL shader for each stage and gather the list of combined samplers. // Create an OpenGL shader for each stage and gather the list of combined samplers.
PerStage<CombinedSamplerInfo> combinedSamplers; PerStage<CombinedSamplerInfo> combinedSamplers;
bool needsDummySampler = false; bool needsPlaceholderSampler = false;
std::vector<GLuint> glShaders; std::vector<GLuint> glShaders;
for (SingleShaderStage stage : IterateStages(activeStages)) { for (SingleShaderStage stage : IterateStages(activeStages)) {
const ShaderModule* module = ToBackend(stages[stage].module.Get()); const ShaderModule* module = ToBackend(stages[stage].module.Get());
std::string glsl; std::string glsl;
DAWN_TRY_ASSIGN(glsl, module->TranslateToGLSL(stages[stage].entryPoint.c_str(), stage, DAWN_TRY_ASSIGN(glsl, module->TranslateToGLSL(stages[stage].entryPoint.c_str(), stage,
&combinedSamplers[stage], layout, &combinedSamplers[stage], layout,
&needsDummySampler)); &needsPlaceholderSampler));
GLuint shader; GLuint shader;
DAWN_TRY_ASSIGN(shader, CreateShader(gl, GLShaderType(stage), glsl.c_str())); DAWN_TRY_ASSIGN(shader, CreateShader(gl, GLShaderType(stage), glsl.c_str()));
gl.AttachShader(mProgram, shader); gl.AttachShader(mProgram, shader);
glShaders.push_back(shader); glShaders.push_back(shader);
} }
if (needsDummySampler) { if (needsPlaceholderSampler) {
SamplerDescriptor desc = {}; SamplerDescriptor desc = {};
ASSERT(desc.minFilter == wgpu::FilterMode::Nearest); ASSERT(desc.minFilter == wgpu::FilterMode::Nearest);
ASSERT(desc.magFilter == wgpu::FilterMode::Nearest); ASSERT(desc.magFilter == wgpu::FilterMode::Nearest);
ASSERT(desc.mipmapFilter == wgpu::FilterMode::Nearest); ASSERT(desc.mipmapFilter == wgpu::FilterMode::Nearest);
mDummySampler = mPlaceholderSampler =
ToBackend(layout->GetDevice()->GetOrCreateSampler(&desc).AcquireSuccess()); ToBackend(layout->GetDevice()->GetOrCreateSampler(&desc).AcquireSuccess());
} }
@ -164,8 +164,8 @@ namespace dawn::native::opengl {
wgpu::TextureSampleType::Float; wgpu::TextureSampleType::Float;
} }
{ {
if (combined.useDummySampler) { if (combined.usePlaceholderSampler) {
mDummySamplerUnits.push_back(textureUnit); mPlaceholderSamplerUnits.push_back(textureUnit);
} else { } else {
const BindGroupLayoutBase* bgl = const BindGroupLayoutBase* bgl =
layout->GetBindGroupLayout(combined.samplerLocation.group); layout->GetBindGroupLayout(combined.samplerLocation.group);
@ -209,9 +209,9 @@ namespace dawn::native::opengl {
void PipelineGL::ApplyNow(const OpenGLFunctions& gl) { void PipelineGL::ApplyNow(const OpenGLFunctions& gl) {
gl.UseProgram(mProgram); gl.UseProgram(mProgram);
for (GLuint unit : mDummySamplerUnits) { for (GLuint unit : mPlaceholderSamplerUnits) {
ASSERT(mDummySampler.Get() != nullptr); ASSERT(mPlaceholderSampler.Get() != nullptr);
gl.BindSampler(unit, mDummySampler->GetNonFilteringHandle()); gl.BindSampler(unit, mPlaceholderSampler->GetNonFilteringHandle());
} }
} }

View File

@ -58,10 +58,10 @@ namespace dawn::native::opengl {
GLuint mProgram; GLuint mProgram;
std::vector<std::vector<SamplerUnit>> mUnitsForSamplers; std::vector<std::vector<SamplerUnit>> mUnitsForSamplers;
std::vector<std::vector<GLuint>> mUnitsForTextures; std::vector<std::vector<GLuint>> mUnitsForTextures;
std::vector<GLuint> mDummySamplerUnits; std::vector<GLuint> mPlaceholderSamplerUnits;
// TODO(enga): This could live on the Device, or elsewhere, but currently it makes Device // TODO(enga): This could live on the Device, or elsewhere, but currently it makes Device
// destruction complex as it requires the sampler to be destroyed before the sampler cache. // destruction complex as it requires the sampler to be destroyed before the sampler cache.
Ref<Sampler> mDummySampler; Ref<Sampler> mPlaceholderSampler;
}; };
} // namespace dawn::native::opengl } // namespace dawn::native::opengl

View File

@ -39,15 +39,15 @@ namespace dawn::native::opengl {
} }
bool operator<(const CombinedSampler& a, const CombinedSampler& b) { bool operator<(const CombinedSampler& a, const CombinedSampler& b) {
return std::tie(a.useDummySampler, a.samplerLocation, a.textureLocation) < return std::tie(a.usePlaceholderSampler, a.samplerLocation, a.textureLocation) <
std::tie(b.useDummySampler, a.samplerLocation, b.textureLocation); std::tie(b.usePlaceholderSampler, a.samplerLocation, b.textureLocation);
} }
std::string CombinedSampler::GetName() const { std::string CombinedSampler::GetName() const {
std::ostringstream o; std::ostringstream o;
o << "dawn_combined"; o << "dawn_combined";
if (useDummySampler) { if (usePlaceholderSampler) {
o << "_dummy_sampler"; o << "_placeholder_sampler";
} else { } else {
o << "_" << static_cast<uint32_t>(samplerLocation.group) << "_" o << "_" << static_cast<uint32_t>(samplerLocation.group) << "_"
<< static_cast<uint32_t>(samplerLocation.binding); << static_cast<uint32_t>(samplerLocation.binding);
@ -82,7 +82,7 @@ namespace dawn::native::opengl {
SingleShaderStage stage, SingleShaderStage stage,
CombinedSamplerInfo* combinedSamplers, CombinedSamplerInfo* combinedSamplers,
const PipelineLayout* layout, const PipelineLayout* layout,
bool* needsDummySampler) const { bool* needsPlaceholderSampler) const {
TRACE_EVENT0(GetDevice()->GetPlatform(), General, "TranslateToGLSL"); TRACE_EVENT0(GetDevice()->GetPlatform(), General, "TranslateToGLSL");
tint::transform::Manager transformManager; tint::transform::Manager transformManager;
tint::transform::DataMap transformInputs; tint::transform::DataMap transformInputs;
@ -111,7 +111,7 @@ namespace dawn::native::opengl {
// of the original texture and sampler, and generates a unique name. The // of the original texture and sampler, and generates a unique name. The
// corresponding uniforms will be retrieved by these generated names // corresponding uniforms will be retrieved by these generated names
// in PipelineGL. Any texture-only references will have // in PipelineGL. Any texture-only references will have
// "useDummySampler" set to true, and only the texture binding point // "usePlaceholderSampler" set to true, and only the texture binding point
// will be used in naming them. In addition, Dawn will bind a // will be used in naming them. In addition, Dawn will bind a
// non-filtering sampler for them (see PipelineGL). // non-filtering sampler for them (see PipelineGL).
auto uses = inspector.GetSamplerTextureUses(entryPointName, placeholderBindingPoint); auto uses = inspector.GetSamplerTextureUses(entryPointName, placeholderBindingPoint);
@ -120,10 +120,10 @@ namespace dawn::native::opengl {
CombinedSampler* info = &combinedSamplers->back(); CombinedSampler* info = &combinedSamplers->back();
if (use.sampler_binding_point == placeholderBindingPoint) { if (use.sampler_binding_point == placeholderBindingPoint) {
info->useDummySampler = true; info->usePlaceholderSampler = true;
*needsDummySampler = true; *needsPlaceholderSampler = true;
} else { } else {
info->useDummySampler = false; info->usePlaceholderSampler = false;
} }
info->samplerLocation.group = BindGroupIndex(use.sampler_binding_point.group); info->samplerLocation.group = BindGroupIndex(use.sampler_binding_point.group);
info->samplerLocation.binding = BindingNumber(use.sampler_binding_point.binding); info->samplerLocation.binding = BindingNumber(use.sampler_binding_point.binding);
@ -131,7 +131,7 @@ namespace dawn::native::opengl {
info->textureLocation.binding = BindingNumber(use.texture_binding_point.binding); info->textureLocation.binding = BindingNumber(use.texture_binding_point.binding);
tintOptions.binding_map[use] = info->GetName(); tintOptions.binding_map[use] = info->GetName();
} }
if (*needsDummySampler) { if (*needsPlaceholderSampler) {
tintOptions.placeholder_binding_point = placeholderBindingPoint; tintOptions.placeholder_binding_point = placeholderBindingPoint;
} }

View File

@ -36,8 +36,9 @@ namespace dawn::native::opengl {
BindingLocation samplerLocation; BindingLocation samplerLocation;
BindingLocation textureLocation; BindingLocation textureLocation;
// OpenGL requires a sampler with texelFetch. If this is true, the developer did not provide // OpenGL requires a sampler with texelFetch. If this is true, the developer did not provide
// one and Dawn should bind a dummy non-filtering sampler. |samplerLocation| is unused. // one and Dawn should bind a placeholder non-filtering sampler. |samplerLocation| is
bool useDummySampler; // unused.
bool usePlaceholderSampler;
std::string GetName() const; std::string GetName() const;
}; };
bool operator<(const CombinedSampler& a, const CombinedSampler& b); bool operator<(const CombinedSampler& a, const CombinedSampler& b);
@ -57,7 +58,7 @@ namespace dawn::native::opengl {
SingleShaderStage stage, SingleShaderStage stage,
CombinedSamplerInfo* combinedSamplers, CombinedSamplerInfo* combinedSamplers,
const PipelineLayout* layout, const PipelineLayout* layout,
bool* needsDummySampler) const; bool* needsPlaceholderSampler) const;
private: private:
ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor); ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);

View File

@ -72,7 +72,7 @@ namespace dawn::native::vulkan {
template <typename VK_STRUCT_TYPE> template <typename VK_STRUCT_TYPE>
const VkBaseOutStructure* ToVkBaseOutStructure(const VK_STRUCT_TYPE* t) { const VkBaseOutStructure* ToVkBaseOutStructure(const VK_STRUCT_TYPE* t) {
// Sanity checks to ensure proper type safety. // Checks to ensure proper type safety.
static_assert( static_assert(
offsetof(VK_STRUCT_TYPE, sType) == offsetof(VkBaseOutStructure, sType) && offsetof(VK_STRUCT_TYPE, sType) == offsetof(VkBaseOutStructure, sType) &&
offsetof(VK_STRUCT_TYPE, pNext) == offsetof(VkBaseOutStructure, pNext), offsetof(VK_STRUCT_TYPE, pNext) == offsetof(VkBaseOutStructure, pNext),

View File

@ -397,8 +397,8 @@ namespace dawn::native::vulkan {
inputAssembly.topology = VulkanPrimitiveTopology(GetPrimitiveTopology()); inputAssembly.topology = VulkanPrimitiveTopology(GetPrimitiveTopology());
inputAssembly.primitiveRestartEnable = ShouldEnablePrimitiveRestart(GetPrimitiveTopology()); inputAssembly.primitiveRestartEnable = ShouldEnablePrimitiveRestart(GetPrimitiveTopology());
// A dummy viewport/scissor info. The validation layers force use to provide at least one // A placeholder viewport/scissor info. The validation layers force use to provide at least
// scissor and one viewport here, even if we choose to make them dynamic. // one scissor and one viewport here, even if we choose to make them dynamic.
VkViewport viewportDesc; VkViewport viewportDesc;
viewportDesc.x = 0.0f; viewportDesc.x = 0.0f;
viewportDesc.y = 0.0f; viewportDesc.y = 0.0f;
@ -490,7 +490,7 @@ namespace dawn::native::vulkan {
colorBlend.logicOp = VK_LOGIC_OP_CLEAR; colorBlend.logicOp = VK_LOGIC_OP_CLEAR;
colorBlend.attachmentCount = static_cast<uint8_t>(highestColorAttachmentIndexPlusOne); colorBlend.attachmentCount = static_cast<uint8_t>(highestColorAttachmentIndexPlusOne);
colorBlend.pAttachments = colorBlendAttachments.data(); colorBlend.pAttachments = colorBlendAttachments.data();
// The blend constant is always dynamic so we fill in a dummy value // The blend constant is always dynamic so we fill in a placeholder value
colorBlend.blendConstants[0] = 0.0f; colorBlend.blendConstants[0] = 0.0f;
colorBlend.blendConstants[1] = 0.0f; colorBlend.blendConstants[1] = 0.0f;
colorBlend.blendConstants[2] = 0.0f; colorBlend.blendConstants[2] = 0.0f;

View File

@ -68,7 +68,7 @@ namespace dawn::native::vulkan {
// that is already initialized. // that is already initialized.
template <typename VK_STRUCT_TYPE> template <typename VK_STRUCT_TYPE>
void Add(VK_STRUCT_TYPE* vkStruct) { void Add(VK_STRUCT_TYPE* vkStruct) {
// Sanity checks to ensure proper type safety. // Checks to ensure proper type safety.
static_assert( static_assert(
offsetof(VK_STRUCT_TYPE, sType) == offsetof(VkBaseOutStructure, sType) && offsetof(VK_STRUCT_TYPE, sType) == offsetof(VkBaseOutStructure, sType) &&
offsetof(VK_STRUCT_TYPE, pNext) == offsetof(VkBaseOutStructure, pNext), offsetof(VK_STRUCT_TYPE, pNext) == offsetof(VkBaseOutStructure, pNext),

View File

@ -101,7 +101,7 @@ Open or create the `.vscode/launch.json` file, and add:
"outFiles": [ "./**/*.js" ], "outFiles": [ "./**/*.js" ],
"args": [ "args": [
"-e", "require('./src/common/tools/setup-ts-in-node.js');require('./src/common/runtime/cmdline.ts');", "-e", "require('./src/common/tools/setup-ts-in-node.js');require('./src/common/runtime/cmdline.ts');",
"--", "dummy-arg", "--", "placeholder-arg",
"--gpu-provider", "--gpu-provider",
"[path-to-dawn.node]", // REPLACE: [path-to-dawn.node] "[path-to-dawn.node]", // REPLACE: [path-to-dawn.node]
"[test-query]", // REPLACE: [test-query] "[test-query]", // REPLACE: [test-query]
@ -127,7 +127,7 @@ cd <cts-root-dir>
[path-to-node] \ # for example <dawn-root-dir>/third_party/node/<arch>/node [path-to-node] \ # for example <dawn-root-dir>/third_party/node/<arch>/node
-e "require('./src/common/tools/setup-ts-in-node.js');require('./src/common/runtime/cmdline.ts');" \ -e "require('./src/common/tools/setup-ts-in-node.js');require('./src/common/runtime/cmdline.ts');" \
-- \ -- \
dummy-arg \ placeholder-arg \
--gpu-provider [path to dawn.node] \ --gpu-provider [path to dawn.node] \
[test-query] [test-query]
``` ```


@ -457,8 +457,8 @@ func (r *runner) gatherTestCases(query string, verbose bool) error {
"--", // Start of arguments "--", // Start of arguments
// src/common/runtime/helper/sys.ts expects 'node file.js <args>' // src/common/runtime/helper/sys.ts expects 'node file.js <args>'
// and slices away the first two arguments. When running with '-e', args // and slices away the first two arguments. When running with '-e', args
// start at 1, so just inject a dummy argument. // start at 1, so just inject a placeholder argument.
"dummy-arg", "placeholder-arg",
"--list", "--list",
}, query) }, query)
@ -603,8 +603,8 @@ func (r *runner) runServer(id int, caseIndices <-chan int, results chan<- result
"--", "--",
// src/common/runtime/helper/sys.ts expects 'node file.js <args>' // src/common/runtime/helper/sys.ts expects 'node file.js <args>'
// and slices away the first two arguments. When running with '-e', args // and slices away the first two arguments. When running with '-e', args
// start at 1, so just inject a dummy argument. // start at 1, so just inject a placeholder argument.
"dummy-arg", "placeholder-arg",
// Actual arguments begin here // Actual arguments begin here
"--gpu-provider", r.dawnNode, "--gpu-provider", r.dawnNode,
} }
@ -940,8 +940,8 @@ func (r *runner) runTestcase(query string) result {
"--", "--",
// src/common/runtime/helper/sys.ts expects 'node file.js <args>' // src/common/runtime/helper/sys.ts expects 'node file.js <args>'
// and slices away the first two arguments. When running with '-e', args // and slices away the first two arguments. When running with '-e', args
// start at 1, so just inject a dummy argument. // start at 1, so just inject a placeholder argument.
"dummy-arg", "placeholder-arg",
// Actual arguments begin here // Actual arguments begin here
"--gpu-provider", r.dawnNode, "--gpu-provider", r.dawnNode,
"--verbose", "--verbose",


@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
add_library(dawn_platform ${DAWN_DUMMY_FILE}) add_library(dawn_platform ${DAWN_PLACEHOLDER_FILE})
common_compile_options(dawn_platform) common_compile_options(dawn_platform)
target_compile_definitions(dawn_platform PRIVATE "DAWN_PLATFORM_IMPLEMENTATION") target_compile_definitions(dawn_platform PRIVATE "DAWN_PLATFORM_IMPLEMENTATION")


@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
add_library(dawn_sample_utils STATIC ${DAWN_DUMMY_FILE}) add_library(dawn_sample_utils STATIC ${DAWN_PLACEHOLDER_FILE})
common_compile_options(dawn_sample_utils) common_compile_options(dawn_sample_utils)
target_sources(dawn_sample_utils PRIVATE target_sources(dawn_sample_utils PRIVATE
"SampleUtils.cpp" "SampleUtils.cpp"


@ -679,7 +679,7 @@ using DawnTest = DawnTestWithParams<>;
// AdapterTestParam, and whose constructor looks like: // AdapterTestParam, and whose constructor looks like:
// Param(AdapterTestParam, ABorC, 12or3, ..., otherParams... ) // Param(AdapterTestParam, ABorC, 12or3, ..., otherParams... )
// You must also teach GTest how to print this struct. // You must also teach GTest how to print this struct.
// https://github.com/google/googletest/blob/master/docs/advanced.md#teaching-googletest-how-to-print-your-values // https://github.com/google/googletest/blob/main/docs/advanced.md#teaching-googletest-how-to-print-your-values
// Macro DAWN_TEST_PARAM_STRUCT can help generate this struct. // Macro DAWN_TEST_PARAM_STRUCT can help generate this struct.
#define DAWN_INSTANTIATE_TEST_P(testName, ...) \ #define DAWN_INSTANTIATE_TEST_P(testName, ...) \
INSTANTIATE_TEST_SUITE_P( \ INSTANTIATE_TEST_SUITE_P( \
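The comment above asks for two things when writing the parameter struct by hand instead of with DAWN_TEST_PARAM_STRUCT: its first member must be the AdapterTestParam, and googletest must be taught to print it. A hedged sketch of that shape (MyParam and sampleCount are illustrative, and the operator<< for AdapterTestParam is assumed to exist in DawnTest.h):

```
#include <ostream>

struct MyParam {
    MyParam(const AdapterTestParam& param, int sampleCount)
        : adapterParam(param), sampleCount(sampleCount) {}

    AdapterTestParam adapterParam;
    int sampleCount;
};

// googletest uses this when printing the parameter in failure messages.
std::ostream& operator<<(std::ostream& os, const MyParam& p) {
    return os << p.adapterParam << "_samples" << p.sampleCount;
}
```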


@ -434,9 +434,9 @@ TEST_P(MultipleWriteThenMultipleReadTests, SeparateBuffers) {
vbContents.pos[1] = vec4<f32>(1.0, 1.0, 0.0, 1.0); vbContents.pos[1] = vec4<f32>(1.0, 1.0, 0.0, 1.0);
vbContents.pos[2] = vec4<f32>(1.0, -1.0, 0.0, 1.0); vbContents.pos[2] = vec4<f32>(1.0, -1.0, 0.0, 1.0);
vbContents.pos[3] = vec4<f32>(-1.0, -1.0, 0.0, 1.0); vbContents.pos[3] = vec4<f32>(-1.0, -1.0, 0.0, 1.0);
let dummy : i32 = 0; let placeholder : i32 = 0;
ibContents.indices[0] = vec4<i32>(0, 1, 2, 0); ibContents.indices[0] = vec4<i32>(0, 1, 2, 0);
ibContents.indices[1] = vec4<i32>(2, 3, dummy, dummy); ibContents.indices[1] = vec4<i32>(2, 3, placeholder, placeholder);
uniformContents.color = 1.0; uniformContents.color = 1.0;
storageContents.color = 1.0; storageContents.color = 1.0;
})"); })");
@ -549,9 +549,9 @@ TEST_P(MultipleWriteThenMultipleReadTests, OneBuffer) {
contents.pos[1] = vec4<f32>(1.0, 1.0, 0.0, 1.0); contents.pos[1] = vec4<f32>(1.0, 1.0, 0.0, 1.0);
contents.pos[2] = vec4<f32>(1.0, -1.0, 0.0, 1.0); contents.pos[2] = vec4<f32>(1.0, -1.0, 0.0, 1.0);
contents.pos[3] = vec4<f32>(-1.0, -1.0, 0.0, 1.0); contents.pos[3] = vec4<f32>(-1.0, -1.0, 0.0, 1.0);
let dummy : i32 = 0; let placeholder : i32 = 0;
contents.indices[0] = vec4<i32>(0, 1, 2, 0); contents.indices[0] = vec4<i32>(0, 1, 2, 0);
contents.indices[1] = vec4<i32>(2, 3, dummy, dummy); contents.indices[1] = vec4<i32>(2, 3, placeholder, placeholder);
contents.color0 = 1.0; contents.color0 = 1.0;
contents.color1 = 1.0; contents.color1 = 1.0;
})"); })");


@ -535,10 +535,11 @@ fn IsEqualTo(pixel : vec4<f32>, expected : vec4<f32>) -> bool {
wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
wgpu::Texture dummyOutputTexture = CreateTexture( wgpu::Texture placeholderOutputTexture = CreateTexture(
kRenderAttachmentFormat, kRenderAttachmentFormat,
wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc, {1, 1}); wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc, {1, 1});
utils::ComboRenderPassDescriptor renderPassDescriptor({dummyOutputTexture.CreateView()}); utils::ComboRenderPassDescriptor renderPassDescriptor(
{placeholderOutputTexture.CreateView()});
wgpu::RenderPassEncoder renderPassEncoder = encoder.BeginRenderPass(&renderPassDescriptor); wgpu::RenderPassEncoder renderPassEncoder = encoder.BeginRenderPass(&renderPassDescriptor);
renderPassEncoder.SetBindGroup(0, bindGroup); renderPassEncoder.SetBindGroup(0, bindGroup);
renderPassEncoder.SetPipeline(pipeline); renderPassEncoder.SetPipeline(pipeline);


@ -308,12 +308,12 @@ TEST_P(VertexOnlyRenderPipelineTest, MultiplePass) {
DAWN_INSTANTIATE_TEST(VertexOnlyRenderPipelineTest, DAWN_INSTANTIATE_TEST(VertexOnlyRenderPipelineTest,
D3D12Backend(), D3D12Backend(),
D3D12Backend({"use_dummy_fragment_in_vertex_only_pipeline"}), D3D12Backend({"use_placeholder_fragment_in_vertex_only_pipeline"}),
MetalBackend(), MetalBackend(),
MetalBackend({"use_dummy_fragment_in_vertex_only_pipeline"}), MetalBackend({"use_placeholder_fragment_in_vertex_only_pipeline"}),
OpenGLBackend(), OpenGLBackend(),
OpenGLBackend({"use_dummy_fragment_in_vertex_only_pipeline"}), OpenGLBackend({"use_placeholder_fragment_in_vertex_only_pipeline"}),
OpenGLESBackend(), OpenGLESBackend(),
OpenGLESBackend({"use_dummy_fragment_in_vertex_only_pipeline"}), OpenGLESBackend({"use_placeholder_fragment_in_vertex_only_pipeline"}),
VulkanBackend(), VulkanBackend(),
VulkanBackend({"use_dummy_fragment_in_vertex_only_pipeline"})); VulkanBackend({"use_placeholder_fragment_in_vertex_only_pipeline"}));


@ -229,7 +229,7 @@ TEST_F(WindowSurfaceInstanceTests, InvalidMetalLayer) {
wgpu::SurfaceDescriptorFromMetalLayer chainedDescriptor; wgpu::SurfaceDescriptorFromMetalLayer chainedDescriptor;
// The CALayer is autoreleased. Releasing it causes a test failure when the Chromium GTest // The CALayer is autoreleased. Releasing it causes a test failure when the Chromium GTest
// autoreleasepool is emptied. // autoreleasepool is emptied.
chainedDescriptor.layer = utils::CreateDummyCALayer(); chainedDescriptor.layer = utils::CreatePlaceholderCALayer();
wgpu::SurfaceDescriptor descriptor; wgpu::SurfaceDescriptor descriptor;
descriptor.nextInChain = &chainedDescriptor; descriptor.nextInChain = &chainedDescriptor;


@ -131,7 +131,7 @@ void DawnPerfTestEnvironment::SetUp() {
std::ofstream outFile; std::ofstream outFile;
outFile.open(mTraceFile); outFile.open(mTraceFile);
outFile << "{ \"traceEvents\": ["; outFile << "{ \"traceEvents\": [";
outFile << "{}"; // Dummy object so trace events can always prepend a comma outFile << "{}"; // Placeholder object so trace events can always prepend a comma
outFile.flush(); outFile.flush();
outFile.close(); outFile.close();
} }
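The `{}` written above is what keeps the trace writer stateless: the JSON array is never empty, so every later event can be appended with a leading comma without tracking whether it is the first one. A minimal sketch of that convention (AppendTraceEvent and FinishTraceFile are illustrative helpers, not Dawn's API):

```
#include <fstream>
#include <string>

// The file already contains: { "traceEvents": [{}
void AppendTraceEvent(std::ofstream& out, const std::string& eventJson) {
    out << "," << eventJson;  // always valid: "{}" occupies the first slot
}

void FinishTraceFile(std::ofstream& out) {
    out << "]}";  // close the array and the top-level object
}
```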


@ -23,7 +23,7 @@
using namespace dawn::native; using namespace dawn::native;
class DummyResourceHeapAllocator : public ResourceHeapAllocator { class PlaceholderResourceHeapAllocator : public ResourceHeapAllocator {
public: public:
ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(uint64_t size) override { ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(uint64_t size) override {
return std::make_unique<ResourceHeapBase>(); return std::make_unique<ResourceHeapBase>();
@ -32,15 +32,15 @@ class DummyResourceHeapAllocator : public ResourceHeapAllocator {
} }
}; };
class DummyBuddyResourceAllocator { class PlaceholderBuddyResourceAllocator {
public: public:
DummyBuddyResourceAllocator(uint64_t maxBlockSize, uint64_t memorySize) PlaceholderBuddyResourceAllocator(uint64_t maxBlockSize, uint64_t memorySize)
: mAllocator(maxBlockSize, memorySize, &mHeapAllocator) { : mAllocator(maxBlockSize, memorySize, &mHeapAllocator) {
} }
DummyBuddyResourceAllocator(uint64_t maxBlockSize, PlaceholderBuddyResourceAllocator(uint64_t maxBlockSize,
uint64_t memorySize, uint64_t memorySize,
ResourceHeapAllocator* heapAllocator) ResourceHeapAllocator* heapAllocator)
: mAllocator(maxBlockSize, memorySize, heapAllocator) { : mAllocator(maxBlockSize, memorySize, heapAllocator) {
} }
@ -59,7 +59,7 @@ class DummyBuddyResourceAllocator {
} }
private: private:
DummyResourceHeapAllocator mHeapAllocator; PlaceholderResourceHeapAllocator mHeapAllocator;
BuddyMemoryAllocator mAllocator; BuddyMemoryAllocator mAllocator;
}; };
@ -73,7 +73,7 @@ TEST(BuddyMemoryAllocatorTests, SingleHeap) {
// //
constexpr uint64_t heapSize = 128; constexpr uint64_t heapSize = 128;
constexpr uint64_t maxBlockSize = heapSize; constexpr uint64_t maxBlockSize = heapSize;
DummyBuddyResourceAllocator allocator(maxBlockSize, heapSize); PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
// Cannot allocate greater than heap size. // Cannot allocate greater than heap size.
ResourceMemoryAllocation invalidAllocation = allocator.Allocate(heapSize * 2); ResourceMemoryAllocation invalidAllocation = allocator.Allocate(heapSize * 2);
@ -106,7 +106,7 @@ TEST(BuddyMemoryAllocatorTests, MultipleHeaps) {
// //
constexpr uint64_t maxBlockSize = 256; constexpr uint64_t maxBlockSize = 256;
constexpr uint64_t heapSize = 128; constexpr uint64_t heapSize = 128;
DummyBuddyResourceAllocator allocator(maxBlockSize, heapSize); PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
// Cannot allocate greater than heap size. // Cannot allocate greater than heap size.
ResourceMemoryAllocation invalidAllocation = allocator.Allocate(heapSize * 2); ResourceMemoryAllocation invalidAllocation = allocator.Allocate(heapSize * 2);
@ -154,7 +154,7 @@ TEST(BuddyMemoryAllocatorTests, MultipleSplitHeaps) {
// //
constexpr uint64_t maxBlockSize = 256; constexpr uint64_t maxBlockSize = 256;
constexpr uint64_t heapSize = 128; constexpr uint64_t heapSize = 128;
DummyBuddyResourceAllocator allocator(maxBlockSize, heapSize); PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
// Allocate two 64 byte sub-allocations. // Allocate two 64 byte sub-allocations.
ResourceMemoryAllocation allocation1 = allocator.Allocate(heapSize / 2); ResourceMemoryAllocation allocation1 = allocator.Allocate(heapSize / 2);
@ -208,7 +208,7 @@ TEST(BuddyMemoryAllocatorTests, MultiplSplitHeapsVariableSizes) {
// //
constexpr uint64_t heapSize = 128; constexpr uint64_t heapSize = 128;
constexpr uint64_t maxBlockSize = 512; constexpr uint64_t maxBlockSize = 512;
DummyBuddyResourceAllocator allocator(maxBlockSize, heapSize); PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
// Allocate two 64-byte allocations. // Allocate two 64-byte allocations.
ResourceMemoryAllocation allocation1 = allocator.Allocate(64); ResourceMemoryAllocation allocation1 = allocator.Allocate(64);
@ -284,7 +284,7 @@ TEST(BuddyMemoryAllocatorTests, SameSizeVariousAlignment) {
// //
constexpr uint64_t heapSize = 128; constexpr uint64_t heapSize = 128;
constexpr uint64_t maxBlockSize = 512; constexpr uint64_t maxBlockSize = 512;
DummyBuddyResourceAllocator allocator(maxBlockSize, heapSize); PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
ResourceMemoryAllocation allocation1 = allocator.Allocate(64, 128); ResourceMemoryAllocation allocation1 = allocator.Allocate(64, 128);
ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u); ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
@ -334,7 +334,7 @@ TEST(BuddyMemoryAllocatorTests, VariousSizeSameAlignment) {
// //
constexpr uint64_t heapSize = 128; constexpr uint64_t heapSize = 128;
constexpr uint64_t maxBlockSize = 512; constexpr uint64_t maxBlockSize = 512;
DummyBuddyResourceAllocator allocator(maxBlockSize, heapSize); PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
constexpr uint64_t alignment = 64; constexpr uint64_t alignment = 64;
@ -373,7 +373,7 @@ TEST(BuddyMemoryAllocatorTests, VariousSizeSameAlignment) {
TEST(BuddyMemoryAllocatorTests, AllocationOverflow) { TEST(BuddyMemoryAllocatorTests, AllocationOverflow) {
constexpr uint64_t heapSize = 128; constexpr uint64_t heapSize = 128;
constexpr uint64_t maxBlockSize = 512; constexpr uint64_t maxBlockSize = 512;
DummyBuddyResourceAllocator allocator(maxBlockSize, heapSize); PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
constexpr uint64_t largeBlock = (1ull << 63) + 1; constexpr uint64_t largeBlock = (1ull << 63) + 1;
ResourceMemoryAllocation invalidAllocation = allocator.Allocate(largeBlock); ResourceMemoryAllocation invalidAllocation = allocator.Allocate(largeBlock);
@ -385,9 +385,9 @@ TEST(BuddyMemoryAllocatorTests, ReuseFreedHeaps) {
constexpr uint64_t kHeapSize = 128; constexpr uint64_t kHeapSize = 128;
constexpr uint64_t kMaxBlockSize = 4096; constexpr uint64_t kMaxBlockSize = 4096;
DummyResourceHeapAllocator heapAllocator; PlaceholderResourceHeapAllocator heapAllocator;
PooledResourceMemoryAllocator poolAllocator(&heapAllocator); PooledResourceMemoryAllocator poolAllocator(&heapAllocator);
DummyBuddyResourceAllocator allocator(kMaxBlockSize, kHeapSize, &poolAllocator); PlaceholderBuddyResourceAllocator allocator(kMaxBlockSize, kHeapSize, &poolAllocator);
std::set<ResourceHeapBase*> heaps = {}; std::set<ResourceHeapBase*> heaps = {};
std::vector<ResourceMemoryAllocation> allocations = {}; std::vector<ResourceMemoryAllocation> allocations = {};
@ -426,9 +426,9 @@ TEST(BuddyMemoryAllocatorTests, DestroyHeaps) {
constexpr uint64_t kHeapSize = 128; constexpr uint64_t kHeapSize = 128;
constexpr uint64_t kMaxBlockSize = 4096; constexpr uint64_t kMaxBlockSize = 4096;
DummyResourceHeapAllocator heapAllocator; PlaceholderResourceHeapAllocator heapAllocator;
PooledResourceMemoryAllocator poolAllocator(&heapAllocator); PooledResourceMemoryAllocator poolAllocator(&heapAllocator);
DummyBuddyResourceAllocator allocator(kMaxBlockSize, kHeapSize, &poolAllocator); PlaceholderBuddyResourceAllocator allocator(kMaxBlockSize, kHeapSize, &poolAllocator);
std::set<ResourceHeapBase*> heaps = {}; std::set<ResourceHeapBase*> heaps = {};
std::vector<ResourceMemoryAllocation> allocations = {}; std::vector<ResourceMemoryAllocation> allocations = {};


@ -368,7 +368,7 @@ TEST(CommandAllocator, EmptyIterator) {
template <size_t A> template <size_t A>
struct alignas(A) AlignedStruct { struct alignas(A) AlignedStruct {
char dummy; char placeholder;
}; };
// Test for overflows in Allocate's computations, size 1 variant // Test for overflows in Allocate's computations, size 1 variant


@ -21,8 +21,8 @@ using namespace dawn::native;
namespace { namespace {
int dummySuccess = 0xbeef; int placeholderSuccess = 0xbeef;
const char* dummyErrorMessage = "I am an error message :3"; const char* placeholderErrorMessage = "I am an error message :3";
// Check returning a success MaybeError with {}; // Check returning a success MaybeError with {};
TEST(ErrorTests, Error_Success) { TEST(ErrorTests, Error_Success) {
@ -34,35 +34,37 @@ namespace {
// Check returning an error MaybeError with "return DAWN_VALIDATION_ERROR" // Check returning an error MaybeError with "return DAWN_VALIDATION_ERROR"
TEST(ErrorTests, Error_Error) { TEST(ErrorTests, Error_Error) {
auto ReturnError = []() -> MaybeError { return DAWN_VALIDATION_ERROR(dummyErrorMessage); }; auto ReturnError = []() -> MaybeError {
return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
};
MaybeError result = ReturnError(); MaybeError result = ReturnError();
ASSERT_TRUE(result.IsError()); ASSERT_TRUE(result.IsError());
std::unique_ptr<ErrorData> errorData = result.AcquireError(); std::unique_ptr<ErrorData> errorData = result.AcquireError();
ASSERT_EQ(errorData->GetMessage(), dummyErrorMessage); ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
} }
// Check returning a success ResultOrError with an implicit conversion // Check returning a success ResultOrError with an implicit conversion
TEST(ErrorTests, ResultOrError_Success) { TEST(ErrorTests, ResultOrError_Success) {
auto ReturnSuccess = []() -> ResultOrError<int*> { return &dummySuccess; }; auto ReturnSuccess = []() -> ResultOrError<int*> { return &placeholderSuccess; };
ResultOrError<int*> result = ReturnSuccess(); ResultOrError<int*> result = ReturnSuccess();
ASSERT_TRUE(result.IsSuccess()); ASSERT_TRUE(result.IsSuccess());
ASSERT_EQ(result.AcquireSuccess(), &dummySuccess); ASSERT_EQ(result.AcquireSuccess(), &placeholderSuccess);
} }
// Check returning an error ResultOrError with "return DAWN_VALIDATION_ERROR" // Check returning an error ResultOrError with "return DAWN_VALIDATION_ERROR"
TEST(ErrorTests, ResultOrError_Error) { TEST(ErrorTests, ResultOrError_Error) {
auto ReturnError = []() -> ResultOrError<int*> { auto ReturnError = []() -> ResultOrError<int*> {
return DAWN_VALIDATION_ERROR(dummyErrorMessage); return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
}; };
ResultOrError<int*> result = ReturnError(); ResultOrError<int*> result = ReturnError();
ASSERT_TRUE(result.IsError()); ASSERT_TRUE(result.IsError());
std::unique_ptr<ErrorData> errorData = result.AcquireError(); std::unique_ptr<ErrorData> errorData = result.AcquireError();
ASSERT_EQ(errorData->GetMessage(), dummyErrorMessage); ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
} }
// Check DAWN_TRY handles successes correctly. // Check DAWN_TRY handles successes correctly.
@ -85,7 +87,9 @@ namespace {
// Check DAWN_TRY handles errors correctly. // Check DAWN_TRY handles errors correctly.
TEST(ErrorTests, TRY_Error) { TEST(ErrorTests, TRY_Error) {
auto ReturnError = []() -> MaybeError { return DAWN_VALIDATION_ERROR(dummyErrorMessage); }; auto ReturnError = []() -> MaybeError {
return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
};
auto Try = [ReturnError]() -> MaybeError { auto Try = [ReturnError]() -> MaybeError {
DAWN_TRY(ReturnError()); DAWN_TRY(ReturnError());
@ -98,12 +102,14 @@ namespace {
ASSERT_TRUE(result.IsError()); ASSERT_TRUE(result.IsError());
std::unique_ptr<ErrorData> errorData = result.AcquireError(); std::unique_ptr<ErrorData> errorData = result.AcquireError();
ASSERT_EQ(errorData->GetMessage(), dummyErrorMessage); ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
} }
// Check DAWN_TRY adds to the backtrace. // Check DAWN_TRY adds to the backtrace.
TEST(ErrorTests, TRY_AddsToBacktrace) { TEST(ErrorTests, TRY_AddsToBacktrace) {
auto ReturnError = []() -> MaybeError { return DAWN_VALIDATION_ERROR(dummyErrorMessage); }; auto ReturnError = []() -> MaybeError {
return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
};
auto SingleTry = [ReturnError]() -> MaybeError { auto SingleTry = [ReturnError]() -> MaybeError {
DAWN_TRY(ReturnError()); DAWN_TRY(ReturnError());
@ -129,7 +135,7 @@ namespace {
// Check DAWN_TRY_ASSIGN handles successes correctly. // Check DAWN_TRY_ASSIGN handles successes correctly.
TEST(ErrorTests, TRY_RESULT_Success) { TEST(ErrorTests, TRY_RESULT_Success) {
auto ReturnSuccess = []() -> ResultOrError<int*> { return &dummySuccess; }; auto ReturnSuccess = []() -> ResultOrError<int*> { return &placeholderSuccess; };
// We need to check that DAWN_TRY doesn't return on successes // We need to check that DAWN_TRY doesn't return on successes
bool tryReturned = true; bool tryReturned = true;
@ -139,20 +145,20 @@ namespace {
DAWN_TRY_ASSIGN(result, ReturnSuccess()); DAWN_TRY_ASSIGN(result, ReturnSuccess());
tryReturned = false; tryReturned = false;
EXPECT_EQ(result, &dummySuccess); EXPECT_EQ(result, &placeholderSuccess);
return result; return result;
}; };
ResultOrError<int*> result = Try(); ResultOrError<int*> result = Try();
ASSERT_TRUE(result.IsSuccess()); ASSERT_TRUE(result.IsSuccess());
ASSERT_FALSE(tryReturned); ASSERT_FALSE(tryReturned);
ASSERT_EQ(result.AcquireSuccess(), &dummySuccess); ASSERT_EQ(result.AcquireSuccess(), &placeholderSuccess);
} }
// Check DAWN_TRY_ASSIGN handles errors correctly. // Check DAWN_TRY_ASSIGN handles errors correctly.
TEST(ErrorTests, TRY_RESULT_Error) { TEST(ErrorTests, TRY_RESULT_Error) {
auto ReturnError = []() -> ResultOrError<int*> { auto ReturnError = []() -> ResultOrError<int*> {
return DAWN_VALIDATION_ERROR(dummyErrorMessage); return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
}; };
auto Try = [ReturnError]() -> ResultOrError<int*> { auto Try = [ReturnError]() -> ResultOrError<int*> {
@ -162,30 +168,30 @@ namespace {
// DAWN_TRY should return before this point // DAWN_TRY should return before this point
EXPECT_FALSE(true); EXPECT_FALSE(true);
return &dummySuccess; return &placeholderSuccess;
}; };
ResultOrError<int*> result = Try(); ResultOrError<int*> result = Try();
ASSERT_TRUE(result.IsError()); ASSERT_TRUE(result.IsError());
std::unique_ptr<ErrorData> errorData = result.AcquireError(); std::unique_ptr<ErrorData> errorData = result.AcquireError();
ASSERT_EQ(errorData->GetMessage(), dummyErrorMessage); ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
} }
// Check DAWN_TRY_ASSIGN adds to the backtrace. // Check DAWN_TRY_ASSIGN adds to the backtrace.
TEST(ErrorTests, TRY_RESULT_AddsToBacktrace) { TEST(ErrorTests, TRY_RESULT_AddsToBacktrace) {
auto ReturnError = []() -> ResultOrError<int*> { auto ReturnError = []() -> ResultOrError<int*> {
return DAWN_VALIDATION_ERROR(dummyErrorMessage); return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
}; };
auto SingleTry = [ReturnError]() -> ResultOrError<int*> { auto SingleTry = [ReturnError]() -> ResultOrError<int*> {
DAWN_TRY(ReturnError()); DAWN_TRY(ReturnError());
return &dummySuccess; return &placeholderSuccess;
}; };
auto DoubleTry = [SingleTry]() -> ResultOrError<int*> { auto DoubleTry = [SingleTry]() -> ResultOrError<int*> {
DAWN_TRY(SingleTry()); DAWN_TRY(SingleTry());
return &dummySuccess; return &placeholderSuccess;
}; };
ResultOrError<int*> singleResult = SingleTry(); ResultOrError<int*> singleResult = SingleTry();
@ -203,7 +209,7 @@ namespace {
// Check a ResultOrError can be DAWN_TRY_ASSIGNED in a function that returns an Error // Check a ResultOrError can be DAWN_TRY_ASSIGNED in a function that returns an Error
TEST(ErrorTests, TRY_RESULT_ConversionToError) { TEST(ErrorTests, TRY_RESULT_ConversionToError) {
auto ReturnError = []() -> ResultOrError<int*> { auto ReturnError = []() -> ResultOrError<int*> {
return DAWN_VALIDATION_ERROR(dummyErrorMessage); return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
}; };
auto Try = [ReturnError]() -> MaybeError { auto Try = [ReturnError]() -> MaybeError {
@ -218,14 +224,14 @@ namespace {
ASSERT_TRUE(result.IsError()); ASSERT_TRUE(result.IsError());
std::unique_ptr<ErrorData> errorData = result.AcquireError(); std::unique_ptr<ErrorData> errorData = result.AcquireError();
ASSERT_EQ(errorData->GetMessage(), dummyErrorMessage); ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
} }
// Check a ResultOrError can be DAWN_TRY_ASSIGNED in a function that returns an Error // Check a ResultOrError can be DAWN_TRY_ASSIGNED in a function that returns an Error
// Version without Result<E*, T*> // Version without Result<E*, T*>
TEST(ErrorTests, TRY_RESULT_ConversionToErrorNonPointer) { TEST(ErrorTests, TRY_RESULT_ConversionToErrorNonPointer) {
auto ReturnError = []() -> ResultOrError<int> { auto ReturnError = []() -> ResultOrError<int> {
return DAWN_VALIDATION_ERROR(dummyErrorMessage); return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
}; };
auto Try = [ReturnError]() -> MaybeError { auto Try = [ReturnError]() -> MaybeError {
@ -240,12 +246,12 @@ namespace {
ASSERT_TRUE(result.IsError()); ASSERT_TRUE(result.IsError());
std::unique_ptr<ErrorData> errorData = result.AcquireError(); std::unique_ptr<ErrorData> errorData = result.AcquireError();
ASSERT_EQ(errorData->GetMessage(), dummyErrorMessage); ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
} }
// Check DAWN_TRY_ASSIGN handles successes correctly. // Check DAWN_TRY_ASSIGN handles successes correctly.
TEST(ErrorTests, TRY_RESULT_CLEANUP_Success) { TEST(ErrorTests, TRY_RESULT_CLEANUP_Success) {
auto ReturnSuccess = []() -> ResultOrError<int*> { return &dummySuccess; }; auto ReturnSuccess = []() -> ResultOrError<int*> { return &placeholderSuccess; };
// We need to check that DAWN_TRY_ASSIGN_WITH_CLEANUP doesn't return on successes and the // We need to check that DAWN_TRY_ASSIGN_WITH_CLEANUP doesn't return on successes and the
// cleanup is not called. // cleanup is not called.
@ -257,7 +263,7 @@ namespace {
DAWN_TRY_ASSIGN_WITH_CLEANUP(result, ReturnSuccess(), { tryCleanup = true; }); DAWN_TRY_ASSIGN_WITH_CLEANUP(result, ReturnSuccess(), { tryCleanup = true; });
tryReturned = false; tryReturned = false;
EXPECT_EQ(result, &dummySuccess); EXPECT_EQ(result, &placeholderSuccess);
return result; return result;
}; };
@ -265,13 +271,13 @@ namespace {
ASSERT_TRUE(result.IsSuccess()); ASSERT_TRUE(result.IsSuccess());
ASSERT_FALSE(tryReturned); ASSERT_FALSE(tryReturned);
ASSERT_FALSE(tryCleanup); ASSERT_FALSE(tryCleanup);
ASSERT_EQ(result.AcquireSuccess(), &dummySuccess); ASSERT_EQ(result.AcquireSuccess(), &placeholderSuccess);
} }
// Check DAWN_TRY_ASSIGN handles cleanups. // Check DAWN_TRY_ASSIGN handles cleanups.
TEST(ErrorTests, TRY_RESULT_CLEANUP_Cleanup) { TEST(ErrorTests, TRY_RESULT_CLEANUP_Cleanup) {
auto ReturnError = []() -> ResultOrError<int*> { auto ReturnError = []() -> ResultOrError<int*> {
return DAWN_VALIDATION_ERROR(dummyErrorMessage); return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
}; };
// We need to check that DAWN_TRY_ASSIGN_WITH_CLEANUP calls cleanup when error. // We need to check that DAWN_TRY_ASSIGN_WITH_CLEANUP calls cleanup when error.
@ -284,21 +290,21 @@ namespace {
// DAWN_TRY_ASSIGN_WITH_CLEANUP should return before this point // DAWN_TRY_ASSIGN_WITH_CLEANUP should return before this point
EXPECT_FALSE(true); EXPECT_FALSE(true);
return &dummySuccess; return &placeholderSuccess;
}; };
ResultOrError<int*> result = Try(); ResultOrError<int*> result = Try();
ASSERT_TRUE(result.IsError()); ASSERT_TRUE(result.IsError());
std::unique_ptr<ErrorData> errorData = result.AcquireError(); std::unique_ptr<ErrorData> errorData = result.AcquireError();
ASSERT_EQ(errorData->GetMessage(), dummyErrorMessage); ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
ASSERT_TRUE(tryCleanup); ASSERT_TRUE(tryCleanup);
} }
// Check DAWN_TRY_ASSIGN can override return value when needed. // Check DAWN_TRY_ASSIGN can override return value when needed.
TEST(ErrorTests, TRY_RESULT_CLEANUP_OverrideReturn) { TEST(ErrorTests, TRY_RESULT_CLEANUP_OverrideReturn) {
auto ReturnError = []() -> ResultOrError<int*> { auto ReturnError = []() -> ResultOrError<int*> {
return DAWN_VALIDATION_ERROR(dummyErrorMessage); return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
}; };
auto Try = [ReturnError]() -> bool { auto Try = [ReturnError]() -> bool {
@ -318,24 +324,28 @@ namespace {
// Check a MaybeError can be DAWN_TRIED in a function that returns an ResultOrError // Check a MaybeError can be DAWN_TRIED in a function that returns an ResultOrError
// Check DAWN_TRY handles errors correctly. // Check DAWN_TRY handles errors correctly.
TEST(ErrorTests, TRY_ConversionToErrorOrResult) { TEST(ErrorTests, TRY_ConversionToErrorOrResult) {
auto ReturnError = []() -> MaybeError { return DAWN_VALIDATION_ERROR(dummyErrorMessage); }; auto ReturnError = []() -> MaybeError {
return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
};
auto Try = [ReturnError]() -> ResultOrError<int*> { auto Try = [ReturnError]() -> ResultOrError<int*> {
DAWN_TRY(ReturnError()); DAWN_TRY(ReturnError());
return &dummySuccess; return &placeholderSuccess;
}; };
ResultOrError<int*> result = Try(); ResultOrError<int*> result = Try();
ASSERT_TRUE(result.IsError()); ASSERT_TRUE(result.IsError());
std::unique_ptr<ErrorData> errorData = result.AcquireError(); std::unique_ptr<ErrorData> errorData = result.AcquireError();
ASSERT_EQ(errorData->GetMessage(), dummyErrorMessage); ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
} }
// Check a MaybeError can be DAWN_TRIED in a function that returns an ResultOrError // Check a MaybeError can be DAWN_TRIED in a function that returns an ResultOrError
// Check DAWN_TRY handles errors correctly. Version without Result<E*, T*> // Check DAWN_TRY handles errors correctly. Version without Result<E*, T*>
TEST(ErrorTests, TRY_ConversionToErrorOrResultNonPointer) { TEST(ErrorTests, TRY_ConversionToErrorOrResultNonPointer) {
auto ReturnError = []() -> MaybeError { return DAWN_VALIDATION_ERROR(dummyErrorMessage); }; auto ReturnError = []() -> MaybeError {
return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
};
auto Try = [ReturnError]() -> ResultOrError<int> { auto Try = [ReturnError]() -> ResultOrError<int> {
DAWN_TRY(ReturnError()); DAWN_TRY(ReturnError());
@ -346,7 +356,7 @@ namespace {
ASSERT_TRUE(result.IsError()); ASSERT_TRUE(result.IsError());
std::unique_ptr<ErrorData> errorData = result.AcquireError(); std::unique_ptr<ErrorData> errorData = result.AcquireError();
ASSERT_EQ(errorData->GetMessage(), dummyErrorMessage); ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
} }
} // anonymous namespace } // anonymous namespace
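All of the TRY tests above exercise the same early-return behavior. A self-contained sketch of that pattern, not Dawn's implementation (DAWN_TRY additionally records backtrace information and converts between MaybeError and ResultOrError):

```
#include <memory>
#include <string>
#include <utility>

struct Error {
    std::string message;
};

template <typename T>
struct ResultSketch {
    std::unique_ptr<Error> error;  // null on success
    T value{};
    bool IsError() const { return error != nullptr; }
};

// On error, immediately return the error to the caller; otherwise keep going.
#define TRY_ASSIGN_SKETCH(lhs, expr)            \
    do {                                        \
        auto _result = (expr);                  \
        if (_result.IsError()) {                \
            return {std::move(_result.error)};  \
        }                                       \
        lhs = std::move(_result.value);         \
    } while (0)
```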


@ -36,10 +36,10 @@ class PerThreadProcTests : public testing::Test {
dawn::native::null::Adapter mNativeAdapter; dawn::native::null::Adapter mNativeAdapter;
}; };
// Test that procs can be set per thread. This test overrides deviceCreateBuffer with a dummy proc // Test that procs can be set per thread. This test overrides deviceCreateBuffer with a placeholder
// for each thread that increments a counter. Because each thread has their own proc and counter, // proc for each thread that increments a counter. Because each thread has their own proc and
// there should be no data races. The per-thread procs also check that the current thread id is // counter, there should be no data races. The per-thread procs also check that the current thread
// exactly equal to the expected thread id. // id is exactly equal to the expected thread id.
TEST_F(PerThreadProcTests, DispatchesPerThread) { TEST_F(PerThreadProcTests, DispatchesPerThread) {
dawnProcSetProcs(&dawnThreadDispatchProcTable); dawnProcSetProcs(&dawnThreadDispatchProcTable);


@ -42,9 +42,9 @@ namespace {
EXPECT_FALSE(result->IsSuccess()); EXPECT_FALSE(result->IsSuccess());
} }
static int dummyError = 0xbeef; static int placeholderError = 0xbeef;
static float dummySuccess = 42.0f; static float placeholderSuccess = 42.0f;
static const float dummyConstSuccess = 42.0f; static const float placeholderConstSuccess = 42.0f;
class AClass : public RefCounted { class AClass : public RefCounted {
public: public:
@ -52,9 +52,9 @@ namespace {
}; };
// Tests using the following overload of TestSuccess make // Tests using the following overload of TestSuccess make
// local Ref instances to dummySuccessObj. Tests should // local Ref instances to placeholderSuccessObj. Tests should
// ensure any local Ref objects made along the way continue // ensure any local Ref objects made along the way continue
// to point to dummySuccessObj. // to point to placeholderSuccessObj.
template <typename T, typename E> template <typename T, typename E>
void TestSuccess(Result<Ref<T>, E>* result, T* expectedSuccess) { void TestSuccess(Result<Ref<T>, E>* result, T* expectedSuccess) {
EXPECT_FALSE(result->IsError()); EXPECT_FALSE(result->IsError());
@ -83,25 +83,25 @@ namespace {
// Test constructing an error Result<void, E> // Test constructing an error Result<void, E>
TEST(ResultOnlyPointerError, ConstructingError) { TEST(ResultOnlyPointerError, ConstructingError) {
Result<void, int> result(std::make_unique<int>(dummyError)); Result<void, int> result(std::make_unique<int>(placeholderError));
TestError(&result, dummyError); TestError(&result, placeholderError);
} }
// Test moving an error Result<void, E> // Test moving an error Result<void, E>
TEST(ResultOnlyPointerError, MovingError) { TEST(ResultOnlyPointerError, MovingError) {
Result<void, int> result(std::make_unique<int>(dummyError)); Result<void, int> result(std::make_unique<int>(placeholderError));
Result<void, int> movedResult(std::move(result)); Result<void, int> movedResult(std::move(result));
TestError(&movedResult, dummyError); TestError(&movedResult, placeholderError);
} }
// Test returning an error Result<void, E> // Test returning an error Result<void, E>
TEST(ResultOnlyPointerError, ReturningError) { TEST(ResultOnlyPointerError, ReturningError) {
auto CreateError = []() -> Result<void, int> { auto CreateError = []() -> Result<void, int> {
return {std::make_unique<int>(dummyError)}; return {std::make_unique<int>(placeholderError)};
}; };
Result<void, int> result = CreateError(); Result<void, int> result = CreateError();
TestError(&result, dummyError); TestError(&result, placeholderError);
} }
// Test constructing a success Result<void, E> // Test constructing a success Result<void, E>
@ -132,46 +132,46 @@ namespace {
// Test constructing an error Result<T*, E> // Test constructing an error Result<T*, E>
TEST(ResultBothPointer, ConstructingError) { TEST(ResultBothPointer, ConstructingError) {
Result<float*, int> result(std::make_unique<int>(dummyError)); Result<float*, int> result(std::make_unique<int>(placeholderError));
TestError(&result, dummyError); TestError(&result, placeholderError);
} }
// Test moving an error Result<T*, E> // Test moving an error Result<T*, E>
TEST(ResultBothPointer, MovingError) { TEST(ResultBothPointer, MovingError) {
Result<float*, int> result(std::make_unique<int>(dummyError)); Result<float*, int> result(std::make_unique<int>(placeholderError));
Result<float*, int> movedResult(std::move(result)); Result<float*, int> movedResult(std::move(result));
TestError(&movedResult, dummyError); TestError(&movedResult, placeholderError);
} }
// Test returning an error Result<T*, E> // Test returning an error Result<T*, E>
TEST(ResultBothPointer, ReturningError) { TEST(ResultBothPointer, ReturningError) {
auto CreateError = []() -> Result<float*, int> { auto CreateError = []() -> Result<float*, int> {
return {std::make_unique<int>(dummyError)}; return {std::make_unique<int>(placeholderError)};
}; };
Result<float*, int> result = CreateError(); Result<float*, int> result = CreateError();
TestError(&result, dummyError); TestError(&result, placeholderError);
} }
// Test constructing a success Result<T*, E> // Test constructing a success Result<T*, E>
TEST(ResultBothPointer, ConstructingSuccess) { TEST(ResultBothPointer, ConstructingSuccess) {
Result<float*, int> result(&dummySuccess); Result<float*, int> result(&placeholderSuccess);
TestSuccess(&result, &dummySuccess); TestSuccess(&result, &placeholderSuccess);
} }
// Test moving a success Result<T*, E> // Test moving a success Result<T*, E>
TEST(ResultBothPointer, MovingSuccess) { TEST(ResultBothPointer, MovingSuccess) {
Result<float*, int> result(&dummySuccess); Result<float*, int> result(&placeholderSuccess);
Result<float*, int> movedResult(std::move(result)); Result<float*, int> movedResult(std::move(result));
TestSuccess(&movedResult, &dummySuccess); TestSuccess(&movedResult, &placeholderSuccess);
} }
// Test returning a success Result<T*, E> // Test returning a success Result<T*, E>
TEST(ResultBothPointer, ReturningSuccess) { TEST(ResultBothPointer, ReturningSuccess) {
auto CreateSuccess = []() -> Result<float*, int*> { return {&dummySuccess}; }; auto CreateSuccess = []() -> Result<float*, int*> { return {&placeholderSuccess}; };
Result<float*, int*> result = CreateSuccess(); Result<float*, int*> result = CreateSuccess();
TestSuccess(&result, &dummySuccess); TestSuccess(&result, &placeholderSuccess);
} }
// Tests converting from a Result<TChild*, E> // Tests converting from a Result<TChild*, E>
@ -203,71 +203,73 @@ namespace {
// Test constructing an error Result<const T*, E> // Test constructing an error Result<const T*, E>
TEST(ResultBothPointerWithConstResult, ConstructingError) { TEST(ResultBothPointerWithConstResult, ConstructingError) {
Result<const float*, int> result(std::make_unique<int>(dummyError)); Result<const float*, int> result(std::make_unique<int>(placeholderError));
TestError(&result, dummyError); TestError(&result, placeholderError);
} }
// Test moving an error Result<const T*, E> // Test moving an error Result<const T*, E>
TEST(ResultBothPointerWithConstResult, MovingError) { TEST(ResultBothPointerWithConstResult, MovingError) {
Result<const float*, int> result(std::make_unique<int>(dummyError)); Result<const float*, int> result(std::make_unique<int>(placeholderError));
Result<const float*, int> movedResult(std::move(result)); Result<const float*, int> movedResult(std::move(result));
TestError(&movedResult, dummyError); TestError(&movedResult, placeholderError);
} }
// Test returning an error Result<const T*, E*> // Test returning an error Result<const T*, E*>
TEST(ResultBothPointerWithConstResult, ReturningError) { TEST(ResultBothPointerWithConstResult, ReturningError) {
auto CreateError = []() -> Result<const float*, int> { auto CreateError = []() -> Result<const float*, int> {
return {std::make_unique<int>(dummyError)}; return {std::make_unique<int>(placeholderError)};
}; };
Result<const float*, int> result = CreateError(); Result<const float*, int> result = CreateError();
TestError(&result, dummyError); TestError(&result, placeholderError);
} }
// Test constructing a success Result<const T*, E*> // Test constructing a success Result<const T*, E*>
TEST(ResultBothPointerWithConstResult, ConstructingSuccess) { TEST(ResultBothPointerWithConstResult, ConstructingSuccess) {
Result<const float*, int> result(&dummyConstSuccess); Result<const float*, int> result(&placeholderConstSuccess);
TestSuccess(&result, &dummyConstSuccess); TestSuccess(&result, &placeholderConstSuccess);
} }
// Test moving a success Result<const T*, E*> // Test moving a success Result<const T*, E*>
TEST(ResultBothPointerWithConstResult, MovingSuccess) { TEST(ResultBothPointerWithConstResult, MovingSuccess) {
Result<const float*, int> result(&dummyConstSuccess); Result<const float*, int> result(&placeholderConstSuccess);
Result<const float*, int> movedResult(std::move(result)); Result<const float*, int> movedResult(std::move(result));
TestSuccess(&movedResult, &dummyConstSuccess); TestSuccess(&movedResult, &placeholderConstSuccess);
} }
// Test returning a success Result<const T*, E*> // Test returning a success Result<const T*, E*>
TEST(ResultBothPointerWithConstResult, ReturningSuccess) { TEST(ResultBothPointerWithConstResult, ReturningSuccess) {
auto CreateSuccess = []() -> Result<const float*, int> { return {&dummyConstSuccess}; }; auto CreateSuccess = []() -> Result<const float*, int> {
return {&placeholderConstSuccess};
};
Result<const float*, int> result = CreateSuccess(); Result<const float*, int> result = CreateSuccess();
TestSuccess(&result, &dummyConstSuccess); TestSuccess(&result, &placeholderConstSuccess);
} }
// Result<Ref<T>, E> // Result<Ref<T>, E>
// Test constructing an error Result<Ref<T>, E> // Test constructing an error Result<Ref<T>, E>
TEST(ResultRefT, ConstructingError) { TEST(ResultRefT, ConstructingError) {
Result<Ref<AClass>, int> result(std::make_unique<int>(dummyError)); Result<Ref<AClass>, int> result(std::make_unique<int>(placeholderError));
TestError(&result, dummyError); TestError(&result, placeholderError);
} }
// Test moving an error Result<Ref<T>, E> // Test moving an error Result<Ref<T>, E>
TEST(ResultRefT, MovingError) { TEST(ResultRefT, MovingError) {
Result<Ref<AClass>, int> result(std::make_unique<int>(dummyError)); Result<Ref<AClass>, int> result(std::make_unique<int>(placeholderError));
Result<Ref<AClass>, int> movedResult(std::move(result)); Result<Ref<AClass>, int> movedResult(std::move(result));
TestError(&movedResult, dummyError); TestError(&movedResult, placeholderError);
} }
// Test returning an error Result<Ref<T>, E> // Test returning an error Result<Ref<T>, E>
TEST(ResultRefT, ReturningError) { TEST(ResultRefT, ReturningError) {
auto CreateError = []() -> Result<Ref<AClass>, int> { auto CreateError = []() -> Result<Ref<AClass>, int> {
return {std::make_unique<int>(dummyError)}; return {std::make_unique<int>(placeholderError)};
}; };
Result<Ref<AClass>, int> result = CreateError(); Result<Ref<AClass>, int> result = CreateError();
TestError(&result, dummyError); TestError(&result, placeholderError);
} }
// Test constructing a success Result<Ref<T>, E> // Test constructing a success Result<Ref<T>, E>
@ -340,25 +342,25 @@ namespace {
// Test constructing an error Result<T, E> // Test constructing an error Result<T, E>
TEST(ResultGeneric, ConstructingError) { TEST(ResultGeneric, ConstructingError) {
Result<std::vector<float>, int> result(std::make_unique<int>(dummyError)); Result<std::vector<float>, int> result(std::make_unique<int>(placeholderError));
TestError(&result, dummyError); TestError(&result, placeholderError);
} }
// Test moving an error Result<T, E> // Test moving an error Result<T, E>
TEST(ResultGeneric, MovingError) { TEST(ResultGeneric, MovingError) {
Result<std::vector<float>, int> result(std::make_unique<int>(dummyError)); Result<std::vector<float>, int> result(std::make_unique<int>(placeholderError));
Result<std::vector<float>, int> movedResult(std::move(result)); Result<std::vector<float>, int> movedResult(std::move(result));
TestError(&movedResult, dummyError); TestError(&movedResult, placeholderError);
} }
// Test returning an error Result<T, E> // Test returning an error Result<T, E>
TEST(ResultGeneric, ReturningError) { TEST(ResultGeneric, ReturningError) {
auto CreateError = []() -> Result<std::vector<float>, int> { auto CreateError = []() -> Result<std::vector<float>, int> {
return {std::make_unique<int>(dummyError)}; return {std::make_unique<int>(placeholderError)};
}; };
Result<std::vector<float>, int> result = CreateError(); Result<std::vector<float>, int> result = CreateError();
TestError(&result, dummyError); TestError(&result, placeholderError);
} }
// Test constructing a success Result<T, E> // Test constructing a success Result<T, E>


@ -14,14 +14,14 @@
namespace { namespace {
class Dummy : public RefCounted { class Placeholder : public RefCounted {
public: public:
explicit Dummy(int* alive) : mAlive(alive) { explicit Placeholder(int* alive) : mAlive(alive) {
++*mAlive; ++*mAlive;
} }
private: private:
~Dummy() { ~Placeholder() {
--*mAlive; --*mAlive;
} }
@ -74,22 +74,22 @@ TEST(StackContainer, Vector) {
TEST(StackContainer, VectorDoubleDelete) { TEST(StackContainer, VectorDoubleDelete) {
// Regression testing for double-delete. // Regression testing for double-delete.
typedef StackVector<Ref<Dummy>, 2> Vector; typedef StackVector<Ref<Placeholder>, 2> Vector;
Vector vect; Vector vect;
int alive = 0; int alive = 0;
Ref<Dummy> dummy = AcquireRef(new Dummy(&alive)); Ref<Placeholder> placeholder = AcquireRef(new Placeholder(&alive));
EXPECT_EQ(alive, 1); EXPECT_EQ(alive, 1);
vect->push_back(dummy); vect->push_back(placeholder);
EXPECT_EQ(alive, 1); EXPECT_EQ(alive, 1);
Dummy* dummy_unref = dummy.Get(); Placeholder* placeholder_unref = placeholder.Get();
dummy = nullptr; placeholder = nullptr;
EXPECT_EQ(alive, 1); EXPECT_EQ(alive, 1);
auto itr = std::find(vect->begin(), vect->end(), dummy_unref); auto itr = std::find(vect->begin(), vect->end(), placeholder_unref);
EXPECT_EQ(itr->Get(), dummy_unref); EXPECT_EQ(itr->Get(), placeholder_unref);
vect->erase(itr); vect->erase(itr);
EXPECT_EQ(alive, 0); EXPECT_EQ(alive, 0);
@ -138,7 +138,7 @@ TEST(StackContainer, BufferAlignment) {
} }
template class StackVector<int, 2>; template class StackVector<int, 2>;
template class StackVector<Ref<Dummy>, 2>; template class StackVector<Ref<Placeholder>, 2>;
template <typename T, size_t size> template <typename T, size_t size>
void CheckStackVectorElements(const StackVector<T, size>& vec, std::initializer_list<T> expected) { void CheckStackVectorElements(const StackVector<T, size>& vec, std::initializer_list<T> expected) {


@ -1499,7 +1499,7 @@ class SetBindGroupValidationTest : public ValidationTest {
uint32_t count, uint32_t count,
bool expectation) { bool expectation) {
wgpu::RenderPipeline renderPipeline = CreateRenderPipeline(); wgpu::RenderPipeline renderPipeline = CreateRenderPipeline();
DummyRenderPass renderPass(device); PlaceholderRenderPass renderPass(device);
wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder(); wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
wgpu::RenderPassEncoder renderPassEncoder = commandEncoder.BeginRenderPass(&renderPass); wgpu::RenderPassEncoder renderPassEncoder = commandEncoder.BeginRenderPass(&renderPass);
@ -1602,7 +1602,7 @@ TEST_F(SetBindGroupValidationTest, VerifyGroupIfChangedAfterAction) {
} }
{ {
wgpu::RenderPipeline renderPipeline = CreateRenderPipeline(); wgpu::RenderPipeline renderPipeline = CreateRenderPipeline();
DummyRenderPass renderPass(device); PlaceholderRenderPass renderPass(device);
wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder(); wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
wgpu::RenderPassEncoder renderPassEncoder = commandEncoder.BeginRenderPass(&renderPass); wgpu::RenderPassEncoder renderPassEncoder = commandEncoder.BeginRenderPass(&renderPass);
@ -1966,7 +1966,7 @@ TEST_F(SetBindGroupPersistenceValidationTest, BindGroupBeforePipeline) {
device, bindGroupLayouts[1], device, bindGroupLayouts[1],
{{0, storageBuffer, 0, kBindingSize}, {1, uniformBuffer, 0, kBindingSize}}); {{0, storageBuffer, 0, kBindingSize}, {1, uniformBuffer, 0, kBindingSize}});
DummyRenderPass renderPass(device); PlaceholderRenderPass renderPass(device);
wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder(); wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
wgpu::RenderPassEncoder renderPassEncoder = commandEncoder.BeginRenderPass(&renderPass); wgpu::RenderPassEncoder renderPassEncoder = commandEncoder.BeginRenderPass(&renderPass);
@ -2020,7 +2020,7 @@ TEST_F(SetBindGroupPersistenceValidationTest, NotVulkanInheritance) {
device, bindGroupLayoutsB[0], device, bindGroupLayoutsB[0],
{{0, storageBuffer, 0, kBindingSize}, {1, uniformBuffer, 0, kBindingSize}}); {{0, storageBuffer, 0, kBindingSize}, {1, uniformBuffer, 0, kBindingSize}});
DummyRenderPass renderPass(device); PlaceholderRenderPass renderPass(device);
wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder(); wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
wgpu::RenderPassEncoder renderPassEncoder = commandEncoder.BeginRenderPass(&renderPass); wgpu::RenderPassEncoder renderPassEncoder = commandEncoder.BeginRenderPass(&renderPass);
@ -2270,8 +2270,8 @@ class BindingsValidationTest : public BindGroupLayoutCompatibilityTest {
wgpu::RenderPipeline pipeline, wgpu::RenderPipeline pipeline,
bool expectation) { bool expectation) {
wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
DummyRenderPass dummyRenderPass(device); PlaceholderRenderPass PlaceholderRenderPass(device);
wgpu::RenderPassEncoder rp = encoder.BeginRenderPass(&dummyRenderPass); wgpu::RenderPassEncoder rp = encoder.BeginRenderPass(&PlaceholderRenderPass);
for (uint32_t i = 0; i < count; ++i) { for (uint32_t i = 0; i < count; ++i) {
rp.SetBindGroup(i, bg[i]); rp.SetBindGroup(i, bg[i]);
} }


@ -31,12 +31,12 @@ TEST_F(CommandBufferValidationTest, Empty) {
// Test that a command buffer cannot be ended mid render pass // Test that a command buffer cannot be ended mid render pass
TEST_F(CommandBufferValidationTest, EndedMidRenderPass) { TEST_F(CommandBufferValidationTest, EndedMidRenderPass) {
DummyRenderPass dummyRenderPass(device); PlaceholderRenderPass PlaceholderRenderPass(device);
// Control case, command buffer ended after the pass is ended. // Control case, command buffer ended after the pass is ended.
{ {
wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass); wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass.End(); pass.End();
encoder.Finish(); encoder.Finish();
} }
@ -44,7 +44,7 @@ TEST_F(CommandBufferValidationTest, EndedMidRenderPass) {
// Error case, command buffer ended mid-pass. // Error case, command buffer ended mid-pass.
{ {
wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass); wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
ASSERT_DEVICE_ERROR( ASSERT_DEVICE_ERROR(
encoder.Finish(), encoder.Finish(),
HasSubstr("Command buffer recording ended before [RenderPassEncoder] was ended.")); HasSubstr("Command buffer recording ended before [RenderPassEncoder] was ended."));
@ -54,7 +54,7 @@ TEST_F(CommandBufferValidationTest, EndedMidRenderPass) {
// should fail too. // should fail too.
{ {
wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass); wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
ASSERT_DEVICE_ERROR( ASSERT_DEVICE_ERROR(
encoder.Finish(), encoder.Finish(),
HasSubstr("Command buffer recording ended before [RenderPassEncoder] was ended.")); HasSubstr("Command buffer recording ended before [RenderPassEncoder] was ended."));
@ -97,12 +97,12 @@ TEST_F(CommandBufferValidationTest, EndedMidComputePass) {
// Test that a render pass cannot be ended twice // Test that a render pass cannot be ended twice
TEST_F(CommandBufferValidationTest, RenderPassEndedTwice) { TEST_F(CommandBufferValidationTest, RenderPassEndedTwice) {
DummyRenderPass dummyRenderPass(device); PlaceholderRenderPass PlaceholderRenderPass(device);
// Control case, pass is ended once // Control case, pass is ended once
{ {
wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass); wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass.End(); pass.End();
encoder.Finish(); encoder.Finish();
} }
@ -110,7 +110,7 @@ TEST_F(CommandBufferValidationTest, RenderPassEndedTwice) {
// Error case, pass ended twice // Error case, pass ended twice
{ {
wgpu::CommandEncoder encoder = device.CreateCommandEncoder(); wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass); wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass.End(); pass.End();
pass.End(); pass.End();
ASSERT_DEVICE_ERROR( ASSERT_DEVICE_ERROR(
@ -143,12 +143,12 @@ TEST_F(CommandBufferValidationTest, ComputePassEndedTwice) {
// Test that beginning a compute pass before ending the previous pass causes an error. // Test that beginning a compute pass before ending the previous pass causes an error.
TEST_F(CommandBufferValidationTest, BeginComputePassBeforeEndPreviousPass) { TEST_F(CommandBufferValidationTest, BeginComputePassBeforeEndPreviousPass) {
-DummyRenderPass dummyRenderPass(device);
+PlaceholderRenderPass PlaceholderRenderPass(device);
// Beginning a compute pass before ending a render pass causes an error.
{
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-wgpu::RenderPassEncoder renderPass = encoder.BeginRenderPass(&dummyRenderPass);
+wgpu::RenderPassEncoder renderPass = encoder.BeginRenderPass(&PlaceholderRenderPass);
wgpu::ComputePassEncoder computePass = encoder.BeginComputePass();
computePass.End();
renderPass.End();
@@ -168,13 +168,13 @@ TEST_F(CommandBufferValidationTest, BeginComputePassBeforeEndPreviousPass) {
// Test that beginning a render pass before ending the previous pass causes an error.
TEST_F(CommandBufferValidationTest, BeginRenderPassBeforeEndPreviousPass) {
-DummyRenderPass dummyRenderPass(device);
+PlaceholderRenderPass PlaceholderRenderPass(device);
// Beginning a render pass before ending the render pass causes an error.
{
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-wgpu::RenderPassEncoder renderPass1 = encoder.BeginRenderPass(&dummyRenderPass);
-wgpu::RenderPassEncoder renderPass2 = encoder.BeginRenderPass(&dummyRenderPass);
+wgpu::RenderPassEncoder renderPass1 = encoder.BeginRenderPass(&PlaceholderRenderPass);
+wgpu::RenderPassEncoder renderPass2 = encoder.BeginRenderPass(&PlaceholderRenderPass);
renderPass2.End();
renderPass1.End();
ASSERT_DEVICE_ERROR(encoder.Finish());
@@ -184,7 +184,7 @@ TEST_F(CommandBufferValidationTest, BeginRenderPassBeforeEndPreviousPass) {
{
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
wgpu::ComputePassEncoder computePass = encoder.BeginComputePass();
-wgpu::RenderPassEncoder renderPass = encoder.BeginRenderPass(&dummyRenderPass);
+wgpu::RenderPassEncoder renderPass = encoder.BeginRenderPass(&PlaceholderRenderPass);
renderPass.End();
computePass.End();
ASSERT_DEVICE_ERROR(encoder.Finish());
@@ -229,12 +229,12 @@ TEST_F(CommandBufferValidationTest, CallsAfterAFailedFinish) {
// Test that passes which are de-referenced prior to ending still allow the correct errors to be
// produced.
TEST_F(CommandBufferValidationTest, PassDereferenced) {
-DummyRenderPass dummyRenderPass(device);
+PlaceholderRenderPass PlaceholderRenderPass(device);
// Control case, command buffer ended after the pass is ended.
{
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass.End();
encoder.Finish();
}
@@ -242,7 +242,7 @@ TEST_F(CommandBufferValidationTest, PassDereferenced) {
// Error case, no reference is kept to a render pass.
{
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-encoder.BeginRenderPass(&dummyRenderPass);
+encoder.BeginRenderPass(&PlaceholderRenderPass);
ASSERT_DEVICE_ERROR(
encoder.Finish(),
HasSubstr("Command buffer recording ended before [RenderPassEncoder] was ended."));
@@ -260,7 +260,7 @@ TEST_F(CommandBufferValidationTest, PassDereferenced) {
// Error case, beginning a new pass after failing to end a de-referenced pass.
{
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-encoder.BeginRenderPass(&dummyRenderPass);
+encoder.BeginRenderPass(&PlaceholderRenderPass);
wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
pass.End();
ASSERT_DEVICE_ERROR(
@@ -301,12 +301,12 @@ TEST_F(CommandBufferValidationTest, DestroyEncoder) {
// only way to trigger the destroy call is by losing all references which means we cannot
// call finish.
DAWN_SKIP_TEST_IF(UsesWire());
-DummyRenderPass dummyRenderPass(device);
+PlaceholderRenderPass PlaceholderRenderPass(device);
// Control case, command buffer ended after the pass is ended.
{
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass.End();
encoder.Finish();
}
@@ -314,7 +314,7 @@ TEST_F(CommandBufferValidationTest, DestroyEncoder) {
// Destroyed encoder with encoded commands should emit error on finish.
{
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass.End();
dawn::native::FromAPI(encoder.Get())->Destroy();
ASSERT_DEVICE_ERROR(encoder.Finish(), HasSubstr("Destroyed encoder cannot be finished."));
@@ -323,7 +323,7 @@ TEST_F(CommandBufferValidationTest, DestroyEncoder) {
// Destroyed encoder with encoded commands shouldn't emit an error if never finished.
{
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass.End();
dawn::native::FromAPI(encoder.Get())->Destroy();
}
@@ -332,7 +332,7 @@ TEST_F(CommandBufferValidationTest, DestroyEncoder) {
{
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
dawn::native::FromAPI(encoder.Get())->Destroy();
-wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass.End();
ASSERT_DEVICE_ERROR(encoder.Finish(), HasSubstr("Destroyed encoder cannot be finished."));
}
@@ -341,14 +341,14 @@ TEST_F(CommandBufferValidationTest, DestroyEncoder) {
{
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
dawn::native::FromAPI(encoder.Get())->Destroy();
-wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass.End();
}
// Destroying a finished encoder should not emit any errors.
{
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass.End();
encoder.Finish();
dawn::native::FromAPI(encoder.Get())->Destroy();

View File
@@ -21,7 +21,7 @@ class DebugMarkerValidationTest : public ValidationTest {};
// Correct usage of debug markers should succeed in render pass.
TEST_F(DebugMarkerValidationTest, RenderSuccess) {
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
{
@@ -39,7 +39,7 @@ TEST_F(DebugMarkerValidationTest, RenderSuccess) {
// A PushDebugGroup call without a following PopDebugGroup produces an error in render pass.
TEST_F(DebugMarkerValidationTest, RenderUnbalancedPush) {
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
{
@@ -56,7 +56,7 @@ TEST_F(DebugMarkerValidationTest, RenderUnbalancedPush) {
// A PopDebugGroup call without a preceding PushDebugGroup produces an error in render pass.
TEST_F(DebugMarkerValidationTest, RenderUnbalancedPop) {
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
{
@@ -224,7 +224,7 @@ TEST_F(DebugMarkerValidationTest, NestedComputeInCommandEncoderIndependent) {
// It is possible to nested pushes in a render pass in a command encoder.
TEST_F(DebugMarkerValidationTest, NestedRenderInCommandEncoder) {
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
encoder.PushDebugGroup("Event Start");
@@ -241,7 +241,7 @@ TEST_F(DebugMarkerValidationTest, NestedRenderInCommandEncoder) {
// Command encoder and render pass pushes must be balanced independently.
TEST_F(DebugMarkerValidationTest, NestedRenderInCommandEncoderIndependent) {
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
encoder.PushDebugGroup("Event Start");

View File
@@ -72,7 +72,7 @@ class DrawIndirectValidationTest : public ValidationTest {
wgpu::Buffer indirectBuffer =
utils::CreateBufferFromData<uint32_t>(device, usage, bufferList);
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
pass.SetPipeline(pipeline);

View File
@@ -203,7 +203,7 @@ class SetBlendConstantTest : public ValidationTest {};
// Test to check basic use of SetBlendConstantTest
TEST_F(SetBlendConstantTest, Success) {
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
{
@@ -217,7 +217,7 @@ TEST_F(SetBlendConstantTest, Success) {
// Test that SetBlendConstant allows any value, large, small or negative
TEST_F(SetBlendConstantTest, AnyValueAllowed) {
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
{
@@ -233,7 +233,7 @@ class SetStencilReferenceTest : public ValidationTest {};
// Test to check basic use of SetStencilReferenceTest
TEST_F(SetStencilReferenceTest, Success) {
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
{
@@ -246,7 +246,7 @@ TEST_F(SetStencilReferenceTest, Success) {
// Test that SetStencilReference allows any bit to be set
TEST_F(SetStencilReferenceTest, AllBitsAllowed) {
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
{

View File
@@ -50,7 +50,7 @@ TEST_F(IndexBufferValidationTest, UndefinedIndexFormat) {
bufferDesc.size = 256;
wgpu::Buffer buffer = device.CreateBuffer(&bufferDesc);
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Undefined);
@@ -65,7 +65,7 @@ TEST_F(IndexBufferValidationTest, InvalidIndexFormat) {
bufferDesc.size = 256;
wgpu::Buffer buffer = device.CreateBuffer(&bufferDesc);
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
pass.SetIndexBuffer(buffer, static_cast<wgpu::IndexFormat>(404));
@@ -80,7 +80,7 @@ TEST_F(IndexBufferValidationTest, IndexBufferOffsetOOBValidation) {
bufferDesc.size = 256;
wgpu::Buffer buffer = device.CreateBuffer(&bufferDesc);
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
// Control case, using the full buffer, with or without an explicit size is valid.
{
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
@@ -232,7 +232,7 @@ TEST_F(IndexBufferValidationTest, InvalidUsage) {
wgpu::Buffer copyBuffer =
utils::CreateBufferFromData<uint32_t>(device, wgpu::BufferUsage::CopySrc, {0, 1, 2});
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
// Control case: using the index buffer is valid.
{
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
@@ -272,7 +272,7 @@ TEST_F(IndexBufferValidationTest, OffsetAlignment) {
wgpu::Buffer indexBuffer =
utils::CreateBufferFromData<uint32_t>(device, wgpu::BufferUsage::Index, {0, 1, 2});
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
// Control cases: index buffer offset is a multiple of the index format size
{
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();

View File
@@ -300,7 +300,7 @@ class MinBufferSizeTestsBase : public ValidationTest {
void TestDraw(const wgpu::RenderPipeline& renderPipeline,
const std::vector<wgpu::BindGroup>& bindGroups,
bool expectation) {
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
wgpu::RenderPassEncoder renderPassEncoder = commandEncoder.BeginRenderPass(&renderPass);

View File
@@ -82,7 +82,7 @@ class OcclusionQueryValidationTest : public QuerySetValidationTest {};
// Test the occlusionQuerySet in RenderPassDescriptor
TEST_F(OcclusionQueryValidationTest, InvalidOcclusionQuerySet) {
wgpu::QuerySet occlusionQuerySet = CreateQuerySet(device, wgpu::QueryType::Occlusion, 2);
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
// Success
{
@@ -100,7 +100,7 @@ TEST_F(OcclusionQueryValidationTest, InvalidOcclusionQuerySet) {
// Fail to begin occlusion query if the occlusionQuerySet is not set in RenderPassDescriptor
{
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-DummyRenderPass renderPassWithoutOcclusion(device);
+PlaceholderRenderPass renderPassWithoutOcclusion(device);
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassWithoutOcclusion);
pass.BeginOcclusionQuery(0);
pass.EndOcclusionQuery();
@@ -142,7 +142,7 @@ TEST_F(OcclusionQueryValidationTest, InvalidOcclusionQuerySet) {
// Test query index of occlusion query
TEST_F(OcclusionQueryValidationTest, InvalidQueryIndex) {
wgpu::QuerySet occlusionQuerySet = CreateQuerySet(device, wgpu::QueryType::Occlusion, 2);
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
renderPass.occlusionQuerySet = occlusionQuerySet;
// Fail to begin occlusion query if the query index exceeds the number of queries in query set
@@ -186,7 +186,7 @@ TEST_F(OcclusionQueryValidationTest, InvalidQueryIndex) {
// Test the correspondence between BeginOcclusionQuery and EndOcclusionQuery
TEST_F(OcclusionQueryValidationTest, InvalidBeginAndEnd) {
wgpu::QuerySet occlusionQuerySet = CreateQuerySet(device, wgpu::QueryType::Occlusion, 2);
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
renderPass.occlusionQuerySet = occlusionQuerySet;
// Fail to begin an occlusion query without corresponding end operation
@@ -242,7 +242,7 @@ class TimestampQueryValidationTest : public QuerySetValidationTest {
void EncodeRenderPassWithTimestampWrites(
wgpu::CommandEncoder encoder,
const std::vector<wgpu::RenderPassTimestampWrite>& timestampWrites) {
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
renderPass.timestampWriteCount = timestampWrites.size();
renderPass.timestampWrites = timestampWrites.data();
@@ -290,7 +290,7 @@ TEST_F(TimestampQueryValidationTest, UnnecessaryPipelineStatistics) {
TEST_F(TimestampQueryValidationTest, SetOcclusionQueryWithTimestampQuerySet) {
// Fail to begin render pass if the type of occlusionQuerySet is not Occlusion
wgpu::QuerySet querySet = CreateQuerySet(device, wgpu::QueryType::Timestamp, 1);
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
renderPass.occlusionQuerySet = querySet;
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
@@ -558,7 +558,7 @@ TEST_F(TimestampQueryValidationTest, WriteTimestampOnComputePassEncoder) {
// Test write timestamp on render pass encoder
TEST_F(TimestampQueryValidationTest, WriteTimestampOnRenderPassEncoder) {
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
wgpu::QuerySet timestampQuerySet = CreateQuerySet(device, wgpu::QueryType::Timestamp, 2);
wgpu::QuerySet occlusionQuerySet = CreateQuerySet(device, wgpu::QueryType::Occlusion, 2);
@@ -707,7 +707,7 @@ TEST_F(PipelineStatisticsQueryValidationTest, BeginRenderPassWithPipelineStatist
wgpu::QuerySet querySet =
CreateQuerySet(device, wgpu::QueryType::PipelineStatistics, 1,
{wgpu::PipelineStatisticName::VertexShaderInvocations});
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
renderPass.occlusionQuerySet = querySet;
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();

View File
@@ -44,7 +44,7 @@ namespace {
@group(1) @binding(0) var<uniform> uniforms : Uniforms;
struct Storage {
-dummy : array<f32>
+placeholder : array<f32>
}
@group(1) @binding(1) var<storage, read_write> ssbo : Storage;
@@ -79,7 +79,7 @@ namespace {
vertexBuffer = utils::CreateBufferFromData(device, kVertices, sizeof(kVertices),
wgpu::BufferUsage::Vertex);
-// Dummy storage buffer.
+// Placeholder storage buffer.
wgpu::Buffer storageBuffer = utils::CreateBufferFromData(
device, kVertices, sizeof(kVertices), wgpu::BufferUsage::Storage);
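Side note (not part of this change): the placeholder storage buffer above backs the WGSL binding `@group(1) @binding(1) var<storage, read_write> ssbo : Storage;`. A minimal sketch of how it might be wired into a bind group; the layout entry and Fragment-only visibility are assumptions, not code from this CL:

    // Sketch only: bind storageBuffer at binding 1 to match the WGSL declaration above.
    // The visibility flag is an assumption; the test's real pipeline layout may differ.
    wgpu::BindGroupLayout storageBgl = utils::MakeBindGroupLayout(
        device, {{1, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
    wgpu::BindGroup storageBindGroup =
        utils::MakeBindGroup(device, storageBgl, {{1, storageBuffer}});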
@@ -125,7 +125,7 @@ namespace {
// Test creating and encoding an empty render bundle.
TEST_F(RenderBundleValidationTest, Empty) {
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
utils::ComboRenderBundleEncoderDescriptor desc = {};
desc.colorFormatsCount = 1;
@@ -145,7 +145,7 @@ TEST_F(RenderBundleValidationTest, Empty) {
// This is a regression test for error render bundle encoders containing no commands would
// produce non-error render bundles.
TEST_F(RenderBundleValidationTest, EmptyErrorEncoderProducesErrorBundle) {
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
utils::ComboRenderBundleEncoderDescriptor desc = {};
// Having 0 attachments is invalid!
@@ -165,7 +165,7 @@ TEST_F(RenderBundleValidationTest, EmptyErrorEncoderProducesErrorBundle) {
// Test executing zero render bundles.
TEST_F(RenderBundleValidationTest, ZeroBundles) {
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&renderPass);
@@ -176,7 +176,7 @@ TEST_F(RenderBundleValidationTest, ZeroBundles) {
// Test successfully creating and encoding a render bundle into a command buffer.
TEST_F(RenderBundleValidationTest, SimpleSuccess) {
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
utils::ComboRenderBundleEncoderDescriptor desc = {};
desc.colorFormatsCount = 1;
@@ -199,7 +199,7 @@ TEST_F(RenderBundleValidationTest, SimpleSuccess) {
// Test that render bundle debug groups must be well nested.
TEST_F(RenderBundleValidationTest, DebugGroups) {
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
utils::ComboRenderBundleEncoderDescriptor desc = {};
desc.colorFormatsCount = 1;
@@ -258,7 +258,7 @@ TEST_F(RenderBundleValidationTest, DebugGroups) {
// Test render bundles do not inherit command buffer state
TEST_F(RenderBundleValidationTest, StateInheritance) {
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
utils::ComboRenderBundleEncoderDescriptor desc = {};
desc.colorFormatsCount = 1;
@@ -343,7 +343,7 @@ TEST_F(RenderBundleValidationTest, StateInheritance) {
// Test render bundles do not persist command buffer state
TEST_F(RenderBundleValidationTest, StatePersistence) {
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
utils::ComboRenderBundleEncoderDescriptor desc = {};
desc.colorFormatsCount = 1;
@@ -428,7 +428,7 @@ TEST_F(RenderBundleValidationTest, StatePersistence) {
// Test executing render bundles clears command buffer state
TEST_F(RenderBundleValidationTest, ClearsState) {
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
utils::ComboRenderBundleEncoderDescriptor desc = {};
desc.colorFormatsCount = 1;
@@ -520,7 +520,7 @@ TEST_F(RenderBundleValidationTest, ClearsState) {
// Test creating and encoding multiple render bundles.
TEST_F(RenderBundleValidationTest, MultipleBundles) {
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
utils::ComboRenderBundleEncoderDescriptor desc = {};
desc.colorFormatsCount = 1;
@@ -553,7 +553,7 @@ TEST_F(RenderBundleValidationTest, MultipleBundles) {
// Test that is is valid to execute a render bundle more than once.
TEST_F(RenderBundleValidationTest, ExecuteMultipleTimes) {
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
utils::ComboRenderBundleEncoderDescriptor desc = {};
desc.colorFormatsCount = 1;
@@ -696,7 +696,7 @@ TEST_F(RenderBundleValidationTest, DepthStencilReadOnly) {
}
// Test that resource usages are validated inside render bundles.
TEST_F(RenderBundleValidationTest, UsageTracking) {
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
utils::ComboRenderBundleEncoderDescriptor desc = {};
desc.colorFormatsCount = 1;
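Side note (not part of this change): for readers unfamiliar with the API under test, a rough sketch of the create-and-execute flow these bundle tests exercise. The `cColorFormats` member and the RGBA8Unorm format are assumptions here; each real test configures the descriptor for its own case:

    // Sketch only: record an (empty) render bundle and execute it inside a render pass.
    utils::ComboRenderBundleEncoderDescriptor desc = {};
    desc.colorFormatsCount = 1;
    desc.cColorFormats[0] = wgpu::TextureFormat::RGBA8Unorm;
    wgpu::RenderBundleEncoder bundleEncoder = device.CreateRenderBundleEncoder(&desc);
    wgpu::RenderBundle bundle = bundleEncoder.Finish();

    PlaceholderRenderPass renderPass(device);
    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
    pass.ExecuteBundles(1, &bundle);
    pass.End();
    encoder.Finish();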
View File
@@ -86,8 +86,8 @@ namespace {
CreateBuffer(4, wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Index);
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-DummyRenderPass dummyRenderPass(device);
-wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+PlaceholderRenderPass PlaceholderRenderPass(device);
+wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
pass.SetVertexBuffer(0, buffer);
pass.End();
@@ -130,8 +130,8 @@ namespace {
// It is invalid to use the buffer as both index and storage in render pass
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-DummyRenderPass dummyRenderPass(device);
-wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+PlaceholderRenderPass PlaceholderRenderPass(device);
+wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
pass.SetBindGroup(0, bg);
pass.End();
@@ -195,8 +195,8 @@ namespace {
{
// It is valid to use multiple storage usages on the same buffer in render pass
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-DummyRenderPass dummyRenderPass(device);
-wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+PlaceholderRenderPass PlaceholderRenderPass(device);
+wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass.SetBindGroup(0, bg);
pass.End();
encoder.Finish();
@@ -235,14 +235,14 @@ namespace {
// Use these two buffers as both index and storage in different render passes
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-DummyRenderPass dummyRenderPass(device);
-wgpu::RenderPassEncoder pass0 = encoder.BeginRenderPass(&dummyRenderPass);
+PlaceholderRenderPass PlaceholderRenderPass(device);
+wgpu::RenderPassEncoder pass0 = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass0.SetIndexBuffer(buffer0, wgpu::IndexFormat::Uint32);
pass0.SetBindGroup(0, bg1);
pass0.End();
-wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&dummyRenderPass);
+wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass1.SetIndexBuffer(buffer1, wgpu::IndexFormat::Uint32);
pass1.SetBindGroup(0, bg0);
pass1.End();
@@ -297,8 +297,8 @@ namespace {
pass0.SetBindGroup(0, bg0);
pass0.End();
-DummyRenderPass dummyRenderPass(device);
-wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&dummyRenderPass);
+PlaceholderRenderPass PlaceholderRenderPass(device);
+wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass1.SetBindGroup(1, bg1);
pass1.End();
@@ -325,8 +325,8 @@ namespace {
// It is not allowed to use the same buffer as both readable and writable in different
// draws within the same render pass.
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-DummyRenderPass dummyRenderPass(device);
-wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+PlaceholderRenderPass PlaceholderRenderPass(device);
+wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass.SetPipeline(rp);
pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
@@ -392,8 +392,8 @@ namespace {
// It is invalid to use the same buffer as both readable and writable usages in a single
// draw
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-DummyRenderPass dummyRenderPass(device);
-wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+PlaceholderRenderPass PlaceholderRenderPass(device);
+wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass.SetPipeline(rp);
pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
@@ -455,8 +455,8 @@ namespace {
{
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
encoder.CopyBufferToBuffer(bufferSrc, 0, bufferDst, 0, 4);
-DummyRenderPass dummyRenderPass(device);
-wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+PlaceholderRenderPass PlaceholderRenderPass(device);
+wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass.SetBindGroup(0, bg0);
pass.End();
encoder.Finish();
@@ -492,14 +492,14 @@ namespace {
device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer0}});
-DummyRenderPass dummyRenderPass(device);
+PlaceholderRenderPass PlaceholderRenderPass(device);
// Set index buffer twice. The second one overwrites the first one. No buffer is used as
// both read and write in the same pass. But the overwritten index buffer (buffer0) still
// take effect during resource tracking.
{
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass.SetIndexBuffer(buffer0, wgpu::IndexFormat::Uint32);
pass.SetIndexBuffer(buffer1, wgpu::IndexFormat::Uint32);
pass.SetBindGroup(0, bg);
@@ -511,7 +511,7 @@ namespace {
// read and write in the same pass
{
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass.SetIndexBuffer(buffer1, wgpu::IndexFormat::Uint32);
pass.SetIndexBuffer(buffer0, wgpu::IndexFormat::Uint32);
pass.SetBindGroup(0, bg);
@@ -524,7 +524,7 @@ namespace {
// (buffer0) still take effect during resource tracking.
{
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass.SetVertexBuffer(0, buffer0);
pass.SetVertexBuffer(0, buffer1);
pass.SetBindGroup(0, bg);
@@ -536,7 +536,7 @@ namespace {
// buffer0 is used as both read and write in the same pass
{
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass.SetVertexBuffer(0, buffer1);
pass.SetVertexBuffer(0, buffer0);
pass.SetBindGroup(0, bg);
@@ -562,14 +562,14 @@ namespace {
wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl, {{0, buffer0}});
wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl, {{0, buffer1}});
-DummyRenderPass dummyRenderPass(device);
+PlaceholderRenderPass PlaceholderRenderPass(device);
// Set bind group on the same index twice. The second one overwrites the first one.
// No buffer is used as both read and write in the same pass. But the overwritten
// bind group still take effect during resource tracking.
{
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass.SetIndexBuffer(buffer0, wgpu::IndexFormat::Uint32);
pass.SetBindGroup(0, bg0);
pass.SetBindGroup(0, bg1);
@@ -581,7 +581,7 @@ namespace {
// buffer0 is used as both read and write in the same pass
{
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass.SetIndexBuffer(buffer0, wgpu::IndexFormat::Uint32);
pass.SetBindGroup(0, bg1);
pass.SetBindGroup(0, bg0);
@@ -656,8 +656,8 @@ namespace {
// These two bindings are invisible in render pass. But we still track these bindings.
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-DummyRenderPass dummyRenderPass(device);
-wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+PlaceholderRenderPass PlaceholderRenderPass(device);
+wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass.SetBindGroup(0, bg);
pass.End();
ASSERT_DEVICE_ERROR(encoder.Finish());
@@ -702,8 +702,8 @@ namespace {
// Buffer usage in compute stage in bind group conflicts with index buffer. And binding
// for compute stage is not visible in render pass. But we still track this binding.
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-DummyRenderPass dummyRenderPass(device);
-wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+PlaceholderRenderPass PlaceholderRenderPass(device);
+wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
pass.SetBindGroup(0, bg);
pass.End();
@@ -777,8 +777,8 @@ namespace {
// Resource in bg1 conflicts with resources used in bg0. However, bindings in bg1 is
// not used in pipeline. But we still track this binding.
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-DummyRenderPass dummyRenderPass(device);
-wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+PlaceholderRenderPass PlaceholderRenderPass(device);
+wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass.SetBindGroup(0, bg0);
pass.SetBindGroup(1, bg1);
pass.SetPipeline(rp);
@@ -963,8 +963,8 @@ namespace {
wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl, {{0, view}});
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-DummyRenderPass dummyRenderPass(device);
-wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+PlaceholderRenderPass PlaceholderRenderPass(device);
+wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass.SetBindGroup(0, bg);
pass.SetBindGroup(1, bg1);
pass.End();
@@ -1155,8 +1155,8 @@ namespace {
pass0.SetBindGroup(0, writeBG);
pass0.End();
-DummyRenderPass dummyRenderPass(device);
-wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&dummyRenderPass);
+PlaceholderRenderPass PlaceholderRenderPass(device);
+wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass1.SetBindGroup(0, readBG);
pass1.End();
@@ -1190,8 +1190,8 @@ namespace {
// It is not allowed to use the same texture as both readable and writable in different
// draws within the same render pass.
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-DummyRenderPass dummyRenderPass(device);
-wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+PlaceholderRenderPass PlaceholderRenderPass(device);
+wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass.SetPipeline(rp);
pass.SetBindGroup(0, sampledBG);
@@ -1262,8 +1262,8 @@ namespace {
// It is invalid to use the same texture as both readable and writable usages in a
// single draw
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-DummyRenderPass dummyRenderPass(device);
-wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+PlaceholderRenderPass PlaceholderRenderPass(device);
+wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass.SetPipeline(rp);
pass.SetBindGroup(0, sampledBG);
@@ -1470,8 +1470,8 @@ namespace {
// These two bindings are invisible in render pass. But we still track these bindings.
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-DummyRenderPass dummyRenderPass(device);
-wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+PlaceholderRenderPass PlaceholderRenderPass(device);
+wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass.SetBindGroup(0, bg);
pass.End();
ASSERT_DEVICE_ERROR(encoder.Finish());
@@ -1595,8 +1595,8 @@ namespace {
// Texture binding in readBG conflicts with texture binding in writeBG. The binding
// in writeBG is not used in pipeline. But we still track this binding.
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-DummyRenderPass dummyRenderPass(device);
-wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+PlaceholderRenderPass PlaceholderRenderPass(device);
+wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass.SetBindGroup(0, readBG);
pass.SetBindGroup(1, writeBG);
pass.SetPipeline(rp);
@@ -1644,8 +1644,8 @@ namespace {
// Test that indirect + readonly is allowed in the same render pass.
{
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-DummyRenderPass dummyRenderPass(device);
-wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+PlaceholderRenderPass PlaceholderRenderPass(device);
+wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass.SetPipeline(rp);
pass.SetBindGroup(0, readBG);
pass.DrawIndirect(buffer, 0);
@@ -1656,8 +1656,8 @@ namespace {
// Test that indirect + writable is disallowed in the same render pass.
{
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-DummyRenderPass dummyRenderPass(device);
-wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
+PlaceholderRenderPass PlaceholderRenderPass(device);
+wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
pass.SetPipeline(rp);
pass.SetBindGroup(0, writeBG);
pass.DrawIndirect(buffer, 0);

View File
@@ -39,7 +39,7 @@ class UnsafeAPIValidationTest : public ValidationTest {
// Check that pipeline overridable constants are disallowed as part of unsafe APIs.
// TODO(dawn:1041) Remove when implementation for all backend is added
TEST_F(UnsafeAPIValidationTest, PipelineOverridableConstants) {
-// Create the dummy compute pipeline.
+// Create the placeholder compute pipeline.
wgpu::ComputePipelineDescriptor pipelineDescBase;
pipelineDescBase.compute.entryPoint = "main";

View File
@@ -252,7 +252,7 @@ void ValidationTest::OnDeviceLost(WGPUDeviceLostReason reason,
ASSERT(false);
}
-ValidationTest::DummyRenderPass::DummyRenderPass(const wgpu::Device& device)
+ValidationTest::PlaceholderRenderPass::PlaceholderRenderPass(const wgpu::Device& device)
: attachmentFormat(wgpu::TextureFormat::RGBA8Unorm), width(400), height(400) {
wgpu::TextureDescriptor descriptor;
descriptor.dimension = wgpu::TextureDimension::e2D;
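Side note (not part of this change): the hunk ends before the constructor body does. A minimal sketch of how such a helper typically finishes creating its color attachment; the remaining field assignments are assumed rather than quoted from the file:

    // Sketch only: finish the texture descriptor started above and create the attachment.
    descriptor.size = {width, height, 1};
    descriptor.format = attachmentFormat;
    descriptor.mipLevelCount = 1;
    descriptor.sampleCount = 1;
    descriptor.usage = wgpu::TextureUsage::RenderAttachment;
    attachment = device.CreateTexture(&descriptor);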
View File
@@ -115,9 +115,9 @@ class ValidationTest : public testing::Test {
// Helper functions to create objects to test validation.
-struct DummyRenderPass : public wgpu::RenderPassDescriptor {
+struct PlaceholderRenderPass : public wgpu::RenderPassDescriptor {
public:
-explicit DummyRenderPass(const wgpu::Device& device);
+explicit PlaceholderRenderPass(const wgpu::Device& device);
wgpu::Texture attachment;
wgpu::TextureFormat attachmentFormat;
uint32_t width;
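Side note (not part of this change): because the struct derives from wgpu::RenderPassDescriptor, a test can pass it straight to BeginRenderPass. A minimal usage sketch mirroring the pattern used throughout these tests:

    // Sketch only: the renamed helper stands in for a fully configured render pass.
    PlaceholderRenderPass renderPass(device);
    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
    pass.End();
    wgpu::CommandBuffer commands = encoder.Finish();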
View File
@@ -25,7 +25,7 @@ class VertexBufferValidationTest : public ValidationTest {
void SetUp() override {
ValidationTest::SetUp();
-// dummy vertex shader module
+// Placeholder vertex shader module
vsModule = utils::CreateShaderModule(device, R"(
@stage(vertex) fn main() -> @builtin(position) vec4<f32> {
return vec4<f32>(0.0, 0.0, 0.0, 0.0);
@@ -106,7 +106,7 @@ class VertexBufferValidationTest : public ValidationTest {
// Check that vertex buffers still count as bound if we switch the pipeline.
TEST_F(VertexBufferValidationTest, VertexBuffersInheritedBetweenPipelines) {
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
wgpu::ShaderModule vsModule2 = MakeVertexShader(2);
wgpu::ShaderModule vsModule1 = MakeVertexShader(1);
@@ -143,7 +143,7 @@ TEST_F(VertexBufferValidationTest, VertexBuffersInheritedBetweenPipelines) {
// Check that vertex buffers that are set are reset between render passes.
TEST_F(VertexBufferValidationTest, VertexBuffersNotInheritedBetweenRenderPasses) {
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
wgpu::ShaderModule vsModule2 = MakeVertexShader(2);
wgpu::ShaderModule vsModule1 = MakeVertexShader(1);
@@ -195,7 +195,7 @@ TEST_F(VertexBufferValidationTest, VertexBuffersNotInheritedBetweenRenderPasses)
TEST_F(VertexBufferValidationTest, VertexBufferSlotValidation) {
wgpu::Buffer buffer = MakeVertexBuffer();
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
// Control case: using the last vertex buffer slot in render passes is ok.
{
@@ -238,7 +238,7 @@ TEST_F(VertexBufferValidationTest, VertexBufferSlotValidation) {
TEST_F(VertexBufferValidationTest, VertexBufferOffsetOOBValidation) {
wgpu::Buffer buffer = MakeVertexBuffer();
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
// Control case, using the full buffer, with or without an explicit size is valid.
{
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
@@ -312,7 +312,7 @@ TEST_F(VertexBufferValidationTest, InvalidUsage) {
wgpu::Buffer indexBuffer =
utils::CreateBufferFromData<uint32_t>(device, wgpu::BufferUsage::Index, {0, 0, 0});
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
// Control case: using the vertex buffer is valid.
{
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
@@ -351,7 +351,7 @@ TEST_F(VertexBufferValidationTest, InvalidUsage) {
TEST_F(VertexBufferValidationTest, OffsetAlignment) {
wgpu::Buffer vertexBuffer = MakeVertexBuffer();
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
// Control cases: vertex buffer offset is a multiple of 4
{
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
@@ -375,7 +375,7 @@ TEST_F(VertexBufferValidationTest, OffsetAlignment) {
// Check vertex buffer stride requirements for draw command.
TEST_F(VertexBufferValidationTest, DrawStrideLimitsVertex) {
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
// Create a buffer of size 28, containing 4 float32 elements, array stride size = 8
// The last element doesn't have the full stride size
@@ -506,7 +506,7 @@ TEST_F(VertexBufferValidationTest, DrawStrideLimitsVertex) {
// Check instance buffer stride requirements with instanced attributes for draw command.
TEST_F(VertexBufferValidationTest, DrawStrideLimitsInstance) {
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
// Create a buffer of size 28, containing 4 float32 elements, array stride size = 8
// The last element doesn't have the full stride size
@@ -637,7 +637,7 @@ TEST_F(VertexBufferValidationTest, DrawStrideLimitsInstance) {
// Check vertex buffer stride requirements with instanced attributes for draw indexed command.
TEST_F(VertexBufferValidationTest, DrawIndexedStrideLimitsInstance) {
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
// Create a buffer of size 28, containing 4 float32 elements, array stride size = 8
// The last element doesn't have the full stride size
@@ -778,7 +778,7 @@ TEST_F(VertexBufferValidationTest, DrawIndexedStrideLimitsInstance) {
// Check last stride is computed correctly for vertex buffer with multiple attributes.
TEST_F(VertexBufferValidationTest, DrawStrideLimitsVertexMultipleAttributes) {
-DummyRenderPass renderPass(device);
+PlaceholderRenderPass renderPass(device);
// Create a buffer of size 44, array stride size = 12
wgpu::BufferDescriptor descriptor;
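Side note (not part of this change): the stride-limit tests start from a plain vertex buffer like the one sketched below; the exact size and usage flags here are assumptions based on the comment above:

    // Sketch only: a 44-byte buffer usable as a vertex buffer, per the comment above.
    descriptor.size = 44;
    descriptor.usage = wgpu::BufferUsage::Vertex;
    wgpu::Buffer vertexBuffer = device.CreateBuffer(&descriptor);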

View File

@ -43,7 +43,7 @@ class VertexStateTest : public ValidationTest {
} }
} }
const char* kDummyVertexShader = R"( const char* kPlaceholderVertexShader = R"(
@stage(vertex) fn main() -> @builtin(position) vec4<f32> { @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
return vec4<f32>(0.0, 0.0, 0.0, 0.0); return vec4<f32>(0.0, 0.0, 0.0, 0.0);
} }
@ -53,7 +53,7 @@ class VertexStateTest : public ValidationTest {
// Check an empty vertex input is valid // Check an empty vertex input is valid
TEST_F(VertexStateTest, EmptyIsOk) { TEST_F(VertexStateTest, EmptyIsOk) {
utils::ComboVertexState state; utils::ComboVertexState state;
CreatePipeline(true, state, kDummyVertexShader); CreatePipeline(true, state, kPlaceholderVertexShader);
} }
// Check null buffer is valid // Check null buffer is valid
@ -64,7 +64,7 @@ TEST_F(VertexStateTest, NullBufferIsOk) {
state.cVertexBuffers[0].arrayStride = 0; state.cVertexBuffers[0].arrayStride = 0;
state.cVertexBuffers[0].attributeCount = 0; state.cVertexBuffers[0].attributeCount = 0;
state.cVertexBuffers[0].attributes = nullptr; state.cVertexBuffers[0].attributes = nullptr;
CreatePipeline(true, state, kDummyVertexShader); CreatePipeline(true, state, kPlaceholderVertexShader);
// One null buffer (buffer[0]) followed by a buffer (buffer[1]) is OK // One null buffer (buffer[0]) followed by a buffer (buffer[1]) is OK
state.vertexBufferCount = 2; state.vertexBufferCount = 2;
@ -72,7 +72,7 @@ TEST_F(VertexStateTest, NullBufferIsOk) {
state.cVertexBuffers[1].attributeCount = 1; state.cVertexBuffers[1].attributeCount = 1;
state.cVertexBuffers[1].attributes = &state.cAttributes[0]; state.cVertexBuffers[1].attributes = &state.cAttributes[0];
state.cAttributes[0].shaderLocation = 0; state.cAttributes[0].shaderLocation = 0;
CreatePipeline(true, state, kDummyVertexShader); CreatePipeline(true, state, kPlaceholderVertexShader);
// Null buffer (buffer[2]) sitting between buffers (buffer[1] and buffer[3]) is OK // Null buffer (buffer[2]) sitting between buffers (buffer[1] and buffer[3]) is OK
state.vertexBufferCount = 4; state.vertexBufferCount = 4;
@ -81,7 +81,7 @@ TEST_F(VertexStateTest, NullBufferIsOk) {
state.cVertexBuffers[3].attributeCount = 1; state.cVertexBuffers[3].attributeCount = 1;
state.cVertexBuffers[3].attributes = &state.cAttributes[1]; state.cVertexBuffers[3].attributes = &state.cAttributes[1];
state.cAttributes[1].shaderLocation = 1; state.cAttributes[1].shaderLocation = 1;
CreatePipeline(true, state, kDummyVertexShader); CreatePipeline(true, state, kPlaceholderVertexShader);
} }
// Check validation that pipeline vertex buffers are backed by attributes in the vertex input // Check validation that pipeline vertex buffers are backed by attributes in the vertex input
@ -130,11 +130,11 @@ TEST_F(VertexStateTest, StrideZero) {
state.vertexBufferCount = 1; state.vertexBufferCount = 1;
state.cVertexBuffers[0].arrayStride = 0; state.cVertexBuffers[0].arrayStride = 0;
state.cVertexBuffers[0].attributeCount = 1; state.cVertexBuffers[0].attributeCount = 1;
CreatePipeline(true, state, kDummyVertexShader); CreatePipeline(true, state, kPlaceholderVertexShader);
// Works ok with attributes at a large-ish offset // Works ok with attributes at a large-ish offset
state.cAttributes[0].offset = 128; state.cAttributes[0].offset = 128;
CreatePipeline(true, state, kDummyVertexShader); CreatePipeline(true, state, kPlaceholderVertexShader);
} }
// Check validation that vertex attribute offset should be within vertex buffer arrayStride, // Check validation that vertex attribute offset should be within vertex buffer arrayStride,
@ -148,15 +148,15 @@ TEST_F(VertexStateTest, SetOffsetOutOfBounds) {
state.cAttributes[0].shaderLocation = 0; state.cAttributes[0].shaderLocation = 0;
state.cAttributes[1].shaderLocation = 1; state.cAttributes[1].shaderLocation = 1;
state.cAttributes[1].offset = sizeof(float); state.cAttributes[1].offset = sizeof(float);
CreatePipeline(true, state, kDummyVertexShader); CreatePipeline(true, state, kPlaceholderVertexShader);
// Test vertex attribute offset exceeds vertex buffer arrayStride range // Test vertex attribute offset exceeds vertex buffer arrayStride range
state.cVertexBuffers[0].arrayStride = sizeof(float); state.cVertexBuffers[0].arrayStride = sizeof(float);
CreatePipeline(false, state, kDummyVertexShader); CreatePipeline(false, state, kPlaceholderVertexShader);
// It's OK if arrayStride is zero // It's OK if arrayStride is zero
state.cVertexBuffers[0].arrayStride = 0; state.cVertexBuffers[0].arrayStride = 0;
CreatePipeline(true, state, kDummyVertexShader); CreatePipeline(true, state, kPlaceholderVertexShader);
} }
// Check out of bounds condition on total number of vertex buffers // Check out of bounds condition on total number of vertex buffers
@ -169,11 +169,11 @@ TEST_F(VertexStateTest, SetVertexBuffersNumLimit) {
state.cVertexBuffers[i].attributes = &state.cAttributes[i]; state.cVertexBuffers[i].attributes = &state.cAttributes[i];
state.cAttributes[i].shaderLocation = i; state.cAttributes[i].shaderLocation = i;
} }
CreatePipeline(true, state, kDummyVertexShader); CreatePipeline(true, state, kPlaceholderVertexShader);
// Test vertex buffer number exceeds the limit // Test vertex buffer number exceeds the limit
state.vertexBufferCount = kMaxVertexBuffers + 1; state.vertexBufferCount = kMaxVertexBuffers + 1;
CreatePipeline(false, state, kDummyVertexShader); CreatePipeline(false, state, kPlaceholderVertexShader);
} }
// Check out of bounds condition on total number of vertex attributes // Check out of bounds condition on total number of vertex attributes
@ -185,12 +185,12 @@ TEST_F(VertexStateTest, SetVertexAttributesNumLimit) {
for (uint32_t i = 0; i < kMaxVertexAttributes; ++i) { for (uint32_t i = 0; i < kMaxVertexAttributes; ++i) {
state.cAttributes[i].shaderLocation = i; state.cAttributes[i].shaderLocation = i;
} }
CreatePipeline(true, state, kDummyVertexShader); CreatePipeline(true, state, kPlaceholderVertexShader);
// Test vertex attribute number exceeds the limit // Test vertex attribute number exceeds the limit
state.cVertexBuffers[1].attributeCount = 1; state.cVertexBuffers[1].attributeCount = 1;
state.cVertexBuffers[1].attributes = &state.cAttributes[kMaxVertexAttributes - 1]; state.cVertexBuffers[1].attributes = &state.cAttributes[kMaxVertexAttributes - 1];
CreatePipeline(false, state, kDummyVertexShader); CreatePipeline(false, state, kPlaceholderVertexShader);
} }
// Check out of bounds condition on input arrayStride // Check out of bounds condition on input arrayStride
@ -200,11 +200,11 @@ TEST_F(VertexStateTest, SetInputStrideOutOfBounds) {
state.vertexBufferCount = 1; state.vertexBufferCount = 1;
state.cVertexBuffers[0].arrayStride = kMaxVertexBufferArrayStride; state.cVertexBuffers[0].arrayStride = kMaxVertexBufferArrayStride;
state.cVertexBuffers[0].attributeCount = 1; state.cVertexBuffers[0].attributeCount = 1;
CreatePipeline(true, state, kDummyVertexShader); CreatePipeline(true, state, kPlaceholderVertexShader);
// Test input arrayStride OOB // Test input arrayStride OOB
state.cVertexBuffers[0].arrayStride = kMaxVertexBufferArrayStride + 1; state.cVertexBuffers[0].arrayStride = kMaxVertexBufferArrayStride + 1;
CreatePipeline(false, state, kDummyVertexShader); CreatePipeline(false, state, kPlaceholderVertexShader);
} }
// Check multiple of 4 bytes constraint on input arrayStride // Check multiple of 4 bytes constraint on input arrayStride
@ -214,11 +214,11 @@ TEST_F(VertexStateTest, SetInputStrideNotAligned) {
state.vertexBufferCount = 1; state.vertexBufferCount = 1;
state.cVertexBuffers[0].arrayStride = 4; state.cVertexBuffers[0].arrayStride = 4;
state.cVertexBuffers[0].attributeCount = 1; state.cVertexBuffers[0].attributeCount = 1;
CreatePipeline(true, state, kDummyVertexShader); CreatePipeline(true, state, kPlaceholderVertexShader);
// Test input arrayStride not multiple of 4 bytes // Test input arrayStride not multiple of 4 bytes
state.cVertexBuffers[0].arrayStride = 2; state.cVertexBuffers[0].arrayStride = 2;
CreatePipeline(false, state, kDummyVertexShader); CreatePipeline(false, state, kPlaceholderVertexShader);
} }
// Test that we cannot set an already set attribute // Test that we cannot set an already set attribute
@ -228,13 +228,13 @@ TEST_F(VertexStateTest, AlreadySetAttribute) {
state.vertexBufferCount = 1; state.vertexBufferCount = 1;
state.cVertexBuffers[0].attributeCount = 1; state.cVertexBuffers[0].attributeCount = 1;
state.cAttributes[0].shaderLocation = 0; state.cAttributes[0].shaderLocation = 0;
CreatePipeline(true, state, kDummyVertexShader); CreatePipeline(true, state, kPlaceholderVertexShader);
// Oh no, attribute 0 is set twice // Oh no, attribute 0 is set twice
state.cVertexBuffers[0].attributeCount = 2; state.cVertexBuffers[0].attributeCount = 2;
state.cAttributes[0].shaderLocation = 0; state.cAttributes[0].shaderLocation = 0;
state.cAttributes[1].shaderLocation = 0; state.cAttributes[1].shaderLocation = 0;
CreatePipeline(false, state, kDummyVertexShader); CreatePipeline(false, state, kPlaceholderVertexShader);
} }
// Test that an arrayStride of 0 is valid // Test that an arrayStride of 0 is valid
@ -246,11 +246,11 @@ TEST_F(VertexStateTest, SetSameShaderLocation) {
state.cAttributes[0].shaderLocation = 0; state.cAttributes[0].shaderLocation = 0;
state.cAttributes[1].shaderLocation = 1; state.cAttributes[1].shaderLocation = 1;
state.cAttributes[1].offset = sizeof(float); state.cAttributes[1].offset = sizeof(float);
CreatePipeline(true, state, kDummyVertexShader); CreatePipeline(true, state, kPlaceholderVertexShader);
// Test same shader location in two attributes in the same buffer // Test same shader location in two attributes in the same buffer
state.cAttributes[1].shaderLocation = 0; state.cAttributes[1].shaderLocation = 0;
CreatePipeline(false, state, kDummyVertexShader); CreatePipeline(false, state, kPlaceholderVertexShader);
// Test same shader location in two attributes in different buffers // Test same shader location in two attributes in different buffers
state.vertexBufferCount = 2; state.vertexBufferCount = 2;
@ -259,7 +259,7 @@ TEST_F(VertexStateTest, SetSameShaderLocation) {
state.cVertexBuffers[1].attributeCount = 1; state.cVertexBuffers[1].attributeCount = 1;
state.cVertexBuffers[1].attributes = &state.cAttributes[1]; state.cVertexBuffers[1].attributes = &state.cAttributes[1];
state.cAttributes[1].shaderLocation = 0; state.cAttributes[1].shaderLocation = 0;
CreatePipeline(false, state, kDummyVertexShader); CreatePipeline(false, state, kPlaceholderVertexShader);
} }
// Check out of bounds condition on attribute shader location // Check out of bounds condition on attribute shader location
@ -269,11 +269,11 @@ TEST_F(VertexStateTest, SetAttributeLocationOutOfBounds) {
state.vertexBufferCount = 1; state.vertexBufferCount = 1;
state.cVertexBuffers[0].attributeCount = 1; state.cVertexBuffers[0].attributeCount = 1;
state.cAttributes[0].shaderLocation = kMaxVertexAttributes - 1; state.cAttributes[0].shaderLocation = kMaxVertexAttributes - 1;
CreatePipeline(true, state, kDummyVertexShader); CreatePipeline(true, state, kPlaceholderVertexShader);
// Test attribute location OOB // Test attribute location OOB
state.cAttributes[0].shaderLocation = kMaxVertexAttributes; state.cAttributes[0].shaderLocation = kMaxVertexAttributes;
CreatePipeline(false, state, kDummyVertexShader); CreatePipeline(false, state, kPlaceholderVertexShader);
} }
// Check attribute offset out of bounds // Check attribute offset out of bounds
@ -283,11 +283,11 @@ TEST_F(VertexStateTest, SetAttributeOffsetOutOfBounds) {
state.vertexBufferCount = 1; state.vertexBufferCount = 1;
state.cVertexBuffers[0].attributeCount = 1; state.cVertexBuffers[0].attributeCount = 1;
state.cAttributes[0].offset = kMaxVertexBufferArrayStride - sizeof(wgpu::VertexFormat::Float32); state.cAttributes[0].offset = kMaxVertexBufferArrayStride - sizeof(wgpu::VertexFormat::Float32);
CreatePipeline(true, state, kDummyVertexShader); CreatePipeline(true, state, kPlaceholderVertexShader);
// Test attribute offset out of bounds // Test attribute offset out of bounds
state.cAttributes[0].offset = kMaxVertexBufferArrayStride - 1; state.cAttributes[0].offset = kMaxVertexBufferArrayStride - 1;
CreatePipeline(false, state, kDummyVertexShader); CreatePipeline(false, state, kPlaceholderVertexShader);
} }
// Check the min(4, formatSize) alignment constraint for the offset. // Check the min(4, formatSize) alignment constraint for the offset.
@ -300,34 +300,34 @@ TEST_F(VertexStateTest, SetOffsetNotAligned) {
// Test that for small formats, the offset must be aligned to the format size. // Test that for small formats, the offset must be aligned to the format size.
state.cAttributes[0].format = wgpu::VertexFormat::Float32; state.cAttributes[0].format = wgpu::VertexFormat::Float32;
state.cAttributes[0].offset = 4; state.cAttributes[0].offset = 4;
CreatePipeline(true, state, kDummyVertexShader); CreatePipeline(true, state, kPlaceholderVertexShader);
state.cAttributes[0].offset = 2; state.cAttributes[0].offset = 2;
CreatePipeline(false, state, kDummyVertexShader); CreatePipeline(false, state, kPlaceholderVertexShader);
state.cAttributes[0].format = wgpu::VertexFormat::Snorm16x2; state.cAttributes[0].format = wgpu::VertexFormat::Snorm16x2;
state.cAttributes[0].offset = 4; state.cAttributes[0].offset = 4;
CreatePipeline(true, state, kDummyVertexShader); CreatePipeline(true, state, kPlaceholderVertexShader);
state.cAttributes[0].offset = 2; state.cAttributes[0].offset = 2;
CreatePipeline(false, state, kDummyVertexShader); CreatePipeline(false, state, kPlaceholderVertexShader);
state.cAttributes[0].format = wgpu::VertexFormat::Unorm8x2; state.cAttributes[0].format = wgpu::VertexFormat::Unorm8x2;
state.cAttributes[0].offset = 2; state.cAttributes[0].offset = 2;
CreatePipeline(true, state, kDummyVertexShader); CreatePipeline(true, state, kPlaceholderVertexShader);
state.cAttributes[0].offset = 1; state.cAttributes[0].offset = 1;
CreatePipeline(false, state, kDummyVertexShader); CreatePipeline(false, state, kPlaceholderVertexShader);
// Test that for large formats the offset only needs to be aligned to 4. // Test that for large formats the offset only needs to be aligned to 4.
state.cAttributes[0].format = wgpu::VertexFormat::Snorm16x4; state.cAttributes[0].format = wgpu::VertexFormat::Snorm16x4;
state.cAttributes[0].offset = 4; state.cAttributes[0].offset = 4;
CreatePipeline(true, state, kDummyVertexShader); CreatePipeline(true, state, kPlaceholderVertexShader);
state.cAttributes[0].format = wgpu::VertexFormat::Uint32x3; state.cAttributes[0].format = wgpu::VertexFormat::Uint32x3;
state.cAttributes[0].offset = 4; state.cAttributes[0].offset = 4;
CreatePipeline(true, state, kDummyVertexShader); CreatePipeline(true, state, kPlaceholderVertexShader);
state.cAttributes[0].format = wgpu::VertexFormat::Sint32x4; state.cAttributes[0].format = wgpu::VertexFormat::Sint32x4;
state.cAttributes[0].offset = 4; state.cAttributes[0].offset = 4;
CreatePipeline(true, state, kDummyVertexShader); CreatePipeline(true, state, kPlaceholderVertexShader);
} }
// Check attribute offset overflow // Check attribute offset overflow
@ -336,7 +336,7 @@ TEST_F(VertexStateTest, SetAttributeOffsetOverflow) {
state.vertexBufferCount = 1; state.vertexBufferCount = 1;
state.cVertexBuffers[0].attributeCount = 1; state.cVertexBuffers[0].attributeCount = 1;
state.cAttributes[0].offset = std::numeric_limits<uint32_t>::max(); state.cAttributes[0].offset = std::numeric_limits<uint32_t>::max();
CreatePipeline(false, state, kDummyVertexShader); CreatePipeline(false, state, kPlaceholderVertexShader);
} }
// Check for some potential underflow in the vertex input validation // Check for some potential underflow in the vertex input validation
@ -346,7 +346,7 @@ TEST_F(VertexStateTest, VertexFormatLargerThanNonZeroStride) {
state.cVertexBuffers[0].arrayStride = 4; state.cVertexBuffers[0].arrayStride = 4;
state.cVertexBuffers[0].attributeCount = 1; state.cVertexBuffers[0].attributeCount = 1;
state.cAttributes[0].format = wgpu::VertexFormat::Float32x4; state.cAttributes[0].format = wgpu::VertexFormat::Float32x4;
CreatePipeline(false, state, kDummyVertexShader); CreatePipeline(false, state, kPlaceholderVertexShader);
} }
// Check that the vertex format base type must match the shader's variable base type. // Check that the vertex format base type must match the shader's variable base type.

View File

@ -224,10 +224,10 @@ namespace {
// Test copying from a buffer to a multi-planar format fails. // Test copying from a buffer to a multi-planar format fails.
TEST_F(VideoViewsValidation, B2TCopyAllAspectsFails) { TEST_F(VideoViewsValidation, B2TCopyAllAspectsFails) {
std::vector<uint8_t> dummyData(4, 0); std::vector<uint8_t> placeholderData(4, 0);
wgpu::Buffer srcBuffer = utils::CreateBufferFromData( wgpu::Buffer srcBuffer = utils::CreateBufferFromData(
device, dummyData.data(), dummyData.size(), wgpu::BufferUsage::CopySrc); device, placeholderData.data(), placeholderData.size(), wgpu::BufferUsage::CopySrc);
wgpu::Texture dstTexture = CreateVideoTextureForTest( wgpu::Texture dstTexture = CreateVideoTextureForTest(
wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding); wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
@ -245,10 +245,10 @@ namespace {
// Test copying from a buffer to a multi-planar format per plane fails. // Test copying from a buffer to a multi-planar format per plane fails.
TEST_F(VideoViewsValidation, B2TCopyPlaneAspectsFails) { TEST_F(VideoViewsValidation, B2TCopyPlaneAspectsFails) {
std::vector<uint8_t> dummyData(4, 0); std::vector<uint8_t> placeholderData(4, 0);
wgpu::Buffer srcBuffer = utils::CreateBufferFromData( wgpu::Buffer srcBuffer = utils::CreateBufferFromData(
device, dummyData.data(), dummyData.size(), wgpu::BufferUsage::CopySrc); device, placeholderData.data(), placeholderData.size(), wgpu::BufferUsage::CopySrc);
wgpu::Texture dstTexture = CreateVideoTextureForTest( wgpu::Texture dstTexture = CreateVideoTextureForTest(
wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding); wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
@ -311,13 +311,14 @@ namespace {
wgpu::ImageCopyTexture imageCopyTexture = wgpu::ImageCopyTexture imageCopyTexture =
utils::CreateImageCopyTexture(texture, 0, {0, 0, 0}); utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
std::vector<uint8_t> dummyData(4, 0); std::vector<uint8_t> placeholderData(4, 0);
wgpu::Extent3D writeSize = {1, 1, 1}; wgpu::Extent3D writeSize = {1, 1, 1};
wgpu::Queue queue = device.GetQueue(); wgpu::Queue queue = device.GetQueue();
ASSERT_DEVICE_ERROR(queue.WriteTexture(&imageCopyTexture, dummyData.data(), ASSERT_DEVICE_ERROR(queue.WriteTexture(&imageCopyTexture, placeholderData.data(),
dummyData.size(), &textureDataLayout, &writeSize)); placeholderData.size(), &textureDataLayout,
&writeSize));
} }
// Tests writing into a multi-planar format per plane fails. // Tests writing into a multi-planar format per plane fails.
@ -329,13 +330,14 @@ namespace {
wgpu::ImageCopyTexture imageCopyTexture = wgpu::ImageCopyTexture imageCopyTexture =
utils::CreateImageCopyTexture(texture, 0, {0, 0, 0}, wgpu::TextureAspect::Plane0Only); utils::CreateImageCopyTexture(texture, 0, {0, 0, 0}, wgpu::TextureAspect::Plane0Only);
std::vector<uint8_t> dummmyData(4, 0); std::vector<uint8_t> placeholderData(4, 0);
wgpu::Extent3D writeSize = {1, 1, 1}; wgpu::Extent3D writeSize = {1, 1, 1};
wgpu::Queue queue = device.GetQueue(); wgpu::Queue queue = device.GetQueue();
ASSERT_DEVICE_ERROR(queue.WriteTexture(&imageCopyTexture, dummmyData.data(), ASSERT_DEVICE_ERROR(queue.WriteTexture(&imageCopyTexture, placeholderData.data(),
dummmyData.size(), &textureDataLayout, &writeSize)); placeholderData.size(), &textureDataLayout,
&writeSize));
} }
} // anonymous namespace } // anonymous namespace

View File

@ -166,13 +166,13 @@ TEST_F(WireArgumentTests, CStringArgument) {
wgpuDeviceCreateRenderPipeline(device, &pipelineDescriptor); wgpuDeviceCreateRenderPipeline(device, &pipelineDescriptor);
WGPURenderPipeline apiDummyPipeline = api.GetNewRenderPipeline(); WGPURenderPipeline apiPlaceholderPipeline = api.GetNewRenderPipeline();
EXPECT_CALL(api, EXPECT_CALL(api,
DeviceCreateRenderPipeline( DeviceCreateRenderPipeline(
apiDevice, MatchesLambda([](const WGPURenderPipelineDescriptor* desc) -> bool { apiDevice, MatchesLambda([](const WGPURenderPipelineDescriptor* desc) -> bool {
return desc->vertex.entryPoint == std::string("main"); return desc->vertex.entryPoint == std::string("main");
}))) })))
.WillOnce(Return(apiDummyPipeline)); .WillOnce(Return(apiPlaceholderPipeline));
FlushClient(); FlushClient();
} }
@ -248,7 +248,7 @@ TEST_F(WireArgumentTests, StructureOfValuesArgument) {
wgpuDeviceCreateSampler(device, &descriptor); wgpuDeviceCreateSampler(device, &descriptor);
WGPUSampler apiDummySampler = api.GetNewSampler(); WGPUSampler apiPlaceholderSampler = api.GetNewSampler();
EXPECT_CALL(api, DeviceCreateSampler( EXPECT_CALL(api, DeviceCreateSampler(
apiDevice, MatchesLambda([](const WGPUSamplerDescriptor* desc) -> bool { apiDevice, MatchesLambda([](const WGPUSamplerDescriptor* desc) -> bool {
return desc->nextInChain == nullptr && return desc->nextInChain == nullptr &&
@ -261,7 +261,7 @@ TEST_F(WireArgumentTests, StructureOfValuesArgument) {
desc->compare == WGPUCompareFunction_Never && desc->compare == WGPUCompareFunction_Never &&
desc->lodMinClamp == kLodMin && desc->lodMaxClamp == kLodMax; desc->lodMinClamp == kLodMin && desc->lodMaxClamp == kLodMax;
}))) })))
.WillOnce(Return(apiDummySampler)); .WillOnce(Return(apiPlaceholderSampler));
FlushClient(); FlushClient();
} }
@ -282,7 +282,7 @@ TEST_F(WireArgumentTests, StructureOfObjectArrayArgument) {
wgpuDeviceCreatePipelineLayout(device, &descriptor); wgpuDeviceCreatePipelineLayout(device, &descriptor);
WGPUPipelineLayout apiDummyLayout = api.GetNewPipelineLayout(); WGPUPipelineLayout apiPlaceholderLayout = api.GetNewPipelineLayout();
EXPECT_CALL(api, DeviceCreatePipelineLayout( EXPECT_CALL(api, DeviceCreatePipelineLayout(
apiDevice, apiDevice,
MatchesLambda([apiBgl](const WGPUPipelineLayoutDescriptor* desc) -> bool { MatchesLambda([apiBgl](const WGPUPipelineLayoutDescriptor* desc) -> bool {
@ -290,7 +290,7 @@ TEST_F(WireArgumentTests, StructureOfObjectArrayArgument) {
desc->bindGroupLayoutCount == 1 && desc->bindGroupLayoutCount == 1 &&
desc->bindGroupLayouts[0] == apiBgl; desc->bindGroupLayouts[0] == apiBgl;
}))) })))
.WillOnce(Return(apiDummyLayout)); .WillOnce(Return(apiPlaceholderLayout));
FlushClient(); FlushClient();
} }

View File

@ -29,7 +29,7 @@ namespace {
// Test that commands are not received if the client disconnects. // Test that commands are not received if the client disconnects.
TEST_F(WireDisconnectTests, CommandsAfterDisconnect) { TEST_F(WireDisconnectTests, CommandsAfterDisconnect) {
// Sanity check that commands work at all. // Check that commands work at all.
wgpuDeviceCreateCommandEncoder(device, nullptr); wgpuDeviceCreateCommandEncoder(device, nullptr);
WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder(); WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
@ -49,7 +49,7 @@ TEST_F(WireDisconnectTests, CommandsAfterDisconnect) {
// Test that commands that are serialized before a disconnect but flushed // Test that commands that are serialized before a disconnect but flushed
// after are received. // after are received.
TEST_F(WireDisconnectTests, FlushAfterDisconnect) { TEST_F(WireDisconnectTests, FlushAfterDisconnect) {
// Sanity check that commands work at all. // Check that commands work at all.
wgpuDeviceCreateCommandEncoder(device, nullptr); wgpuDeviceCreateCommandEncoder(device, nullptr);
// Disconnect. // Disconnect.

View File

@ -38,8 +38,8 @@ TEST_F(WireInjectTextureTests, CallAfterReserveInject) {
reservation.deviceId, reservation.deviceGeneration)); reservation.deviceId, reservation.deviceGeneration));
wgpuTextureCreateView(reservation.texture, nullptr); wgpuTextureCreateView(reservation.texture, nullptr);
WGPUTextureView apiDummyView = api.GetNewTextureView(); WGPUTextureView apiPlaceholderView = api.GetNewTextureView();
EXPECT_CALL(api, TextureCreateView(apiTexture, nullptr)).WillOnce(Return(apiDummyView)); EXPECT_CALL(api, TextureCreateView(apiTexture, nullptr)).WillOnce(Return(apiPlaceholderView));
FlushClient(); FlushClient();
} }

View File

@ -49,7 +49,7 @@ TEST_F(WireOptionalTests, OptionalObjectValue) {
wgpuDeviceCreateBindGroup(device, &bgDesc); wgpuDeviceCreateBindGroup(device, &bgDesc);
WGPUBindGroup apiDummyBindGroup = api.GetNewBindGroup(); WGPUBindGroup apiPlaceholderBindGroup = api.GetNewBindGroup();
EXPECT_CALL(api, DeviceCreateBindGroup( EXPECT_CALL(api, DeviceCreateBindGroup(
apiDevice, MatchesLambda([](const WGPUBindGroupDescriptor* desc) -> bool { apiDevice, MatchesLambda([](const WGPUBindGroupDescriptor* desc) -> bool {
return desc->nextInChain == nullptr && desc->entryCount == 1 && return desc->nextInChain == nullptr && desc->entryCount == 1 &&
@ -58,7 +58,7 @@ TEST_F(WireOptionalTests, OptionalObjectValue) {
desc->entries[0].buffer == nullptr && desc->entries[0].buffer == nullptr &&
desc->entries[0].textureView == nullptr; desc->entries[0].textureView == nullptr;
}))) })))
.WillOnce(Return(apiDummyBindGroup)); .WillOnce(Return(apiPlaceholderBindGroup));
FlushClient(); FlushClient();
} }
@ -138,7 +138,7 @@ TEST_F(WireOptionalTests, OptionalStructPointer) {
pipelineDescriptor.depthStencil = &depthStencilState; pipelineDescriptor.depthStencil = &depthStencilState;
wgpuDeviceCreateRenderPipeline(device, &pipelineDescriptor); wgpuDeviceCreateRenderPipeline(device, &pipelineDescriptor);
WGPURenderPipeline apiDummyPipeline = api.GetNewRenderPipeline(); WGPURenderPipeline apiPlaceholderPipeline = api.GetNewRenderPipeline();
EXPECT_CALL( EXPECT_CALL(
api, api,
DeviceCreateRenderPipeline( DeviceCreateRenderPipeline(
@ -161,7 +161,7 @@ TEST_F(WireOptionalTests, OptionalStructPointer) {
desc->depthStencil->depthBiasSlopeScale == 0.0 && desc->depthStencil->depthBiasSlopeScale == 0.0 &&
desc->depthStencil->depthBiasClamp == 0.0; desc->depthStencil->depthBiasClamp == 0.0;
}))) })))
.WillOnce(Return(apiDummyPipeline)); .WillOnce(Return(apiPlaceholderPipeline));
FlushClient(); FlushClient();
@ -173,7 +173,7 @@ TEST_F(WireOptionalTests, OptionalStructPointer) {
apiDevice, MatchesLambda([](const WGPURenderPipelineDescriptor* desc) -> bool { apiDevice, MatchesLambda([](const WGPURenderPipelineDescriptor* desc) -> bool {
return desc->depthStencil == nullptr; return desc->depthStencil == nullptr;
}))) })))
.WillOnce(Return(apiDummyPipeline)); .WillOnce(Return(apiPlaceholderPipeline));
FlushClient(); FlushClient();
} }

View File

@ -96,11 +96,11 @@ class D3D12DescriptorHeapTests : public DawnTest {
wgpu::ShaderModule mSimpleFSModule; wgpu::ShaderModule mSimpleFSModule;
}; };
class DummyStagingDescriptorAllocator { class PlaceholderStagingDescriptorAllocator {
public: public:
DummyStagingDescriptorAllocator(Device* device, PlaceholderStagingDescriptorAllocator(Device* device,
uint32_t descriptorCount, uint32_t descriptorCount,
uint32_t allocationsPerHeap) uint32_t allocationsPerHeap)
: mAllocator(device, : mAllocator(device,
descriptorCount, descriptorCount,
allocationsPerHeap * descriptorCount, allocationsPerHeap * descriptorCount,
@ -899,7 +899,8 @@ TEST_P(D3D12DescriptorHeapTests, EncodeManyUBOAndSamplers) {
TEST_P(D3D12DescriptorHeapTests, Single) { TEST_P(D3D12DescriptorHeapTests, Single) {
constexpr uint32_t kDescriptorCount = 4; constexpr uint32_t kDescriptorCount = 4;
constexpr uint32_t kAllocationsPerHeap = 3; constexpr uint32_t kAllocationsPerHeap = 3;
DummyStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount, kAllocationsPerHeap); PlaceholderStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount,
kAllocationsPerHeap);
CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors(); CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
EXPECT_EQ(allocation.GetHeapIndex(), 0u); EXPECT_EQ(allocation.GetHeapIndex(), 0u);
@ -914,7 +915,8 @@ TEST_P(D3D12DescriptorHeapTests, Single) {
TEST_P(D3D12DescriptorHeapTests, Sequential) { TEST_P(D3D12DescriptorHeapTests, Sequential) {
constexpr uint32_t kDescriptorCount = 4; constexpr uint32_t kDescriptorCount = 4;
constexpr uint32_t kAllocationsPerHeap = 3; constexpr uint32_t kAllocationsPerHeap = 3;
DummyStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount, kAllocationsPerHeap); PlaceholderStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount,
kAllocationsPerHeap);
// Allocate |kNumOfHeaps| worth. // Allocate |kNumOfHeaps| worth.
constexpr uint32_t kNumOfHeaps = 2; constexpr uint32_t kNumOfHeaps = 2;
@ -944,7 +946,8 @@ TEST_P(D3D12DescriptorHeapTests, Sequential) {
TEST_P(D3D12DescriptorHeapTests, ReuseFreedHeaps) { TEST_P(D3D12DescriptorHeapTests, ReuseFreedHeaps) {
constexpr uint32_t kDescriptorCount = 4; constexpr uint32_t kDescriptorCount = 4;
constexpr uint32_t kAllocationsPerHeap = 25; constexpr uint32_t kAllocationsPerHeap = 25;
DummyStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount, kAllocationsPerHeap); PlaceholderStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount,
kAllocationsPerHeap);
constexpr uint32_t kNumofHeaps = 10; constexpr uint32_t kNumofHeaps = 10;
@ -987,7 +990,8 @@ TEST_P(D3D12DescriptorHeapTests, ReuseFreedHeaps) {
TEST_P(D3D12DescriptorHeapTests, AllocateDeallocateMany) { TEST_P(D3D12DescriptorHeapTests, AllocateDeallocateMany) {
constexpr uint32_t kDescriptorCount = 4; constexpr uint32_t kDescriptorCount = 4;
constexpr uint32_t kAllocationsPerHeap = 25; constexpr uint32_t kAllocationsPerHeap = 25;
DummyStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount, kAllocationsPerHeap); PlaceholderStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount,
kAllocationsPerHeap);
std::list<CPUDescriptorHeapAllocation> list3; std::list<CPUDescriptorHeapAllocation> list3;
std::list<CPUDescriptorHeapAllocation> list5; std::list<CPUDescriptorHeapAllocation> list5;

View File

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
add_library(dawn_utils STATIC ${DAWN_DUMMY_FILE}) add_library(dawn_utils STATIC ${DAWN_PLACEHOLDER_FILE})
common_compile_options(dawn_utils) common_compile_options(dawn_utils)
target_sources(dawn_utils PRIVATE target_sources(dawn_utils PRIVATE
"ComboRenderBundleEncoderDescriptor.cpp" "ComboRenderBundleEncoderDescriptor.cpp"

View File

@ -19,7 +19,7 @@
// NOTE: This must be included before GLFW/glfw3.h because the latter will // NOTE: This must be included before GLFW/glfw3.h because the latter will
// include <vulkan/vulkan.h> and "common/vulkan_platform.h" wants to be // include <vulkan/vulkan.h> and "common/vulkan_platform.h" wants to be
// the first header to do so for sanity reasons (e.g. undefining weird // the first header to do so for validity reasons (e.g. undefining weird
// macros on Windows and Linux). // macros on Windows and Linux).
// clang-format off // clang-format off
#include "dawn/common/vulkan_platform.h" #include "dawn/common/vulkan_platform.h"

View File

@ -22,7 +22,7 @@
namespace utils { namespace utils {
// The returned CALayer is autoreleased. // The returned CALayer is autoreleased.
void* CreateDummyCALayer(); void* CreatePlaceholderCALayer();
} // namespace utils } // namespace utils

View File

@ -18,7 +18,7 @@
namespace utils { namespace utils {
void* CreateDummyCALayer() { void* CreatePlaceholderCALayer() {
return [CALayer layer]; return [CALayer layer];
} }

View File

@ -18,7 +18,7 @@ DawnJSONGenerator(
RESULT_VARIABLE "DAWN_WIRE_GEN_SOURCES" RESULT_VARIABLE "DAWN_WIRE_GEN_SOURCES"
) )
add_library(dawn_wire ${DAWN_DUMMY_FILE}) add_library(dawn_wire ${DAWN_PLACEHOLDER_FILE})
common_compile_options(dawn_wire) common_compile_options(dawn_wire)
target_compile_definitions(dawn_wire PRIVATE "DAWN_WIRE_IMPLEMENTATION") target_compile_definitions(dawn_wire PRIVATE "DAWN_WIRE_IMPLEMENTATION")

View File

@ -3410,8 +3410,8 @@ TEST_F(SpvModuleScopeVarParserTest, RegisterInputOutputVars) {
%300 = OpFunction %void None %voidfn %300 = OpFunction %void None %voidfn
%entry_300 = OpLabel %entry_300 = OpLabel
%dummy_300_1 = OpFunctionCall %void %100 %placeholder_300_1 = OpFunctionCall %void %100
%dummy_300_2 = OpFunctionCall %void %200 %placeholder_300_2 = OpFunctionCall %void %200
OpReturn OpReturn
OpFunctionEnd OpFunctionEnd
@ -3424,21 +3424,21 @@ TEST_F(SpvModuleScopeVarParserTest, RegisterInputOutputVars) {
; Call %100 ; Call %100
%1100 = OpFunction %void None %voidfn %1100 = OpFunction %void None %voidfn
%entry_1100 = OpLabel %entry_1100 = OpLabel
%dummy_1100_1 = OpFunctionCall %void %100 %placeholder_1100_1 = OpFunctionCall %void %100
OpReturn OpReturn
OpFunctionEnd OpFunctionEnd
; Call %200 ; Call %200
%1200 = OpFunction %void None %voidfn %1200 = OpFunction %void None %voidfn
%entry_1200 = OpLabel %entry_1200 = OpLabel
%dummy_1200_1 = OpFunctionCall %void %200 %placeholder_1200_1 = OpFunctionCall %void %200
OpReturn OpReturn
OpFunctionEnd OpFunctionEnd
; Call %300 ; Call %300
%1300 = OpFunction %void None %voidfn %1300 = OpFunction %void None %voidfn
%entry_1300 = OpLabel %entry_1300 = OpLabel
%dummy_1300_1 = OpFunctionCall %void %300 %placeholder_1300_1 = OpFunctionCall %void %300
OpReturn OpReturn
OpFunctionEnd OpFunctionEnd

View File

@ -5,9 +5,9 @@ struct Scene {
struct Material { struct Material {
vDiffuseColor : vec4<f32>, vDiffuseColor : vec4<f32>,
vAmbientColor : vec3<f32>, vAmbientColor : vec3<f32>,
dummy: f32, placeholder: f32,
vEmissiveColor : vec3<f32>, vEmissiveColor : vec3<f32>,
dummy2: f32, placeholder2: f32,
}; };
struct Mesh { struct Mesh {

View File

@ -11,9 +11,9 @@ struct Scene {
struct Material { struct Material {
vec4 vDiffuseColor; vec4 vDiffuseColor;
vec3 vAmbientColor; vec3 vAmbientColor;
float dummy; float placeholder;
vec3 vEmissiveColor; vec3 vEmissiveColor;
float dummy2; float placeholder2;
}; };
struct Mesh { struct Mesh {
@ -29,9 +29,9 @@ layout(binding = 0) uniform Scene_1 {
layout(binding = 1) uniform Material_1 { layout(binding = 1) uniform Material_1 {
vec4 vDiffuseColor; vec4 vDiffuseColor;
vec3 vAmbientColor; vec3 vAmbientColor;
float dummy; float placeholder;
vec3 vEmissiveColor; vec3 vEmissiveColor;
float dummy2; float placeholder2;
} x_49; } x_49;
layout(binding = 2) uniform Mesh_1 { layout(binding = 2) uniform Mesh_1 {

View File

@ -19,9 +19,9 @@ struct Scene {
struct Material { struct Material {
/* 0x0000 */ float4 vDiffuseColor; /* 0x0000 */ float4 vDiffuseColor;
/* 0x0010 */ packed_float3 vAmbientColor; /* 0x0010 */ packed_float3 vAmbientColor;
/* 0x001c */ float dummy; /* 0x001c */ float placeholder;
/* 0x0020 */ packed_float3 vEmissiveColor; /* 0x0020 */ packed_float3 vEmissiveColor;
/* 0x002c */ float dummy2; /* 0x002c */ float placeholder2;
}; };
struct Mesh { struct Mesh {
@ -151,4 +151,3 @@ fragment tint_symbol_3 tint_symbol(const constant Scene* tint_symbol_24 [[buffer
wrapper_result.glFragColor_1 = inner_result.glFragColor_1; wrapper_result.glFragColor_1 = inner_result.glFragColor_1;
return wrapper_result; return wrapper_result;
} }

View File

@ -19,9 +19,9 @@
OpName %Material "Material" OpName %Material "Material"
OpMemberName %Material 0 "vDiffuseColor" OpMemberName %Material 0 "vDiffuseColor"
OpMemberName %Material 1 "vAmbientColor" OpMemberName %Material 1 "vAmbientColor"
OpMemberName %Material 2 "dummy" OpMemberName %Material 2 "placeholder"
OpMemberName %Material 3 "vEmissiveColor" OpMemberName %Material 3 "vEmissiveColor"
OpMemberName %Material 4 "dummy2" OpMemberName %Material 4 "placeholder2"
OpName %x_49 "x_49" OpName %x_49 "x_49"
OpName %Mesh "Mesh" OpName %Mesh "Mesh"
OpMemberName %Mesh 0 "visibility" OpMemberName %Mesh 0 "visibility"

View File

@ -5,9 +5,9 @@ struct Scene {
struct Material { struct Material {
vDiffuseColor : vec4<f32>, vDiffuseColor : vec4<f32>,
vAmbientColor : vec3<f32>, vAmbientColor : vec3<f32>,
dummy : f32, placeholder : f32,
vEmissiveColor : vec3<f32>, vEmissiveColor : vec3<f32>,
dummy2 : f32, placeholder2 : f32,
} }
struct Mesh { struct Mesh {

View File

@ -35,20 +35,20 @@ def compile_src(out_dir):
shutil.rmtree(out_dir) shutil.rmtree(out_dir)
run_tsc_ignore_errors([ run_tsc_ignore_errors([
'--project', "--project",
os.path.join(webgpu_cts_root_dir, 'tsconfig.json'), os.path.join(webgpu_cts_root_dir, "tsconfig.json"),
'--outDir', "--outDir",
out_dir, out_dir,
'--noEmit', "--noEmit",
'false', "false",
'--noEmitOnError', "--noEmitOnError",
'false', "false",
'--declaration', "--declaration",
'false', "false",
'--sourceMap', "--sourceMap",
'false', "false",
'--target', "--target",
'ES2017', "ES2017",
]) ])
@ -59,43 +59,43 @@ def compile_src_for_node(out_dir, additional_args=None, clean=True):
shutil.rmtree(out_dir) shutil.rmtree(out_dir)
args = [ args = [
'--project', "--project",
os.path.join(webgpu_cts_root_dir, 'node.tsconfig.json'), os.path.join(webgpu_cts_root_dir, "node.tsconfig.json"),
'--outDir', "--outDir",
out_dir, out_dir,
'--noEmit', "--noEmit",
'false', "false",
'--noEmitOnError', "--noEmitOnError",
'false', "false",
'--declaration', "--declaration",
'false', "false",
'--sourceMap', "--sourceMap",
'false', "false",
'--target', "--target",
'ES6', "ES6",
] ]
args.extend(additional_args) args.extend(additional_args)
run_tsc_ignore_errors(args) run_tsc_ignore_errors(args)
if __name__ == '__main__': if __name__ == "__main__":
if len(sys.argv) != 2: if len(sys.argv) != 2:
print('Usage: compile_src.py GEN_DIR') print("Usage: compile_src.py GEN_DIR")
sys.exit(1) sys.exit(1)
gen_dir = sys.argv[1] gen_dir = sys.argv[1]
# Compile the CTS src. # Compile the CTS src.
compile_src(os.path.join(gen_dir, 'src')) compile_src(os.path.join(gen_dir, "src"))
compile_src_for_node(os.path.join(gen_dir, 'src-node')) compile_src_for_node(os.path.join(gen_dir, "src-node"))
# Run gen_listings.js to overwrite the dummy src/webgpu/listings.js created # Run gen_listings.js to overwrite the placeholder src/webgpu/listings.js created
# from transpiling src/ # from transpiling src/
RunNode([ RunNode([
os.path.join(gen_dir, 'src-node', 'common', 'tools', os.path.join(gen_dir, "src-node", "common", "tools",
'gen_listings.js'), "gen_listings.js"),
'--no-validate', "--no-validate",
os.path.join(gen_dir, 'src'), os.path.join(gen_dir, "src"),
os.path.join(gen_dir, 'src-node', 'webgpu'), os.path.join(gen_dir, "src-node", "webgpu"),
]) ])