Fix inclusive language presubmit

The current presubmit has the filter inverted, so it only attempts to match the files that should be filtered out. The file name also has to be converted with `LocalPath()`; otherwise the check compares a Python object to a string and never matches.

Bug: dawn:1339
Change-Id: Ie7712dee60f6b9df2cb78c9feab11769f7ea1f02
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/87080
Reviewed-by: Corentin Wallez <cwallez@chromium.org>
Commit-Queue: Dan Sinclair <dsinclair@chromium.org>
Auto-Submit: Dan Sinclair <dsinclair@chromium.org>

This commit is contained in:
parent 6a3373e419
commit fb5a492787
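For context, a minimal sketch of the corrected check, pieced together from the PRESUBMIT.py hunks below. The helper names, the presubmit API calls (`input_api.AffectedFiles`, `f.LocalPath()`, `f.NewContents()`), and `NONINCLUSIVE_REGEX_LIST` all come from that file; how each match is recorded and reported is elided here and is only illustrative:

    def _NonInclusiveFileFilter(file):
        # Files that intentionally contain flagged terms (external URLs,
        # third-party code, infra settings, ...) are skipped.
        filter_list = [
            "Doxyfile",
            "PRESUBMIT.py",
            # ... see the full list in the diff below ...
        ]
        # Compare path strings, not AffectedFile objects, and return True
        # for files that SHOULD be checked.
        return file.LocalPath() not in filter_list

    def _CheckNonInclusiveLanguage(input_api, output_api, source_file_filter=None):
        matches = []
        for f in input_api.AffectedFiles(include_deletes=False,
                                         file_filter=source_file_filter):
            line_num = 0
            # NewContents() scans the whole file, not just the changed lines.
            for line in f.NewContents():
                line_num += 1
                for reg in NONINCLUSIVE_REGEX_LIST:
                    if reg.search(line):
                        # Record the hit; the real check builds an output_api
                        # result from these matches (omitted in this sketch).
                        matches.append((f.LocalPath(), line_num, line))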
@@ -14,7 +14,7 @@
 cmake_minimum_required(VERSION 3.10.2)

-# When upgrading to CMake 3.11 we can remove DAWN_DUMMY_FILE because source-less add_library
+# When upgrading to CMake 3.11 we can remove DAWN_PLACEHOLDER_FILE because source-less add_library
 # becomes available.
 # When upgrading to CMake 3.12 we should add CONFIGURE_DEPENDS to DawnGenerator to rerun CMake in
 # case any of the generator files changes. We should also remove the CACHE "" FORCE stuff to
@@ -46,7 +46,7 @@ set(DAWN_SRC_DIR "${Dawn_SOURCE_DIR}/src")
 set(DAWN_INCLUDE_DIR "${Dawn_SOURCE_DIR}/include")
 set(DAWN_TEMPLATE_DIR "${DAWN_GENERATOR_DIR}/templates")

-set(DAWN_DUMMY_FILE "${DAWN_SRC_DIR}/Dummy.cpp")
+set(DAWN_PLACEHOLDER_FILE "${DAWN_SRC_DIR}/Placeholder.cpp")

 ################################################################################
 # Configuration options
PRESUBMIT.py (26 changed lines)
@@ -80,7 +80,9 @@ def _CheckNonInclusiveLanguage(input_api, output_api, source_file_filter=None):
     matches = []
     for f in input_api.AffectedFiles(include_deletes=False,
                                      file_filter=source_file_filter):
-        for line_num, line in f.ChangedContents():
+        line_num = 0
+        for line in f.NewContents():
+            line_num += 1
             for reg in NONINCLUSIVE_REGEX_LIST:
                 match = reg.search(line)
                 if match:
@@ -99,11 +101,29 @@ def _CheckNonInclusiveLanguage(input_api, output_api, source_file_filter=None):

 def _NonInclusiveFileFilter(file):
     filter_list = [
         "Doxyfile",  # References to main pages
         "PRESUBMIT.py",  # Non-inclusive language check data
+        "PRESUBMIT.py.tint",  # Non-inclusive language check data
         "docs/dawn/debug_markers.md",  # External URL
         "docs/dawn/infra.md",  # Infra settings
         "docs/tint/spirv-input-output-variables.md",  # External URL
-        "test/tint/samples/compute_boids.wgsl ",  # External URL
+        "infra/config/global/generated/cr-buildbucket.cfg",  # Infra settings
+        "infra/config/global/main.star",  # Infra settings
+        "infra/kokoro/windows/build.bat",  # External URL
+        "src/dawn/common/GPUInfo.cpp",  # External URL
+        "src/dawn/native/metal/BackendMTL.mm",  # OSX Constant
+        "src/dawn/native/vulkan/SamplerVk.cpp",  # External URL
+        "src/dawn/native/vulkan/TextureVk.cpp",  # External URL
+        "src/dawn/node/tools/src/cmd/run-cts/main.go",  # Terminal type name
+        "src/dawn/samples/ComputeBoids.cpp",  # External URL
+        "src/dawn/tests/end2end/DepthBiasTests.cpp",  # External URL
+        "test/tint/samples/compute_boids.wgsl",  # External URL
+        "third_party/khronos/KHR/khrplatform.h",  # Third party file
+        "tools/roll-all",  # Branch name
+        "tools/src/container/key.go",  # External URL
+        "tools/src/go.sum",  # External URL
     ]
-    return file in filter_list
+    return file.LocalPath() not in filter_list


 def _DoCommonChecks(input_api, output_api):
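The hunk above ends at the declaration of _DoCommonChecks; its body is not part of this diff, so the wiring below is a hedged assumption based only on the function signatures shown above. It illustrates how the filter is presumably passed as the source_file_filter so that the excluded files are skipped:

    def _DoCommonChecks(input_api, output_api):
        results = []
        # Assumed wiring: hand the allow-filter to the check so it runs on
        # every affected file except those in filter_list.
        results.extend(
            _CheckNonInclusiveLanguage(input_api, output_api,
                                       source_file_filter=_NonInclusiveFileFilter))
        return results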
@@ -3,7 +3,7 @@
 # Dawn, a WebGPU implementation

 Dawn is an open-source and cross-platform implementation of the work-in-progress [WebGPU](https://webgpu.dev) standard.
-More precisely it implements [`webgpu.h`](https://github.com/webgpu-native/webgpu-headers/blob/master/webgpu.h) that is a one-to-one mapping with the WebGPU IDL.
+More precisely it implements [`webgpu.h`](https://github.com/webgpu-native/webgpu-headers/blob/main/webgpu.h) that is a one-to-one mapping with the WebGPU IDL.
 Dawn is meant to be integrated as part of a larger system and is the underlying implementation of WebGPU in Chromium.

 Dawn provides several WebGPU building blocks:
@@ -3,7 +3,7 @@
 # Dawn, a WebGPU implementation

 Dawn is an open-source and cross-platform implementation of the work-in-progress [WebGPU](https://webgpu.dev) standard.
-More precisely it implements [`webgpu.h`](https://github.com/webgpu-native/webgpu-headers/blob/master/webgpu.h) that is a one-to-one mapping with the WebGPU IDL.
+More precisely it implements [`webgpu.h`](https://github.com/webgpu-native/webgpu-headers/blob/main/webgpu.h) that is a one-to-one mapping with the WebGPU IDL.
 Dawn is meant to be integrated as part of a larger system and is the underlying implementation of WebGPU in Chromium.

 Dawn provides several WebGPU building blocks:
@@ -2,7 +2,7 @@

 Dawn relies on a lot of code generation to produce boilerplate code, especially webgpu.h-related code. They start by reading some JSON files (and sometimes XML too), process the data into an in-memory representation that's then used by some [Jinja2](https://jinja.palletsprojects.com/) templates to generate the code. This is similar to the model/view separation in Web development.

-Generators are based on [generator_lib.py](../generator/generator_lib.py) which provides facilities for integrating in build systems and using Jinja2. Templates can be found in [`generator/templates`](../generator/templates) and the generated files are in `out/<Debug/Release/foo>/gen/src` when building Dawn in standalone. Generated files can also be found in [Chromium's code search](https://source.chromium.org/chromium/chromium/src/+/master:out/Debug/gen/third_party/dawn/src/).
+Generators are based on [generator_lib.py](../generator/generator_lib.py) which provides facilities for integrating in build systems and using Jinja2. Templates can be found in [`generator/templates`](../generator/templates) and the generated files are in `out/<Debug/Release/foo>/gen/src` when building Dawn in standalone. Generated files can also be found in [Chromium's code search](https://source.chromium.org/chromium/chromium/src/+/main:out/Debug/gen/third_party/dawn/src/).

 ## Dawn "JSON API" generators
@@ -13,6 +13,6 @@ The `dawn_wire_server_and_vulkan_backend_fuzzer` is like `dawn_wire_server_and_f
 Using a seed corpus significantly improves the efficiency of fuzzing. Dawn's fuzzers use interesting testcases discovered in previous fuzzing runs to seed future runs. Fuzzing can be further improved by using Dawn tests as a example of API usage which allows the fuzzer to quickly discover and use new API entrypoints and usage patterns.

 Dawn has a CI builder [cron-linux-clang-rel-x64](https://ci.chromium.org/p/dawn/builders/ci/cron-linux-clang-rel-x64) which runs on a periodic schedule. This bot runs the `dawn_end2end_tests` and `dawn_unittests` using the wire and writes out traces of the commands. This can manually be done by running: `<test_binary> --use-wire --wire-trace-dir=tmp_dir`. The output directory will contain one trace for each test, where the traces are prepended with `0xFFFFFFFFFFFFFFFF`. The header is the callsite index at which the error injector should inject an error. If the fuzzer doesn't support error injection it will skip the header. [cron-linux-clang-rel-x64] then hashes the output files to produce unique names and uploads them to the fuzzer corpus directories.
-Please see the `dawn.py`[https://source.chromium.org/chromium/chromium/tools/build/+/master:recipes/recipes/dawn.py] recipe for specific details.
+Please see the `dawn.py`[https://source.chromium.org/chromium/chromium/tools/build/+/main:recipes/recipes/dawn.py] recipe for specific details.

 Regenerating the seed corpus keeps it up to date when Dawn's API or wire protocol changes.
@@ -6,7 +6,7 @@ Dawn uses Chromium's continuous integration (CI) infrastructure to continually r
 - [Dawn Try Builders](https://ci.chromium.org/p/dawn/g/try/builders)
 - [chromium.dawn Waterfall](https://ci.chromium.org/p/chromium/g/chromium.dawn/console)

-For additional information on GPU testing in Chromium, please see [[chromium/src]//docs/gpu/gpu_testing_bot_details.md](https://chromium.googlesource.com/chromium/src.git/+/master/docs/gpu/gpu_testing_bot_details.md).
+For additional information on GPU testing in Chromium, please see [[chromium/src]//docs/gpu/gpu_testing_bot_details.md](https://chromium.googlesource.com/chromium/src.git/+/main/docs/gpu/gpu_testing_bot_details.md).

 ## Dawn CI/Try Builders
 Dawn builders are specified in [[dawn]//infra/config/global/cr-buildbucket.cfg](../infra/config/global/cr-buildbucket.cfg). This file contains a few mixins such as `clang`, `no_clang`, `x64`, `x86`, `debug`, `release` which are used to specify the bot dimensions and build properties (builder_mixins.recipe.properties). At the time of writing, we have the following builders:
@@ -30,7 +30,7 @@ A Chromium checkout is required for the highest optimization flags. It is possib
 - `recording_time`: The time to convert Dawn commands to native commands.

 Metrics are reported according to the format specified at
-[[chromium]//build/scripts/slave/performance_log_processor.py](https://cs.chromium.org/chromium/build/scripts/slave/performance_log_processor.py)
+[[chromium]//build/recipes/performance_log_processor.py](https://cs.chromium.org/chromium/build/recipes/performance_log_processor.py)

 ### Dumping Trace Files
@@ -16,7 +16,7 @@
 //* This generator is used to produce part of Emscripten's struct_info.json,
 //* which is a list of struct fields that it uses to generate field offset
 //* information for its own code generators.
-//* https://github.com/emscripten-core/emscripten/blob/master/src/struct_info.json
+//* https://github.com/emscripten-core/emscripten/blob/main/src/struct_info.json
 //*
 {
     {% set api = metadata.api.lower() %}
@@ -15,7 +15,7 @@
 //*
 //* This generator is used to produce the number-to-string mappings for
 //* Emscripten's library_webgpu.js.
-//* https://github.com/emscripten-core/emscripten/blob/master/src/library_webgpu.js
+//* https://github.com/emscripten-core/emscripten/blob/main/src/library_webgpu.js
 //*
 {% for type in by_category["enum"] if not type.json_data.get("emscripten_no_enum_table") %}
     {{type.name.CamelCase()}}: {% if type.contiguousFromZero -%}
@@ -25,12 +25,11 @@ import zipfile

 def CheckedJoin(output, path):
     """
-    CheckedJoin returns os.path.join(output, path). It does sanity checks to
-    ensure the resulting path is under output, but shouldn't be used on untrusted
-    input.
+    CheckedJoin returns os.path.join(output, path). It checks that the resulting
+    path is under output, but shouldn't be used on untrusted input.
     """
     path = os.path.normpath(path)
-    if os.path.isabs(path) or path.startswith('.'):
+    if os.path.isabs(path) or path.startswith("."):
         raise ValueError(path)
     return os.path.join(output, path)
@@ -51,22 +50,22 @@ class SymlinkEntry(object):

 def IterateZip(path):
     """
     IterateZip opens the zip file at path and returns a generator of entry objects
     for each file in it.
     """
-    with zipfile.ZipFile(path, 'r') as zip_file:
+    with zipfile.ZipFile(path, "r") as zip_file:
         for info in zip_file.infolist():
-            if info.filename.endswith('/'):
+            if info.filename.endswith("/"):
                 continue
             yield FileEntry(info.filename, None, zip_file.open(info))


 def IterateTar(path, compression):
     """
     IterateTar opens the tar.gz or tar.bz2 file at path and returns a generator of
     entry objects for each file in it.
     """
-    with tarfile.open(path, 'r:' + compression) as tar_file:
+    with tarfile.open(path, "r:" + compression) as tar_file:
         for info in tar_file:
             if info.isdir():
                 pass
@@ -80,11 +79,13 @@ def IterateTar(path, compression):


 def main(args):
-    parser = optparse.OptionParser(usage='Usage: %prog ARCHIVE OUTPUT')
-    parser.add_option('--no-prefix',
-                      dest='no_prefix',
-                      action='store_true',
-                      help='Do not remove a prefix from paths in the archive.')
+    parser = optparse.OptionParser(usage="Usage: %prog ARCHIVE OUTPUT")
+    parser.add_option(
+        "--no-prefix",
+        dest="no_prefix",
+        action="store_true",
+        help="Do not remove a prefix from paths in the archive.",
+    )
     options, args = parser.parse_args(args)

     if len(args) != 2:
@@ -97,7 +98,7 @@ def main(args):
         # Skip archives that weren't downloaded.
         return 0

-    with open(archive, 'rb') as f:
+    with open(archive, "rb") as f:
         sha256 = hashlib.sha256()
         while True:
             chunk = f.read(1024 * 1024)
@@ -113,12 +114,12 @@ def main(args):
         print("Already up-to-date.")
         return 0

-    if archive.endswith('.zip'):
+    if archive.endswith(".zip"):
         entries = IterateZip(archive)
-    elif archive.endswith('.tar.gz'):
-        entries = IterateTar(archive, 'gz')
-    elif archive.endswith('.tar.bz2'):
-        entries = IterateTar(archive, 'bz2')
+    elif archive.endswith(".tar.gz"):
+        entries = IterateTar(archive, "gz")
+    elif archive.endswith(".tar.bz2"):
+        entries = IterateTar(archive, "bz2")
     else:
         raise ValueError(archive)
@@ -132,11 +133,11 @@ def main(args):
     num_extracted = 0
     for entry in entries:
         # Even on Windows, zip files must always use forward slashes.
-        if '\\' in entry.path or entry.path.startswith('/'):
+        if "\\" in entry.path or entry.path.startswith("/"):
             raise ValueError(entry.path)

         if not options.no_prefix:
-            new_prefix, rest = entry.path.split('/', 1)
+            new_prefix, rest = entry.path.split("/", 1)

             # Ensure the archive is consistent.
             if prefix is None:
|
|||
if not os.path.isdir(os.path.dirname(fixed_path)):
|
||||
os.makedirs(os.path.dirname(fixed_path))
|
||||
if isinstance(entry, FileEntry):
|
||||
with open(fixed_path, 'wb') as out:
|
||||
with open(fixed_path, "wb") as out:
|
||||
shutil.copyfileobj(entry.fileobj, out)
|
||||
elif isinstance(entry, SymlinkEntry):
|
||||
os.symlink(entry.target, fixed_path)
|
||||
else:
|
||||
raise TypeError('unknown entry type')
|
||||
raise TypeError("unknown entry type")
|
||||
|
||||
# Fix up permissions if needbe.
|
||||
# TODO(davidben): To be extra tidy, this should only track the execute bit
|
||||
|
@@ -171,12 +172,12 @@ def main(args):
     finally:
         entries.close()

-    with open(stamp_path, 'w') as f:
+    with open(stamp_path, "w") as f:
         f.write(digest)

    print("Done. Extracted %d files." % (num_extracted, ))
    return 0


-if __name__ == '__main__':
+if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
@@ -63,13 +63,13 @@ DawnJSONGenerator(

 # Headers only INTERFACE library with generated headers don't work in CMake
 # because the GENERATED property is local to a directory. Instead we make a
-# STATIC library with a Dummy cpp file.
+# STATIC library with a placeholder cpp file.
 #
 # INTERFACE libraries can only have INTERFACE sources so the sources get added
 # to the dependant's list of sources. If these dependents are in another
 # directory, they don't see the GENERATED property and fail to configure
 # because the file doesn't exist on disk.
-add_library(dawn_headers STATIC ${DAWN_DUMMY_FILE})
+add_library(dawn_headers STATIC ${DAWN_PLACEHOLDER_FILE})
 common_compile_options(dawn_headers)
 target_sources(dawn_headers PRIVATE
     "${DAWN_INCLUDE_DIR}/dawn/dawn_wsi.h"
@@ -89,7 +89,7 @@ DawnJSONGenerator(

 # This headers only library needs to be a STATIC library, see comment for
 # dawn_headers above.
-add_library(dawncpp_headers STATIC ${DAWN_DUMMY_FILE})
+add_library(dawncpp_headers STATIC ${DAWN_PLACEHOLDER_FILE})
 common_compile_options(dawncpp_headers)
 target_sources(dawncpp_headers PRIVATE
     "${DAWN_INCLUDE_DIR}/dawn/EnumClassBitmasks.h"
@@ -107,7 +107,7 @@ DawnJSONGenerator(
     RESULT_VARIABLE "DAWNCPP_GEN_SOURCES"
 )

-add_library(dawncpp STATIC ${DAWN_DUMMY_FILE})
+add_library(dawncpp STATIC ${DAWN_PLACEHOLDER_FILE})
 common_compile_options(dawncpp)
 target_sources(dawncpp PRIVATE ${DAWNCPP_GEN_SOURCES})
 target_link_libraries(dawncpp PUBLIC dawncpp_headers)
@@ -122,7 +122,7 @@ DawnJSONGenerator(
     RESULT_VARIABLE "DAWNPROC_GEN_SOURCES"
 )

-add_library(dawn_proc ${DAWN_DUMMY_FILE})
+add_library(dawn_proc ${DAWN_PLACEHOLDER_FILE})
 common_compile_options(dawn_proc)
 target_compile_definitions(dawn_proc PRIVATE "WGPU_IMPLEMENTATION")
 if(BUILD_SHARED_LIBS)
@@ -20,7 +20,7 @@ DawnGenerator(
     RESULT_VARIABLE "DAWN_VERSION_AUTOGEN_SOURCES"
 )

-add_library(dawn_common STATIC ${DAWN_DUMMY_FILE})
+add_library(dawn_common STATIC ${DAWN_PLACEHOLDER_FILE})
 common_compile_options(dawn_common)
 target_sources(dawn_common PRIVATE
     ${DAWN_VERSION_AUTOGEN_SOURCES}
@@ -18,7 +18,7 @@
 // DAWN_PP_GET_HEAD: get the first element of a __VA_ARGS__ without triggering empty
 // __VA_ARGS__ warnings.
 #define DAWN_INTERNAL_PP_GET_HEAD(firstParam, ...) firstParam
-#define DAWN_PP_GET_HEAD(...) DAWN_INTERNAL_PP_GET_HEAD(__VA_ARGS__, dummyArg)
+#define DAWN_PP_GET_HEAD(...) DAWN_INTERNAL_PP_GET_HEAD(__VA_ARGS__, placeholderArg)

 // DAWN_PP_CONCATENATE: Concatenate tokens, first expanding the arguments passed in.
 #define DAWN_PP_CONCATENATE(arg1, arg2) DAWN_PP_CONCATENATE_1(arg1, arg2)
@@ -53,7 +53,7 @@ T NativeNonDispatachableHandleFromU64(uint64_t u64) {
 # error "Unsupported platform"
 #endif

-// Define a dummy Vulkan handle for use before we include vulkan.h
+// Define a placeholder Vulkan handle for use before we include vulkan.h
 DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(VkSomeHandle)

 // Find out the alignment of native handles. Logically we would use alignof(VkSomeHandleNative) so
@@ -18,7 +18,7 @@ DawnJSONGenerator(
     RESULT_VARIABLE "DAWN_NATIVE_UTILS_GEN_SOURCES"
 )

-add_library(dawn_native ${DAWN_DUMMY_FILE})
+add_library(dawn_native ${DAWN_PLACEHOLDER_FILE})
 common_compile_options(dawn_native)

 target_compile_definitions(dawn_native PRIVATE "DAWN_NATIVE_IMPLEMENTATION")
@@ -555,7 +555,7 @@ DawnJSONGenerator(
     RESULT_VARIABLE "WEBGPU_DAWN_NATIVE_PROC_GEN"
 )

-add_library(webgpu_dawn ${DAWN_DUMMY_FILE})
+add_library(webgpu_dawn ${DAWN_PLACEHOLDER_FILE})
 common_compile_options(webgpu_dawn)
 target_link_libraries(webgpu_dawn PRIVATE dawn_native)
 target_compile_definitions(webgpu_dawn PRIVATE "WGPU_IMPLEMENTATION")
@@ -167,7 +167,7 @@ namespace dawn::native {
     }

     bool CommandAllocator::IsEmpty() const {
-        return mCurrentPtr == reinterpret_cast<const uint8_t*>(&mDummyEnum[0]);
+        return mCurrentPtr == reinterpret_cast<const uint8_t*>(&mPlaceholderEnum[0]);
     }

     CommandBlocks&& CommandAllocator::AcquireBlocks() {
@@ -221,8 +221,8 @@ namespace dawn::native {
     }

     void CommandAllocator::ResetPointers() {
-        mCurrentPtr = reinterpret_cast<uint8_t*>(&mDummyEnum[0]);
-        mEndPtr = reinterpret_cast<uint8_t*>(&mDummyEnum[1]);
+        mCurrentPtr = reinterpret_cast<uint8_t*>(&mPlaceholderEnum[0]);
+        mEndPtr = reinterpret_cast<uint8_t*>(&mPlaceholderEnum[1]);
     }

 }  // namespace dawn::native
@@ -259,7 +259,7 @@ namespace dawn::native {
         // Data used for the block range at initialization so that the first call to Allocate sees
         // there is not enough space and calls GetNewBlock. This avoids having to special case the
         // initialization in Allocate.
-        uint32_t mDummyEnum[1] = {0};
+        uint32_t mPlaceholderEnum[1] = {0};

         // Pointers to the current range of allocation in the block. Guaranteed to allow for at
         // least one uint32_t if not nullptr, so that the special kEndOfBlock command id can always
@@ -160,9 +160,9 @@ namespace dawn::native {
         if (descriptor.layout == nullptr) {
             // Ref will keep the pipeline layout alive until the end of the function where
             // the pipeline will take another reference.
-            DAWN_TRY_ASSIGN(layoutRef,
-                            PipelineLayoutBase::CreateDefault(
-                                device, GetRenderStagesAndSetDummyShader(device, &descriptor)));
+            DAWN_TRY_ASSIGN(layoutRef, PipelineLayoutBase::CreateDefault(
+                                           device, GetRenderStagesAndSetPlaceholderShader(
+                                                       device, &descriptor)));
             outDescriptor->layout = layoutRef.Get();
         }
@@ -267,8 +267,8 @@ namespace dawn::native {

         DAWN_TRY_ASSIGN(mEmptyBindGroupLayout, CreateEmptyBindGroupLayout());

-        // If dummy fragment shader module is needed, initialize it
-        if (IsToggleEnabled(Toggle::UseDummyFragmentInVertexOnlyPipeline)) {
+        // If placeholder fragment shader module is needed, initialize it
+        if (IsToggleEnabled(Toggle::UsePlaceholderFragmentInVertexOnlyPipeline)) {
             // The empty fragment shader, used as a work around for vertex-only render pipeline
             constexpr char kEmptyFragmentShader[] = R"(
                 @stage(fragment) fn fs_empty_main() {}
@@ -278,7 +278,7 @@ namespace dawn::native {
             wgslDesc.source = kEmptyFragmentShader;
             descriptor.nextInChain = &wgslDesc;

-            DAWN_TRY_ASSIGN(mInternalPipelineStore->dummyFragmentShader,
+            DAWN_TRY_ASSIGN(mInternalPipelineStore->placeholderFragmentShader,
                             CreateShaderModule(&descriptor));
         }
@@ -414,7 +414,7 @@ namespace dawn::native {
         mPersistentCache = nullptr;
         mEmptyBindGroupLayout = nullptr;
         mInternalPipelineStore = nullptr;
-        mExternalTextureDummyView = nullptr;
+        mExternalTexturePlaceholderView = nullptr;

         AssumeCommandsComplete();
@@ -811,17 +811,17 @@ namespace dawn::native {
     }

     ResultOrError<Ref<TextureViewBase>>
-    DeviceBase::GetOrCreateDummyTextureViewForExternalTexture() {
-        if (!mExternalTextureDummyView.Get()) {
-            Ref<TextureBase> externalTextureDummy;
+    DeviceBase::GetOrCreatePlaceholderTextureViewForExternalTexture() {
+        if (!mExternalTexturePlaceholderView.Get()) {
+            Ref<TextureBase> externalTexturePlaceholder;
             TextureDescriptor textureDesc;
             textureDesc.dimension = wgpu::TextureDimension::e2D;
             textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
-            textureDesc.label = "Dawn_External_Texture_Dummy_Texture";
+            textureDesc.label = "Dawn_External_Texture_Placeholder_Texture";
             textureDesc.size = {1, 1, 1};
             textureDesc.usage = wgpu::TextureUsage::TextureBinding;

-            DAWN_TRY_ASSIGN(externalTextureDummy, CreateTexture(&textureDesc));
+            DAWN_TRY_ASSIGN(externalTexturePlaceholder, CreateTexture(&textureDesc));

             TextureViewDescriptor textureViewDesc;
             textureViewDesc.arrayLayerCount = 1;
@@ -829,14 +829,14 @@ namespace dawn::native {
             textureViewDesc.baseArrayLayer = 0;
             textureViewDesc.dimension = wgpu::TextureViewDimension::e2D;
             textureViewDesc.format = wgpu::TextureFormat::RGBA8Unorm;
-            textureViewDesc.label = "Dawn_External_Texture_Dummy_Texture_View";
+            textureViewDesc.label = "Dawn_External_Texture_Placeholder_Texture_View";
             textureViewDesc.mipLevelCount = 1;

-            DAWN_TRY_ASSIGN(mExternalTextureDummyView,
-                            CreateTextureView(externalTextureDummy.Get(), &textureViewDesc));
+            DAWN_TRY_ASSIGN(mExternalTexturePlaceholderView,
+                            CreateTextureView(externalTexturePlaceholder.Get(), &textureViewDesc));
         }

-        return mExternalTextureDummyView;
+        return mExternalTexturePlaceholderView;
     }

     ResultOrError<Ref<PipelineLayoutBase>> DeviceBase::GetOrCreatePipelineLayout(
@@ -172,7 +172,7 @@ namespace dawn::native {

         void UncacheComputePipeline(ComputePipelineBase* obj);

-        ResultOrError<Ref<TextureViewBase>> GetOrCreateDummyTextureViewForExternalTexture();
+        ResultOrError<Ref<TextureViewBase>> GetOrCreatePlaceholderTextureViewForExternalTexture();

         ResultOrError<Ref<PipelineLayoutBase>> GetOrCreatePipelineLayout(
             const PipelineLayoutDescriptor* descriptor);
@@ -512,7 +512,7 @@ namespace dawn::native {

         Ref<BindGroupLayoutBase> mEmptyBindGroupLayout;

-        Ref<TextureViewBase> mExternalTextureDummyView;
+        Ref<TextureViewBase> mExternalTexturePlaceholderView;

         std::unique_ptr<DynamicUploader> mDynamicUploader;
         std::unique_ptr<AsyncTaskManager> mAsyncTaskManager;
@@ -130,7 +130,7 @@ namespace dawn::native {
             mTextureViews[1] = descriptor->plane1;
         } else {
             DAWN_TRY_ASSIGN(mTextureViews[1],
-                            device->GetOrCreateDummyTextureViewForExternalTexture());
+                            device->GetOrCreatePlaceholderTextureViewForExternalTexture());
         }

         // We must create a buffer to store parameters needed by a shader that operates on this
@@ -66,7 +66,7 @@ namespace dawn::native {
         ExternalTextureBase(DeviceBase* device, ObjectBase::ErrorTag tag);
         MaybeError Initialize(DeviceBase* device, const ExternalTextureDescriptor* descriptor);

-        Ref<TextureBase> mDummyTexture;
+        Ref<TextureBase> mPlaceholderTexture;
         Ref<BufferBase> mParamsBuffer;
         std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat> mTextureViews;
@@ -41,7 +41,7 @@ namespace dawn::native {
         Ref<ComputePipelineBase> timestampComputePipeline;
         Ref<ShaderModuleBase> timestampCS;

-        Ref<ShaderModuleBase> dummyFragmentShader;
+        Ref<ShaderModuleBase> placeholderFragmentShader;

         // A scratch buffer suitable for use as a copy destination and storage binding.
         ScratchBuffer scratchStorage;
@@ -325,8 +325,7 @@ namespace dawn::native {
         DAWN_TRY_ASSIGN(result, device->GetOrCreatePipelineLayout(&desc));
         ASSERT(!result->IsError());

-        // Sanity check in debug that the pipeline layout is compatible with the current
-        // pipeline.
+        // Check in debug that the pipeline layout is compatible with the current pipeline.
         for (const StageAndDescriptor& stage : stages) {
             const EntryPointMetadata& metadata = stage.module->GetEntryPoint(stage.entryPoint);
             ASSERT(ValidateCompatibilityWithPipelineLayout(device, metadata, result.Get())
@@ -473,7 +473,7 @@ namespace dawn::native {
         return {};
     }

-    std::vector<StageAndDescriptor> GetRenderStagesAndSetDummyShader(
+    std::vector<StageAndDescriptor> GetRenderStagesAndSetPlaceholderShader(
         DeviceBase* device,
         const RenderPipelineDescriptor* descriptor) {
         std::vector<StageAndDescriptor> stages;
@@ -484,13 +484,13 @@ namespace dawn::native {
             stages.push_back({SingleShaderStage::Fragment, descriptor->fragment->module,
                               descriptor->fragment->entryPoint, descriptor->fragment->constantCount,
                               descriptor->fragment->constants});
-        } else if (device->IsToggleEnabled(Toggle::UseDummyFragmentInVertexOnlyPipeline)) {
+        } else if (device->IsToggleEnabled(Toggle::UsePlaceholderFragmentInVertexOnlyPipeline)) {
             InternalPipelineStore* store = device->GetInternalPipelineStore();
-            // The dummy fragment shader module should already be initialized
-            DAWN_ASSERT(store->dummyFragmentShader != nullptr);
-            ShaderModuleBase* dummyFragmentShader = store->dummyFragmentShader.Get();
-            stages.push_back(
-                {SingleShaderStage::Fragment, dummyFragmentShader, "fs_empty_main", 0, nullptr});
+            // The placeholder fragment shader module should already be initialized
+            DAWN_ASSERT(store->placeholderFragmentShader != nullptr);
+            ShaderModuleBase* placeholderFragmentShader = store->placeholderFragmentShader.Get();
+            stages.push_back({SingleShaderStage::Fragment, placeholderFragmentShader,
+                              "fs_empty_main", 0, nullptr});
         }
         return stages;
     }
@@ -513,7 +513,7 @@ namespace dawn::native {
         : PipelineBase(device,
                        descriptor->layout,
                        descriptor->label,
-                       GetRenderStagesAndSetDummyShader(device, descriptor)),
+                       GetRenderStagesAndSetPlaceholderShader(device, descriptor)),
           mAttachmentState(device->GetOrCreateAttachmentState(descriptor)) {
         mVertexBufferCount = descriptor->vertex.bufferCount;
         const VertexBufferLayout* buffers = descriptor->vertex.buffers;
@@ -33,7 +33,7 @@ namespace dawn::native {
     MaybeError ValidateRenderPipelineDescriptor(DeviceBase* device,
                                                 const RenderPipelineDescriptor* descriptor);

-    std::vector<StageAndDescriptor> GetRenderStagesAndSetDummyShader(
+    std::vector<StageAndDescriptor> GetRenderStagesAndSetPlaceholderShader(
         DeviceBase* device,
         const RenderPipelineDescriptor* descriptor);
@@ -91,7 +91,7 @@ namespace dawn::native {
    // The implementation of functions in this file can have a lot of control flow and corner cases
    // so each modification should come with extensive tests and ensure 100% code coverage of the
    // modified functions. See instructions at
-   // https://chromium.googlesource.com/chromium/src/+/master/docs/testing/code_coverage.md#local-coverage-script
+   // https://chromium.googlesource.com/chromium/src/+/main/docs/testing/code_coverage.md#local-coverage-script
    // to run the test with code coverage. A command line that worked in the past (with the right
    // GN args for the out/coverage directory in a Chromium checkout) is:
    //
@@ -232,11 +232,11 @@ namespace dawn::native {
          "Disables mipmaps for r8unorm and rg8unorm textures, which are known on some drivers "
          "to not clear correctly.",
          "https://crbug.com/dawn/1071"}},
-        {Toggle::UseDummyFragmentInVertexOnlyPipeline,
-         {"use_dummy_fragment_in_vertex_only_pipeline",
-          "Use a dummy empty fragment shader in vertex only render pipeline. This toggle must "
-          "be enabled for OpenGL ES backend, and serves as a workaround by default enabled on "
-          "some Metal devices with Intel GPU to ensure the depth result is correct.",
+        {Toggle::UsePlaceholderFragmentInVertexOnlyPipeline,
+         {"use_placeholder_fragment_in_vertex_only_pipeline",
+          "Use a placeholder empty fragment shader in vertex only render pipeline. This toggle "
+          "must be enabled for OpenGL ES backend, and serves as a workaround by default "
+          "enabled on some Metal devices with Intel GPU to ensure the depth result is correct.",
           "https://crbug.com/dawn/136"}},
         {Toggle::FxcOptimizations,
          {"fxc_optimizations",
@@ -260,7 +260,7 @@ namespace dawn::native {
          "VK_KHR_zero_initialize_workgroup_memory is supported.",
          "https://crbug.com/dawn/1302"}},

-        // Dummy comment to separate the }} so it is clearer what to copy-paste to add a toggle.
+        // Comment to separate the }} so it is clearer what to copy-paste to add a toggle.
     }};
 }  // anonymous namespace
@@ -61,7 +61,7 @@ namespace dawn::native {
         DisableSymbolRenaming,
         UseUserDefinedLabelsInBackend,
         DisableR8RG8Mipmaps,
-        UseDummyFragmentInVertexOnlyPipeline,
+        UsePlaceholderFragmentInVertexOnlyPipeline,
         FxcOptimizations,
         RecordDetailedTimingInTraceEvents,
         DisableTimestampQueryConversion,
@@ -217,13 +217,14 @@ namespace dawn::native::metal {
         }

         // On some Intel GPU vertex only render pipeline get wrong depth result if no fragment
-        // shader provided. Create a dummy fragment shader module to work around this issue.
+        // shader provided. Create a placeholder fragment shader module to work around this issue.
         if (gpu_info::IsIntel(vendorId)) {
-            bool useDummyFragmentShader = true;
+            bool usePlaceholderFragmentShader = true;
             if (gpu_info::IsSkylake(deviceId)) {
-                useDummyFragmentShader = false;
+                usePlaceholderFragmentShader = false;
             }
-            SetToggle(Toggle::UseDummyFragmentInVertexOnlyPipeline, useDummyFragmentShader);
+            SetToggle(Toggle::UsePlaceholderFragmentInVertexOnlyPipeline,
+                      usePlaceholderFragmentShader);
         }
     }
@@ -99,8 +99,8 @@ namespace dawn::native::opengl {
         SetToggle(Toggle::DisableDepthStencilRead, !supportsDepthStencilRead);
         SetToggle(Toggle::DisableSampleVariables, !supportsSampleVariables);
         SetToggle(Toggle::FlushBeforeClientWaitSync, gl.GetVersion().IsES());
-        // For OpenGL ES, we must use dummy fragment shader for vertex-only render pipeline.
-        SetToggle(Toggle::UseDummyFragmentInVertexOnlyPipeline, gl.GetVersion().IsES());
+        // For OpenGL ES, we must use a placeholder fragment shader for vertex-only render pipeline.
+        SetToggle(Toggle::UsePlaceholderFragmentInVertexOnlyPipeline, gl.GetVersion().IsES());
     }

     const GLFormat& Device::GetGLFormat(const Format& format) {
@@ -87,26 +87,26 @@ namespace dawn::native::opengl {

         // Create an OpenGL shader for each stage and gather the list of combined samplers.
         PerStage<CombinedSamplerInfo> combinedSamplers;
-        bool needsDummySampler = false;
+        bool needsPlaceholderSampler = false;
         std::vector<GLuint> glShaders;
         for (SingleShaderStage stage : IterateStages(activeStages)) {
             const ShaderModule* module = ToBackend(stages[stage].module.Get());
             std::string glsl;
             DAWN_TRY_ASSIGN(glsl, module->TranslateToGLSL(stages[stage].entryPoint.c_str(), stage,
                                                           &combinedSamplers[stage], layout,
-                                                          &needsDummySampler));
+                                                          &needsPlaceholderSampler));
             GLuint shader;
             DAWN_TRY_ASSIGN(shader, CreateShader(gl, GLShaderType(stage), glsl.c_str()));
             gl.AttachShader(mProgram, shader);
             glShaders.push_back(shader);
         }

-        if (needsDummySampler) {
+        if (needsPlaceholderSampler) {
             SamplerDescriptor desc = {};
             ASSERT(desc.minFilter == wgpu::FilterMode::Nearest);
             ASSERT(desc.magFilter == wgpu::FilterMode::Nearest);
             ASSERT(desc.mipmapFilter == wgpu::FilterMode::Nearest);
-            mDummySampler =
+            mPlaceholderSampler =
                 ToBackend(layout->GetDevice()->GetOrCreateSampler(&desc).AcquireSuccess());
         }
@@ -164,8 +164,8 @@ namespace dawn::native::opengl {
                     wgpu::TextureSampleType::Float;
             }
             {
-                if (combined.useDummySampler) {
-                    mDummySamplerUnits.push_back(textureUnit);
+                if (combined.usePlaceholderSampler) {
+                    mPlaceholderSamplerUnits.push_back(textureUnit);
                 } else {
                     const BindGroupLayoutBase* bgl =
                         layout->GetBindGroupLayout(combined.samplerLocation.group);
@@ -209,9 +209,9 @@ namespace dawn::native::opengl {

     void PipelineGL::ApplyNow(const OpenGLFunctions& gl) {
         gl.UseProgram(mProgram);
-        for (GLuint unit : mDummySamplerUnits) {
-            ASSERT(mDummySampler.Get() != nullptr);
-            gl.BindSampler(unit, mDummySampler->GetNonFilteringHandle());
+        for (GLuint unit : mPlaceholderSamplerUnits) {
+            ASSERT(mPlaceholderSampler.Get() != nullptr);
+            gl.BindSampler(unit, mPlaceholderSampler->GetNonFilteringHandle());
         }
     }
@@ -58,10 +58,10 @@ namespace dawn::native::opengl {
         GLuint mProgram;
         std::vector<std::vector<SamplerUnit>> mUnitsForSamplers;
         std::vector<std::vector<GLuint>> mUnitsForTextures;
-        std::vector<GLuint> mDummySamplerUnits;
+        std::vector<GLuint> mPlaceholderSamplerUnits;
         // TODO(enga): This could live on the Device, or elsewhere, but currently it makes Device
         // destruction complex as it requires the sampler to be destroyed before the sampler cache.
-        Ref<Sampler> mDummySampler;
+        Ref<Sampler> mPlaceholderSampler;
     };

 }  // namespace dawn::native::opengl
@@ -39,15 +39,15 @@ namespace dawn::native::opengl {
     }

     bool operator<(const CombinedSampler& a, const CombinedSampler& b) {
-        return std::tie(a.useDummySampler, a.samplerLocation, a.textureLocation) <
-               std::tie(b.useDummySampler, a.samplerLocation, b.textureLocation);
+        return std::tie(a.usePlaceholderSampler, a.samplerLocation, a.textureLocation) <
+               std::tie(b.usePlaceholderSampler, a.samplerLocation, b.textureLocation);
     }

     std::string CombinedSampler::GetName() const {
         std::ostringstream o;
         o << "dawn_combined";
-        if (useDummySampler) {
-            o << "_dummy_sampler";
+        if (usePlaceholderSampler) {
+            o << "_placeholder_sampler";
         } else {
             o << "_" << static_cast<uint32_t>(samplerLocation.group) << "_"
               << static_cast<uint32_t>(samplerLocation.binding);
@@ -82,7 +82,7 @@ namespace dawn::native::opengl {
         SingleShaderStage stage,
         CombinedSamplerInfo* combinedSamplers,
         const PipelineLayout* layout,
-        bool* needsDummySampler) const {
+        bool* needsPlaceholderSampler) const {
         TRACE_EVENT0(GetDevice()->GetPlatform(), General, "TranslateToGLSL");
         tint::transform::Manager transformManager;
         tint::transform::DataMap transformInputs;
@@ -111,7 +111,7 @@ namespace dawn::native::opengl {
         // of the original texture and sampler, and generates a unique name. The
         // corresponding uniforms will be retrieved by these generated names
         // in PipelineGL. Any texture-only references will have
-        // "useDummySampler" set to true, and only the texture binding point
+        // "usePlaceholderSampler" set to true, and only the texture binding point
         // will be used in naming them. In addition, Dawn will bind a
         // non-filtering sampler for them (see PipelineGL).
         auto uses = inspector.GetSamplerTextureUses(entryPointName, placeholderBindingPoint);
@@ -120,10 +120,10 @@ namespace dawn::native::opengl {

             CombinedSampler* info = &combinedSamplers->back();
             if (use.sampler_binding_point == placeholderBindingPoint) {
-                info->useDummySampler = true;
-                *needsDummySampler = true;
+                info->usePlaceholderSampler = true;
+                *needsPlaceholderSampler = true;
             } else {
-                info->useDummySampler = false;
+                info->usePlaceholderSampler = false;
             }
             info->samplerLocation.group = BindGroupIndex(use.sampler_binding_point.group);
             info->samplerLocation.binding = BindingNumber(use.sampler_binding_point.binding);
|
@@ -131,7 +131,7 @@ namespace dawn::native::opengl {
             info->textureLocation.binding = BindingNumber(use.texture_binding_point.binding);
             tintOptions.binding_map[use] = info->GetName();
         }
-        if (*needsDummySampler) {
+        if (*needsPlaceholderSampler) {
             tintOptions.placeholder_binding_point = placeholderBindingPoint;
         }
|
@@ -36,8 +36,9 @@ namespace dawn::native::opengl {
         BindingLocation samplerLocation;
         BindingLocation textureLocation;
         // OpenGL requires a sampler with texelFetch. If this is true, the developer did not provide
-        // one and Dawn should bind a dummy non-filtering sampler. |samplerLocation| is unused.
-        bool useDummySampler;
+        // one and Dawn should bind a placeholder non-filtering sampler. |samplerLocation| is
+        // unused.
+        bool usePlaceholderSampler;
         std::string GetName() const;
     };
     bool operator<(const CombinedSampler& a, const CombinedSampler& b);
@@ -57,7 +58,7 @@ namespace dawn::native::opengl {
             SingleShaderStage stage,
             CombinedSamplerInfo* combinedSamplers,
             const PipelineLayout* layout,
-            bool* needsDummySampler) const;
+            bool* needsPlaceholderSampler) const;

       private:
         ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
@@ -72,7 +72,7 @@ namespace dawn::native::vulkan {

     template <typename VK_STRUCT_TYPE>
     const VkBaseOutStructure* ToVkBaseOutStructure(const VK_STRUCT_TYPE* t) {
-        // Sanity checks to ensure proper type safety.
+        // Checks to ensure proper type safety.
         static_assert(
             offsetof(VK_STRUCT_TYPE, sType) == offsetof(VkBaseOutStructure, sType) &&
                 offsetof(VK_STRUCT_TYPE, pNext) == offsetof(VkBaseOutStructure, pNext),
@@ -397,8 +397,8 @@ namespace dawn::native::vulkan {
         inputAssembly.topology = VulkanPrimitiveTopology(GetPrimitiveTopology());
         inputAssembly.primitiveRestartEnable = ShouldEnablePrimitiveRestart(GetPrimitiveTopology());

-        // A dummy viewport/scissor info. The validation layers force use to provide at least one
-        // scissor and one viewport here, even if we choose to make them dynamic.
+        // A placeholder viewport/scissor info. The validation layers force use to provide at least
+        // one scissor and one viewport here, even if we choose to make them dynamic.
         VkViewport viewportDesc;
         viewportDesc.x = 0.0f;
         viewportDesc.y = 0.0f;
@@ -490,7 +490,7 @@ namespace dawn::native::vulkan {
         colorBlend.logicOp = VK_LOGIC_OP_CLEAR;
         colorBlend.attachmentCount = static_cast<uint8_t>(highestColorAttachmentIndexPlusOne);
         colorBlend.pAttachments = colorBlendAttachments.data();
-        // The blend constant is always dynamic so we fill in a dummy value
+        // The blend constant is always dynamic so we fill in a placeholder value
         colorBlend.blendConstants[0] = 0.0f;
         colorBlend.blendConstants[1] = 0.0f;
         colorBlend.blendConstants[2] = 0.0f;
@@ -68,7 +68,7 @@ namespace dawn::native::vulkan {
     // that is already initialized.
     template <typename VK_STRUCT_TYPE>
     void Add(VK_STRUCT_TYPE* vkStruct) {
-        // Sanity checks to ensure proper type safety.
+        // Checks to ensure proper type safety.
         static_assert(
             offsetof(VK_STRUCT_TYPE, sType) == offsetof(VkBaseOutStructure, sType) &&
                 offsetof(VK_STRUCT_TYPE, pNext) == offsetof(VkBaseOutStructure, pNext),
@@ -101,7 +101,7 @@ Open or create the `.vscode/launch.json` file, and add:
             "outFiles": [ "./**/*.js" ],
             "args": [
                 "-e", "require('./src/common/tools/setup-ts-in-node.js');require('./src/common/runtime/cmdline.ts');",
-                "--", "dummy-arg",
+                "--", "placeholder-arg",
                 "--gpu-provider",
                 "[path-to-dawn.node]", // REPLACE: [path-to-dawn.node]
                 "[test-query]", // REPLACE: [test-query]
@@ -127,7 +127,7 @@ cd <cts-root-dir>
 [path-to-node] \ # for example <dawn-root-dir>/third_party/node/<arch>/node
     -e "require('./src/common/tools/setup-ts-in-node.js');require('./src/common/runtime/cmdline.ts');" \
     -- \
-    dummy-arg \
+    placeholder-arg \
     --gpu-provider [path to dawn.node] \
     [test-query]
 ```
@@ -457,8 +457,8 @@ func (r *runner) gatherTestCases(query string, verbose bool) error {
         "--", // Start of arguments
         // src/common/runtime/helper/sys.ts expects 'node file.js <args>'
         // and slices away the first two arguments. When running with '-e', args
-        // start at 1, so just inject a dummy argument.
-        "dummy-arg",
+        // start at 1, so just inject a placeholder argument.
+        "placeholder-arg",
         "--list",
     }, query)
@@ -603,8 +603,8 @@ func (r *runner) runServer(id int, caseIndices <-chan int, results chan<- result
         "--",
         // src/common/runtime/helper/sys.ts expects 'node file.js <args>'
         // and slices away the first two arguments. When running with '-e', args
-        // start at 1, so just inject a dummy argument.
-        "dummy-arg",
+        // start at 1, so just inject a placeholder argument.
+        "placeholder-arg",
         // Actual arguments begin here
         "--gpu-provider", r.dawnNode,
     }
@@ -940,8 +940,8 @@ func (r *runner) runTestcase(query string) result {
         "--",
         // src/common/runtime/helper/sys.ts expects 'node file.js <args>'
         // and slices away the first two arguments. When running with '-e', args
-        // start at 1, so just inject a dummy argument.
-        "dummy-arg",
+        // start at 1, so just inject a placeholder argument.
+        "placeholder-arg",
         // Actual arguments begin here
         "--gpu-provider", r.dawnNode,
         "--verbose",
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-add_library(dawn_platform ${DAWN_DUMMY_FILE})
+add_library(dawn_platform ${DAWN_PLACEHOLDER_FILE})
 common_compile_options(dawn_platform)

 target_compile_definitions(dawn_platform PRIVATE "DAWN_PLATFORM_IMPLEMENTATION")
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-add_library(dawn_sample_utils STATIC ${DAWN_DUMMY_FILE})
+add_library(dawn_sample_utils STATIC ${DAWN_PLACEHOLDER_FILE})
 common_compile_options(dawn_sample_utils)
 target_sources(dawn_sample_utils PRIVATE
     "SampleUtils.cpp"
@@ -679,7 +679,7 @@ using DawnTest = DawnTestWithParams<>;
 // AdapterTestParam, and whose constructor looks like:
 //   Param(AdapterTestParam, ABorC, 12or3, ..., otherParams... )
 // You must also teach GTest how to print this struct.
-// https://github.com/google/googletest/blob/master/docs/advanced.md#teaching-googletest-how-to-print-your-values
+// https://github.com/google/googletest/blob/main/docs/advanced.md#teaching-googletest-how-to-print-your-values
 // Macro DAWN_TEST_PARAM_STRUCT can help generate this struct.
 #define DAWN_INSTANTIATE_TEST_P(testName, ...) \
     INSTANTIATE_TEST_SUITE_P( \
@@ -434,9 +434,9 @@ TEST_P(MultipleWriteThenMultipleReadTests, SeparateBuffers) {
         vbContents.pos[1] = vec4<f32>(1.0, 1.0, 0.0, 1.0);
         vbContents.pos[2] = vec4<f32>(1.0, -1.0, 0.0, 1.0);
         vbContents.pos[3] = vec4<f32>(-1.0, -1.0, 0.0, 1.0);
-        let dummy : i32 = 0;
+        let placeholder : i32 = 0;
         ibContents.indices[0] = vec4<i32>(0, 1, 2, 0);
-        ibContents.indices[1] = vec4<i32>(2, 3, dummy, dummy);
+        ibContents.indices[1] = vec4<i32>(2, 3, placeholder, placeholder);
         uniformContents.color = 1.0;
         storageContents.color = 1.0;
     })");
@@ -549,9 +549,9 @@ TEST_P(MultipleWriteThenMultipleReadTests, OneBuffer) {
         contents.pos[1] = vec4<f32>(1.0, 1.0, 0.0, 1.0);
         contents.pos[2] = vec4<f32>(1.0, -1.0, 0.0, 1.0);
         contents.pos[3] = vec4<f32>(-1.0, -1.0, 0.0, 1.0);
-        let dummy : i32 = 0;
+        let placeholder : i32 = 0;
         contents.indices[0] = vec4<i32>(0, 1, 2, 0);
-        contents.indices[1] = vec4<i32>(2, 3, dummy, dummy);
+        contents.indices[1] = vec4<i32>(2, 3, placeholder, placeholder);
         contents.color0 = 1.0;
         contents.color1 = 1.0;
     })");
@@ -535,10 +535,11 @@ fn IsEqualTo(pixel : vec4<f32>, expected : vec4<f32>) -> bool {

         wgpu::CommandEncoder encoder = device.CreateCommandEncoder();

-        wgpu::Texture dummyOutputTexture = CreateTexture(
+        wgpu::Texture placeholderOutputTexture = CreateTexture(
             kRenderAttachmentFormat,
             wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc, {1, 1});
-        utils::ComboRenderPassDescriptor renderPassDescriptor({dummyOutputTexture.CreateView()});
+        utils::ComboRenderPassDescriptor renderPassDescriptor(
+            {placeholderOutputTexture.CreateView()});
         wgpu::RenderPassEncoder renderPassEncoder = encoder.BeginRenderPass(&renderPassDescriptor);
         renderPassEncoder.SetBindGroup(0, bindGroup);
         renderPassEncoder.SetPipeline(pipeline);
@@ -308,12 +308,12 @@ TEST_P(VertexOnlyRenderPipelineTest, MultiplePass) {

 DAWN_INSTANTIATE_TEST(VertexOnlyRenderPipelineTest,
                       D3D12Backend(),
-                      D3D12Backend({"use_dummy_fragment_in_vertex_only_pipeline"}),
+                      D3D12Backend({"use_placeholder_fragment_in_vertex_only_pipeline"}),
                       MetalBackend(),
-                      MetalBackend({"use_dummy_fragment_in_vertex_only_pipeline"}),
+                      MetalBackend({"use_placeholder_fragment_in_vertex_only_pipeline"}),
                       OpenGLBackend(),
-                      OpenGLBackend({"use_dummy_fragment_in_vertex_only_pipeline"}),
+                      OpenGLBackend({"use_placeholder_fragment_in_vertex_only_pipeline"}),
                       OpenGLESBackend(),
-                      OpenGLESBackend({"use_dummy_fragment_in_vertex_only_pipeline"}),
+                      OpenGLESBackend({"use_placeholder_fragment_in_vertex_only_pipeline"}),
                       VulkanBackend(),
-                      VulkanBackend({"use_dummy_fragment_in_vertex_only_pipeline"}));
+                      VulkanBackend({"use_placeholder_fragment_in_vertex_only_pipeline"}));
@@ -229,7 +229,7 @@ TEST_F(WindowSurfaceInstanceTests, InvalidMetalLayer) {
     wgpu::SurfaceDescriptorFromMetalLayer chainedDescriptor;
     // The CALayer is autoreleased. Releasing it causes a test failure when the Chromium GTest
    // autoreleasepool is emptied.
-    chainedDescriptor.layer = utils::CreateDummyCALayer();
+    chainedDescriptor.layer = utils::CreatePlaceholderCALayer();

     wgpu::SurfaceDescriptor descriptor;
     descriptor.nextInChain = &chainedDescriptor;
@@ -131,7 +131,7 @@ void DawnPerfTestEnvironment::SetUp() {
         std::ofstream outFile;
         outFile.open(mTraceFile);
         outFile << "{ \"traceEvents\": [";
-        outFile << "{}";  // Dummy object so trace events can always prepend a comma
+        outFile << "{}";  // Placeholder object so trace events can always prepend a comma
         outFile.flush();
         outFile.close();
     }
@@ -23,7 +23,7 @@

 using namespace dawn::native;

-class DummyResourceHeapAllocator : public ResourceHeapAllocator {
+class PlaceholderResourceHeapAllocator : public ResourceHeapAllocator {
   public:
     ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(uint64_t size) override {
         return std::make_unique<ResourceHeapBase>();
@@ -32,15 +32,15 @@ class DummyResourceHeapAllocator : public ResourceHeapAllocator {
     }
 };

-class DummyBuddyResourceAllocator {
+class PlaceholderBuddyResourceAllocator {
   public:
-    DummyBuddyResourceAllocator(uint64_t maxBlockSize, uint64_t memorySize)
+    PlaceholderBuddyResourceAllocator(uint64_t maxBlockSize, uint64_t memorySize)
         : mAllocator(maxBlockSize, memorySize, &mHeapAllocator) {
     }

-    DummyBuddyResourceAllocator(uint64_t maxBlockSize,
-                                uint64_t memorySize,
-                                ResourceHeapAllocator* heapAllocator)
+    PlaceholderBuddyResourceAllocator(uint64_t maxBlockSize,
+                                      uint64_t memorySize,
+                                      ResourceHeapAllocator* heapAllocator)
         : mAllocator(maxBlockSize, memorySize, heapAllocator) {
     }
@@ -59,7 +59,7 @@ class DummyBuddyResourceAllocator {
     }

   private:
-    DummyResourceHeapAllocator mHeapAllocator;
+    PlaceholderResourceHeapAllocator mHeapAllocator;
     BuddyMemoryAllocator mAllocator;
 };
@@ -73,7 +73,7 @@ TEST(BuddyMemoryAllocatorTests, SingleHeap) {
     //
     constexpr uint64_t heapSize = 128;
     constexpr uint64_t maxBlockSize = heapSize;
-    DummyBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+    PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);

     // Cannot allocate greater than heap size.
     ResourceMemoryAllocation invalidAllocation = allocator.Allocate(heapSize * 2);
@@ -106,7 +106,7 @@ TEST(BuddyMemoryAllocatorTests, MultipleHeaps) {
     //
     constexpr uint64_t maxBlockSize = 256;
     constexpr uint64_t heapSize = 128;
-    DummyBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+    PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);

     // Cannot allocate greater than heap size.
     ResourceMemoryAllocation invalidAllocation = allocator.Allocate(heapSize * 2);
@@ -154,7 +154,7 @@ TEST(BuddyMemoryAllocatorTests, MultipleSplitHeaps) {
     //
     constexpr uint64_t maxBlockSize = 256;
     constexpr uint64_t heapSize = 128;
-    DummyBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+    PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);

     // Allocate two 64 byte sub-allocations.
     ResourceMemoryAllocation allocation1 = allocator.Allocate(heapSize / 2);
@@ -208,7 +208,7 @@ TEST(BuddyMemoryAllocatorTests, MultiplSplitHeapsVariableSizes) {
     //
     constexpr uint64_t heapSize = 128;
     constexpr uint64_t maxBlockSize = 512;
-    DummyBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+    PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);

     // Allocate two 64-byte allocations.
     ResourceMemoryAllocation allocation1 = allocator.Allocate(64);
@@ -284,7 +284,7 @@ TEST(BuddyMemoryAllocatorTests, SameSizeVariousAlignment) {
     //
     constexpr uint64_t heapSize = 128;
     constexpr uint64_t maxBlockSize = 512;
-    DummyBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+    PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);

     ResourceMemoryAllocation allocation1 = allocator.Allocate(64, 128);
     ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
@@ -334,7 +334,7 @@ TEST(BuddyMemoryAllocatorTests, VariousSizeSameAlignment) {
     //
     constexpr uint64_t heapSize = 128;
     constexpr uint64_t maxBlockSize = 512;
-    DummyBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+    PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);

     constexpr uint64_t alignment = 64;
@@ -373,7 +373,7 @@ TEST(BuddyMemoryAllocatorTests, VariousSizeSameAlignment) {
 TEST(BuddyMemoryAllocatorTests, AllocationOverflow) {
     constexpr uint64_t heapSize = 128;
     constexpr uint64_t maxBlockSize = 512;
-    DummyBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+    PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);

     constexpr uint64_t largeBlock = (1ull << 63) + 1;
     ResourceMemoryAllocation invalidAllocation = allocator.Allocate(largeBlock);
@@ -385,9 +385,9 @@ TEST(BuddyMemoryAllocatorTests, ReuseFreedHeaps) {
     constexpr uint64_t kHeapSize = 128;
     constexpr uint64_t kMaxBlockSize = 4096;

-    DummyResourceHeapAllocator heapAllocator;
+    PlaceholderResourceHeapAllocator heapAllocator;
     PooledResourceMemoryAllocator poolAllocator(&heapAllocator);
-    DummyBuddyResourceAllocator allocator(kMaxBlockSize, kHeapSize, &poolAllocator);
+    PlaceholderBuddyResourceAllocator allocator(kMaxBlockSize, kHeapSize, &poolAllocator);

     std::set<ResourceHeapBase*> heaps = {};
     std::vector<ResourceMemoryAllocation> allocations = {};
@ -426,9 +426,9 @@ TEST(BuddyMemoryAllocatorTests, DestroyHeaps) {
|
|||
constexpr uint64_t kHeapSize = 128;
|
||||
constexpr uint64_t kMaxBlockSize = 4096;
|
||||
|
||||
DummyResourceHeapAllocator heapAllocator;
|
||||
PlaceholderResourceHeapAllocator heapAllocator;
|
||||
PooledResourceMemoryAllocator poolAllocator(&heapAllocator);
|
||||
DummyBuddyResourceAllocator allocator(kMaxBlockSize, kHeapSize, &poolAllocator);
|
||||
PlaceholderBuddyResourceAllocator allocator(kMaxBlockSize, kHeapSize, &poolAllocator);
|
||||
|
||||
std::set<ResourceHeapBase*> heaps = {};
|
||||
std::vector<ResourceMemoryAllocation> allocations = {};
|
||||
|
|
|
@ -368,7 +368,7 @@ TEST(CommandAllocator, EmptyIterator) {
|
|||
|
||||
template <size_t A>
|
||||
struct alignas(A) AlignedStruct {
|
||||
char dummy;
|
||||
char placeholder;
|
||||
};
|
||||
|
||||
// Test for overflows in Allocate's computations, size 1 variant
|
||||
|
|
|
@ -21,8 +21,8 @@ using namespace dawn::native;
|
|||
|
||||
namespace {
|
||||
|
||||
int dummySuccess = 0xbeef;
|
||||
const char* dummyErrorMessage = "I am an error message :3";
|
||||
int placeholderSuccess = 0xbeef;
|
||||
const char* placeholderErrorMessage = "I am an error message :3";
|
||||
|
||||
// Check returning a success MaybeError with {};
|
||||
TEST(ErrorTests, Error_Success) {
|
||||
|
@ -34,35 +34,37 @@ namespace {
|
|||
|
||||
// Check returning an error MaybeError with "return DAWN_VALIDATION_ERROR"
|
||||
TEST(ErrorTests, Error_Error) {
|
||||
auto ReturnError = []() -> MaybeError { return DAWN_VALIDATION_ERROR(dummyErrorMessage); };
|
||||
auto ReturnError = []() -> MaybeError {
|
||||
return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
|
||||
};
|
||||
|
||||
MaybeError result = ReturnError();
|
||||
ASSERT_TRUE(result.IsError());
|
||||
|
||||
std::unique_ptr<ErrorData> errorData = result.AcquireError();
|
||||
ASSERT_EQ(errorData->GetMessage(), dummyErrorMessage);
|
||||
ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
|
||||
}
|
||||
|
||||
// Check returning a success ResultOrError with an implicit conversion
|
||||
TEST(ErrorTests, ResultOrError_Success) {
|
||||
auto ReturnSuccess = []() -> ResultOrError<int*> { return &dummySuccess; };
|
||||
auto ReturnSuccess = []() -> ResultOrError<int*> { return &placeholderSuccess; };
|
||||
|
||||
ResultOrError<int*> result = ReturnSuccess();
|
||||
ASSERT_TRUE(result.IsSuccess());
|
||||
ASSERT_EQ(result.AcquireSuccess(), &dummySuccess);
|
||||
ASSERT_EQ(result.AcquireSuccess(), &placeholderSuccess);
|
||||
}
|
||||
|
||||
// Check returning an error ResultOrError with "return DAWN_VALIDATION_ERROR"
|
||||
TEST(ErrorTests, ResultOrError_Error) {
|
||||
auto ReturnError = []() -> ResultOrError<int*> {
|
||||
return DAWN_VALIDATION_ERROR(dummyErrorMessage);
|
||||
return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
|
||||
};
|
||||
|
||||
ResultOrError<int*> result = ReturnError();
|
||||
ASSERT_TRUE(result.IsError());
|
||||
|
||||
std::unique_ptr<ErrorData> errorData = result.AcquireError();
|
||||
ASSERT_EQ(errorData->GetMessage(), dummyErrorMessage);
|
||||
ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
|
||||
}
|
||||
|
||||
// Check DAWN_TRY handles successes correctly.
|
||||
|
@ -85,7 +87,9 @@ namespace {
|
|||
|
||||
// Check DAWN_TRY handles errors correctly.
|
||||
TEST(ErrorTests, TRY_Error) {
|
||||
auto ReturnError = []() -> MaybeError { return DAWN_VALIDATION_ERROR(dummyErrorMessage); };
|
||||
auto ReturnError = []() -> MaybeError {
|
||||
return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
|
||||
};
|
||||
|
||||
auto Try = [ReturnError]() -> MaybeError {
|
||||
DAWN_TRY(ReturnError());
|
||||
|
@ -98,12 +102,14 @@ namespace {
|
|||
ASSERT_TRUE(result.IsError());
|
||||
|
||||
std::unique_ptr<ErrorData> errorData = result.AcquireError();
|
||||
ASSERT_EQ(errorData->GetMessage(), dummyErrorMessage);
|
||||
ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
|
||||
}
|
||||
|
||||
// Check DAWN_TRY adds to the backtrace.
|
||||
TEST(ErrorTests, TRY_AddsToBacktrace) {
|
||||
auto ReturnError = []() -> MaybeError { return DAWN_VALIDATION_ERROR(dummyErrorMessage); };
|
||||
auto ReturnError = []() -> MaybeError {
|
||||
return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
|
||||
};
|
||||
|
||||
auto SingleTry = [ReturnError]() -> MaybeError {
|
||||
DAWN_TRY(ReturnError());
|
||||
|
@ -129,7 +135,7 @@ namespace {
|
|||
|
||||
// Check DAWN_TRY_ASSIGN handles successes correctly.
|
||||
TEST(ErrorTests, TRY_RESULT_Success) {
|
||||
auto ReturnSuccess = []() -> ResultOrError<int*> { return &dummySuccess; };
|
||||
auto ReturnSuccess = []() -> ResultOrError<int*> { return &placeholderSuccess; };
|
||||
|
||||
// We need to check that DAWN_TRY doesn't return on successes
|
||||
bool tryReturned = true;
|
||||
|
@ -139,20 +145,20 @@ namespace {
|
|||
DAWN_TRY_ASSIGN(result, ReturnSuccess());
|
||||
tryReturned = false;
|
||||
|
||||
EXPECT_EQ(result, &dummySuccess);
|
||||
EXPECT_EQ(result, &placeholderSuccess);
|
||||
return result;
|
||||
};
|
||||
|
||||
ResultOrError<int*> result = Try();
|
||||
ASSERT_TRUE(result.IsSuccess());
|
||||
ASSERT_FALSE(tryReturned);
|
||||
ASSERT_EQ(result.AcquireSuccess(), &dummySuccess);
|
||||
ASSERT_EQ(result.AcquireSuccess(), &placeholderSuccess);
|
||||
}
|
||||
|
||||
// Check DAWN_TRY_ASSIGN handles errors correctly.
|
||||
TEST(ErrorTests, TRY_RESULT_Error) {
|
||||
auto ReturnError = []() -> ResultOrError<int*> {
|
||||
return DAWN_VALIDATION_ERROR(dummyErrorMessage);
|
||||
return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
|
||||
};
|
||||
|
||||
auto Try = [ReturnError]() -> ResultOrError<int*> {
|
||||
|
@ -162,30 +168,30 @@ namespace {
|
|||
|
||||
// DAWN_TRY should return before this point
|
||||
EXPECT_FALSE(true);
|
||||
return &dummySuccess;
|
||||
return &placeholderSuccess;
|
||||
};
|
||||
|
||||
ResultOrError<int*> result = Try();
|
||||
ASSERT_TRUE(result.IsError());
|
||||
|
||||
std::unique_ptr<ErrorData> errorData = result.AcquireError();
|
||||
ASSERT_EQ(errorData->GetMessage(), dummyErrorMessage);
|
||||
ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
|
||||
}
|
||||
|
||||
// Check DAWN_TRY_ASSIGN adds to the backtrace.
|
||||
TEST(ErrorTests, TRY_RESULT_AddsToBacktrace) {
|
||||
auto ReturnError = []() -> ResultOrError<int*> {
|
||||
return DAWN_VALIDATION_ERROR(dummyErrorMessage);
|
||||
return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
|
||||
};
|
||||
|
||||
auto SingleTry = [ReturnError]() -> ResultOrError<int*> {
|
||||
DAWN_TRY(ReturnError());
|
||||
return &dummySuccess;
|
||||
return &placeholderSuccess;
|
||||
};
|
||||
|
||||
auto DoubleTry = [SingleTry]() -> ResultOrError<int*> {
|
||||
DAWN_TRY(SingleTry());
|
||||
return &dummySuccess;
|
||||
return &placeholderSuccess;
|
||||
};
|
||||
|
||||
ResultOrError<int*> singleResult = SingleTry();
|
||||
|
@ -203,7 +209,7 @@ namespace {
|
|||
// Check a ResultOrError can be DAWN_TRY_ASSIGNED in a function that returns an Error
|
||||
TEST(ErrorTests, TRY_RESULT_ConversionToError) {
|
||||
auto ReturnError = []() -> ResultOrError<int*> {
|
||||
return DAWN_VALIDATION_ERROR(dummyErrorMessage);
|
||||
return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
|
||||
};
|
||||
|
||||
auto Try = [ReturnError]() -> MaybeError {
|
||||
|
@ -218,14 +224,14 @@ namespace {
|
|||
ASSERT_TRUE(result.IsError());
|
||||
|
||||
std::unique_ptr<ErrorData> errorData = result.AcquireError();
|
||||
ASSERT_EQ(errorData->GetMessage(), dummyErrorMessage);
|
||||
ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
|
||||
}
|
||||
|
||||
// Check a ResultOrError can be DAWN_TRY_ASSIGNED in a function that returns an Error
|
||||
// Version without Result<E*, T*>
|
||||
TEST(ErrorTests, TRY_RESULT_ConversionToErrorNonPointer) {
|
||||
auto ReturnError = []() -> ResultOrError<int> {
|
||||
return DAWN_VALIDATION_ERROR(dummyErrorMessage);
|
||||
return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
|
||||
};
|
||||
|
||||
auto Try = [ReturnError]() -> MaybeError {
|
||||
|
@ -240,12 +246,12 @@ namespace {
|
|||
ASSERT_TRUE(result.IsError());
|
||||
|
||||
std::unique_ptr<ErrorData> errorData = result.AcquireError();
|
||||
ASSERT_EQ(errorData->GetMessage(), dummyErrorMessage);
|
||||
ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
|
||||
}
|
||||
|
||||
// Check DAWN_TRY_ASSIGN handles successes correctly.
|
||||
TEST(ErrorTests, TRY_RESULT_CLEANUP_Success) {
|
||||
auto ReturnSuccess = []() -> ResultOrError<int*> { return &dummySuccess; };
|
||||
auto ReturnSuccess = []() -> ResultOrError<int*> { return &placeholderSuccess; };
|
||||
|
||||
// We need to check that DAWN_TRY_ASSIGN_WITH_CLEANUP doesn't return on successes and the
|
||||
// cleanup is not called.
|
||||
|
@ -257,7 +263,7 @@ namespace {
|
|||
DAWN_TRY_ASSIGN_WITH_CLEANUP(result, ReturnSuccess(), { tryCleanup = true; });
|
||||
tryReturned = false;
|
||||
|
||||
EXPECT_EQ(result, &dummySuccess);
|
||||
EXPECT_EQ(result, &placeholderSuccess);
|
||||
return result;
|
||||
};
|
||||
|
||||
|
@ -265,13 +271,13 @@ namespace {
|
|||
ASSERT_TRUE(result.IsSuccess());
|
||||
ASSERT_FALSE(tryReturned);
|
||||
ASSERT_FALSE(tryCleanup);
|
||||
ASSERT_EQ(result.AcquireSuccess(), &dummySuccess);
|
||||
ASSERT_EQ(result.AcquireSuccess(), &placeholderSuccess);
|
||||
}
|
||||
|
||||
// Check DAWN_TRY_ASSIGN handles cleanups.
|
||||
TEST(ErrorTests, TRY_RESULT_CLEANUP_Cleanup) {
|
||||
auto ReturnError = []() -> ResultOrError<int*> {
|
||||
return DAWN_VALIDATION_ERROR(dummyErrorMessage);
|
||||
return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
|
||||
};
|
||||
|
||||
// We need to check that DAWN_TRY_ASSIGN_WITH_CLEANUP calls cleanup when error.
|
||||
|
@ -284,21 +290,21 @@ namespace {
|
|||
|
||||
// DAWN_TRY_ASSIGN_WITH_CLEANUP should return before this point
|
||||
EXPECT_FALSE(true);
|
||||
return &dummySuccess;
|
||||
return &placeholderSuccess;
|
||||
};
|
||||
|
||||
ResultOrError<int*> result = Try();
|
||||
ASSERT_TRUE(result.IsError());
|
||||
|
||||
std::unique_ptr<ErrorData> errorData = result.AcquireError();
|
||||
ASSERT_EQ(errorData->GetMessage(), dummyErrorMessage);
|
||||
ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
|
||||
ASSERT_TRUE(tryCleanup);
|
||||
}
|
||||
|
||||
// Check DAWN_TRY_ASSIGN can override return value when needed.
|
||||
TEST(ErrorTests, TRY_RESULT_CLEANUP_OverrideReturn) {
|
||||
auto ReturnError = []() -> ResultOrError<int*> {
|
||||
return DAWN_VALIDATION_ERROR(dummyErrorMessage);
|
||||
return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
|
||||
};
|
||||
|
||||
auto Try = [ReturnError]() -> bool {
|
||||
|
@ -318,24 +324,28 @@ namespace {
|
|||
// Check a MaybeError can be DAWN_TRIED in a function that returns an ResultOrError
|
||||
// Check DAWN_TRY handles errors correctly.
|
||||
TEST(ErrorTests, TRY_ConversionToErrorOrResult) {
|
||||
auto ReturnError = []() -> MaybeError { return DAWN_VALIDATION_ERROR(dummyErrorMessage); };
|
||||
auto ReturnError = []() -> MaybeError {
|
||||
return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
|
||||
};
|
||||
|
||||
auto Try = [ReturnError]() -> ResultOrError<int*> {
|
||||
DAWN_TRY(ReturnError());
|
||||
return &dummySuccess;
|
||||
return &placeholderSuccess;
|
||||
};
|
||||
|
||||
ResultOrError<int*> result = Try();
|
||||
ASSERT_TRUE(result.IsError());
|
||||
|
||||
std::unique_ptr<ErrorData> errorData = result.AcquireError();
|
||||
ASSERT_EQ(errorData->GetMessage(), dummyErrorMessage);
|
||||
ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
|
||||
}
|
||||
|
||||
// Check a MaybeError can be DAWN_TRIED in a function that returns an ResultOrError
|
||||
// Check DAWN_TRY handles errors correctly. Version without Result<E*, T*>
|
||||
TEST(ErrorTests, TRY_ConversionToErrorOrResultNonPointer) {
|
||||
auto ReturnError = []() -> MaybeError { return DAWN_VALIDATION_ERROR(dummyErrorMessage); };
|
||||
auto ReturnError = []() -> MaybeError {
|
||||
return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
|
||||
};
|
||||
|
||||
auto Try = [ReturnError]() -> ResultOrError<int> {
|
||||
DAWN_TRY(ReturnError());
|
||||
|
@ -346,7 +356,7 @@ namespace {
|
|||
ASSERT_TRUE(result.IsError());
|
||||
|
||||
std::unique_ptr<ErrorData> errorData = result.AcquireError();
|
||||
ASSERT_EQ(errorData->GetMessage(), dummyErrorMessage);
|
||||
ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
|
||||
}
|
||||
|
||||
} // anonymous namespace
|
||||
|
|
|
@ -36,10 +36,10 @@ class PerThreadProcTests : public testing::Test {
|
|||
dawn::native::null::Adapter mNativeAdapter;
|
||||
};
|
||||
|
||||
// Test that procs can be set per thread. This test overrides deviceCreateBuffer with a dummy proc
|
||||
// for each thread that increments a counter. Because each thread has their own proc and counter,
|
||||
// there should be no data races. The per-thread procs also check that the current thread id is
|
||||
// exactly equal to the expected thread id.
|
||||
// Test that procs can be set per thread. This test overrides deviceCreateBuffer with a placeholder
|
||||
// proc for each thread that increments a counter. Because each thread has their own proc and
|
||||
// counter, there should be no data races. The per-thread procs also check that the current thread
|
||||
// id is exactly equal to the expected thread id.
|
||||
TEST_F(PerThreadProcTests, DispatchesPerThread) {
|
||||
dawnProcSetProcs(&dawnThreadDispatchProcTable);
|
||||
|
||||
|
|
|
@ -42,9 +42,9 @@ namespace {
|
|||
EXPECT_FALSE(result->IsSuccess());
|
||||
}
|
||||
|
||||
static int dummyError = 0xbeef;
|
||||
static float dummySuccess = 42.0f;
|
||||
static const float dummyConstSuccess = 42.0f;
|
||||
static int placeholderError = 0xbeef;
|
||||
static float placeholderSuccess = 42.0f;
|
||||
static const float placeholderConstSuccess = 42.0f;
|
||||
|
||||
class AClass : public RefCounted {
|
||||
public:
|
||||
|
@ -52,9 +52,9 @@ namespace {
|
|||
};
|
||||
|
||||
// Tests using the following overload of TestSuccess make
|
||||
// local Ref instances to dummySuccessObj. Tests should
|
||||
// local Ref instances to placeholderSuccessObj. Tests should
|
||||
// ensure any local Ref objects made along the way continue
|
||||
// to point to dummySuccessObj.
|
||||
// to point to placeholderSuccessObj.
|
||||
template <typename T, typename E>
|
||||
void TestSuccess(Result<Ref<T>, E>* result, T* expectedSuccess) {
|
||||
EXPECT_FALSE(result->IsError());
|
||||
|
@ -83,25 +83,25 @@ namespace {
|
|||
|
||||
// Test constructing an error Result<void, E>
|
||||
TEST(ResultOnlyPointerError, ConstructingError) {
|
||||
Result<void, int> result(std::make_unique<int>(dummyError));
|
||||
TestError(&result, dummyError);
|
||||
Result<void, int> result(std::make_unique<int>(placeholderError));
|
||||
TestError(&result, placeholderError);
|
||||
}
|
||||
|
||||
// Test moving an error Result<void, E>
|
||||
TEST(ResultOnlyPointerError, MovingError) {
|
||||
Result<void, int> result(std::make_unique<int>(dummyError));
|
||||
Result<void, int> result(std::make_unique<int>(placeholderError));
|
||||
Result<void, int> movedResult(std::move(result));
|
||||
TestError(&movedResult, dummyError);
|
||||
TestError(&movedResult, placeholderError);
|
||||
}
|
||||
|
||||
// Test returning an error Result<void, E>
|
||||
TEST(ResultOnlyPointerError, ReturningError) {
|
||||
auto CreateError = []() -> Result<void, int> {
|
||||
return {std::make_unique<int>(dummyError)};
|
||||
return {std::make_unique<int>(placeholderError)};
|
||||
};
|
||||
|
||||
Result<void, int> result = CreateError();
|
||||
TestError(&result, dummyError);
|
||||
TestError(&result, placeholderError);
|
||||
}
|
||||
|
||||
// Test constructing a success Result<void, E>
|
||||
|
@ -132,46 +132,46 @@ namespace {
|
|||
|
||||
// Test constructing an error Result<T*, E>
|
||||
TEST(ResultBothPointer, ConstructingError) {
|
||||
Result<float*, int> result(std::make_unique<int>(dummyError));
|
||||
TestError(&result, dummyError);
|
||||
Result<float*, int> result(std::make_unique<int>(placeholderError));
|
||||
TestError(&result, placeholderError);
|
||||
}
|
||||
|
||||
// Test moving an error Result<T*, E>
|
||||
TEST(ResultBothPointer, MovingError) {
|
||||
Result<float*, int> result(std::make_unique<int>(dummyError));
|
||||
Result<float*, int> result(std::make_unique<int>(placeholderError));
|
||||
Result<float*, int> movedResult(std::move(result));
|
||||
TestError(&movedResult, dummyError);
|
||||
TestError(&movedResult, placeholderError);
|
||||
}
|
||||
|
||||
// Test returning an error Result<T*, E>
|
||||
TEST(ResultBothPointer, ReturningError) {
|
||||
auto CreateError = []() -> Result<float*, int> {
|
||||
return {std::make_unique<int>(dummyError)};
|
||||
return {std::make_unique<int>(placeholderError)};
|
||||
};
|
||||
|
||||
Result<float*, int> result = CreateError();
|
||||
TestError(&result, dummyError);
|
||||
TestError(&result, placeholderError);
|
||||
}
|
||||
|
||||
// Test constructing a success Result<T*, E>
|
||||
TEST(ResultBothPointer, ConstructingSuccess) {
|
||||
Result<float*, int> result(&dummySuccess);
|
||||
TestSuccess(&result, &dummySuccess);
|
||||
Result<float*, int> result(&placeholderSuccess);
|
||||
TestSuccess(&result, &placeholderSuccess);
|
||||
}
|
||||
|
||||
// Test moving a success Result<T*, E>
|
||||
TEST(ResultBothPointer, MovingSuccess) {
|
||||
Result<float*, int> result(&dummySuccess);
|
||||
Result<float*, int> result(&placeholderSuccess);
|
||||
Result<float*, int> movedResult(std::move(result));
|
||||
TestSuccess(&movedResult, &dummySuccess);
|
||||
TestSuccess(&movedResult, &placeholderSuccess);
|
||||
}
|
||||
|
||||
// Test returning a success Result<T*, E>
|
||||
TEST(ResultBothPointer, ReturningSuccess) {
|
||||
auto CreateSuccess = []() -> Result<float*, int*> { return {&dummySuccess}; };
|
||||
auto CreateSuccess = []() -> Result<float*, int*> { return {&placeholderSuccess}; };
|
||||
|
||||
Result<float*, int*> result = CreateSuccess();
|
||||
TestSuccess(&result, &dummySuccess);
|
||||
TestSuccess(&result, &placeholderSuccess);
|
||||
}
|
||||
|
||||
// Tests converting from a Result<TChild*, E>
|
||||
|
@ -203,71 +203,73 @@ namespace {
|
|||
|
||||
// Test constructing an error Result<const T*, E>
|
||||
TEST(ResultBothPointerWithConstResult, ConstructingError) {
|
||||
Result<const float*, int> result(std::make_unique<int>(dummyError));
|
||||
TestError(&result, dummyError);
|
||||
Result<const float*, int> result(std::make_unique<int>(placeholderError));
|
||||
TestError(&result, placeholderError);
|
||||
}
|
||||
|
||||
// Test moving an error Result<const T*, E>
|
||||
TEST(ResultBothPointerWithConstResult, MovingError) {
|
||||
Result<const float*, int> result(std::make_unique<int>(dummyError));
|
||||
Result<const float*, int> result(std::make_unique<int>(placeholderError));
|
||||
Result<const float*, int> movedResult(std::move(result));
|
||||
TestError(&movedResult, dummyError);
|
||||
TestError(&movedResult, placeholderError);
|
||||
}
|
||||
|
||||
// Test returning an error Result<const T*, E*>
|
||||
TEST(ResultBothPointerWithConstResult, ReturningError) {
|
||||
auto CreateError = []() -> Result<const float*, int> {
|
||||
return {std::make_unique<int>(dummyError)};
|
||||
return {std::make_unique<int>(placeholderError)};
|
||||
};
|
||||
|
||||
Result<const float*, int> result = CreateError();
|
||||
TestError(&result, dummyError);
|
||||
TestError(&result, placeholderError);
|
||||
}
|
||||
|
||||
// Test constructing a success Result<const T*, E*>
|
||||
TEST(ResultBothPointerWithConstResult, ConstructingSuccess) {
|
||||
Result<const float*, int> result(&dummyConstSuccess);
|
||||
TestSuccess(&result, &dummyConstSuccess);
|
||||
Result<const float*, int> result(&placeholderConstSuccess);
|
||||
TestSuccess(&result, &placeholderConstSuccess);
|
||||
}
|
||||
|
||||
// Test moving a success Result<const T*, E*>
|
||||
TEST(ResultBothPointerWithConstResult, MovingSuccess) {
|
||||
Result<const float*, int> result(&dummyConstSuccess);
|
||||
Result<const float*, int> result(&placeholderConstSuccess);
|
||||
Result<const float*, int> movedResult(std::move(result));
|
||||
TestSuccess(&movedResult, &dummyConstSuccess);
|
||||
TestSuccess(&movedResult, &placeholderConstSuccess);
|
||||
}
|
||||
|
||||
// Test returning a success Result<const T*, E*>
|
||||
TEST(ResultBothPointerWithConstResult, ReturningSuccess) {
|
||||
auto CreateSuccess = []() -> Result<const float*, int> { return {&dummyConstSuccess}; };
|
||||
auto CreateSuccess = []() -> Result<const float*, int> {
|
||||
return {&placeholderConstSuccess};
|
||||
};
|
||||
|
||||
Result<const float*, int> result = CreateSuccess();
|
||||
TestSuccess(&result, &dummyConstSuccess);
|
||||
TestSuccess(&result, &placeholderConstSuccess);
|
||||
}
|
||||
|
||||
// Result<Ref<T>, E>
|
||||
|
||||
// Test constructing an error Result<Ref<T>, E>
|
||||
TEST(ResultRefT, ConstructingError) {
|
||||
Result<Ref<AClass>, int> result(std::make_unique<int>(dummyError));
|
||||
TestError(&result, dummyError);
|
||||
Result<Ref<AClass>, int> result(std::make_unique<int>(placeholderError));
|
||||
TestError(&result, placeholderError);
|
||||
}
|
||||
|
||||
// Test moving an error Result<Ref<T>, E>
|
||||
TEST(ResultRefT, MovingError) {
|
||||
Result<Ref<AClass>, int> result(std::make_unique<int>(dummyError));
|
||||
Result<Ref<AClass>, int> result(std::make_unique<int>(placeholderError));
|
||||
Result<Ref<AClass>, int> movedResult(std::move(result));
|
||||
TestError(&movedResult, dummyError);
|
||||
TestError(&movedResult, placeholderError);
|
||||
}
|
||||
|
||||
// Test returning an error Result<Ref<T>, E>
|
||||
TEST(ResultRefT, ReturningError) {
|
||||
auto CreateError = []() -> Result<Ref<AClass>, int> {
|
||||
return {std::make_unique<int>(dummyError)};
|
||||
return {std::make_unique<int>(placeholderError)};
|
||||
};
|
||||
|
||||
Result<Ref<AClass>, int> result = CreateError();
|
||||
TestError(&result, dummyError);
|
||||
TestError(&result, placeholderError);
|
||||
}
|
||||
|
||||
// Test constructing a success Result<Ref<T>, E>
|
||||
|
@ -340,25 +342,25 @@ namespace {
|
|||
|
||||
// Test constructing an error Result<T, E>
|
||||
TEST(ResultGeneric, ConstructingError) {
|
||||
Result<std::vector<float>, int> result(std::make_unique<int>(dummyError));
|
||||
TestError(&result, dummyError);
|
||||
Result<std::vector<float>, int> result(std::make_unique<int>(placeholderError));
|
||||
TestError(&result, placeholderError);
|
||||
}
|
||||
|
||||
// Test moving an error Result<T, E>
|
||||
TEST(ResultGeneric, MovingError) {
|
||||
Result<std::vector<float>, int> result(std::make_unique<int>(dummyError));
|
||||
Result<std::vector<float>, int> result(std::make_unique<int>(placeholderError));
|
||||
Result<std::vector<float>, int> movedResult(std::move(result));
|
||||
TestError(&movedResult, dummyError);
|
||||
TestError(&movedResult, placeholderError);
|
||||
}
|
||||
|
||||
// Test returning an error Result<T, E>
|
||||
TEST(ResultGeneric, ReturningError) {
|
||||
auto CreateError = []() -> Result<std::vector<float>, int> {
|
||||
return {std::make_unique<int>(dummyError)};
|
||||
return {std::make_unique<int>(placeholderError)};
|
||||
};
|
||||
|
||||
Result<std::vector<float>, int> result = CreateError();
|
||||
TestError(&result, dummyError);
|
||||
TestError(&result, placeholderError);
|
||||
}
|
||||
|
||||
// Test constructing a success Result<T, E>
|
||||
|
|
|
@ -14,14 +14,14 @@
|
|||
|
||||
namespace {
|
||||
|
||||
class Dummy : public RefCounted {
|
||||
class Placeholder : public RefCounted {
|
||||
public:
|
||||
explicit Dummy(int* alive) : mAlive(alive) {
|
||||
explicit Placeholder(int* alive) : mAlive(alive) {
|
||||
++*mAlive;
|
||||
}
|
||||
|
||||
private:
|
||||
~Dummy() {
|
||||
~Placeholder() {
|
||||
--*mAlive;
|
||||
}
|
||||
|
||||
|
@ -74,22 +74,22 @@ TEST(StackContainer, Vector) {
|
|||
|
||||
TEST(StackContainer, VectorDoubleDelete) {
|
||||
// Regression testing for double-delete.
|
||||
typedef StackVector<Ref<Dummy>, 2> Vector;
|
||||
typedef StackVector<Ref<Placeholder>, 2> Vector;
|
||||
Vector vect;
|
||||
|
||||
int alive = 0;
|
||||
Ref<Dummy> dummy = AcquireRef(new Dummy(&alive));
|
||||
Ref<Placeholder> placeholder = AcquireRef(new Placeholder(&alive));
|
||||
EXPECT_EQ(alive, 1);
|
||||
|
||||
vect->push_back(dummy);
|
||||
vect->push_back(placeholder);
|
||||
EXPECT_EQ(alive, 1);
|
||||
|
||||
Dummy* dummy_unref = dummy.Get();
|
||||
dummy = nullptr;
|
||||
Placeholder* placeholder_unref = placeholder.Get();
|
||||
placeholder = nullptr;
|
||||
EXPECT_EQ(alive, 1);
|
||||
|
||||
auto itr = std::find(vect->begin(), vect->end(), dummy_unref);
|
||||
EXPECT_EQ(itr->Get(), dummy_unref);
|
||||
auto itr = std::find(vect->begin(), vect->end(), placeholder_unref);
|
||||
EXPECT_EQ(itr->Get(), placeholder_unref);
|
||||
vect->erase(itr);
|
||||
EXPECT_EQ(alive, 0);
|
||||
|
||||
|
@ -138,7 +138,7 @@ TEST(StackContainer, BufferAlignment) {
|
|||
}
|
||||
|
||||
template class StackVector<int, 2>;
|
||||
template class StackVector<Ref<Dummy>, 2>;
|
||||
template class StackVector<Ref<Placeholder>, 2>;
|
||||
|
||||
template <typename T, size_t size>
|
||||
void CheckStackVectorElements(const StackVector<T, size>& vec, std::initializer_list<T> expected) {
|
||||
|
|
|
@ -1499,7 +1499,7 @@ class SetBindGroupValidationTest : public ValidationTest {
|
|||
uint32_t count,
|
||||
bool expectation) {
|
||||
wgpu::RenderPipeline renderPipeline = CreateRenderPipeline();
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
|
||||
wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
|
||||
wgpu::RenderPassEncoder renderPassEncoder = commandEncoder.BeginRenderPass(&renderPass);
|
||||
|
@ -1602,7 +1602,7 @@ TEST_F(SetBindGroupValidationTest, VerifyGroupIfChangedAfterAction) {
|
|||
}
|
||||
{
|
||||
wgpu::RenderPipeline renderPipeline = CreateRenderPipeline();
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
|
||||
wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
|
||||
wgpu::RenderPassEncoder renderPassEncoder = commandEncoder.BeginRenderPass(&renderPass);
|
||||
|
@ -1966,7 +1966,7 @@ TEST_F(SetBindGroupPersistenceValidationTest, BindGroupBeforePipeline) {
|
|||
device, bindGroupLayouts[1],
|
||||
{{0, storageBuffer, 0, kBindingSize}, {1, uniformBuffer, 0, kBindingSize}});
|
||||
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
|
||||
wgpu::RenderPassEncoder renderPassEncoder = commandEncoder.BeginRenderPass(&renderPass);
|
||||
|
||||
|
@ -2020,7 +2020,7 @@ TEST_F(SetBindGroupPersistenceValidationTest, NotVulkanInheritance) {
|
|||
device, bindGroupLayoutsB[0],
|
||||
{{0, storageBuffer, 0, kBindingSize}, {1, uniformBuffer, 0, kBindingSize}});
|
||||
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
|
||||
wgpu::RenderPassEncoder renderPassEncoder = commandEncoder.BeginRenderPass(&renderPass);
|
||||
|
||||
|
@ -2270,8 +2270,8 @@ class BindingsValidationTest : public BindGroupLayoutCompatibilityTest {
|
|||
wgpu::RenderPipeline pipeline,
|
||||
bool expectation) {
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
DummyRenderPass dummyRenderPass(device);
|
||||
wgpu::RenderPassEncoder rp = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
PlaceholderRenderPass PlaceholderRenderPass(device);
|
||||
wgpu::RenderPassEncoder rp = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
for (uint32_t i = 0; i < count; ++i) {
|
||||
rp.SetBindGroup(i, bg[i]);
|
||||
}
|
||||
|
|
|
@ -31,12 +31,12 @@ TEST_F(CommandBufferValidationTest, Empty) {
|
|||
|
||||
// Test that a command buffer cannot be ended mid render pass
|
||||
TEST_F(CommandBufferValidationTest, EndedMidRenderPass) {
|
||||
DummyRenderPass dummyRenderPass(device);
|
||||
PlaceholderRenderPass PlaceholderRenderPass(device);
|
||||
|
||||
// Control case, command buffer ended after the pass is ended.
|
||||
{
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass.End();
|
||||
encoder.Finish();
|
||||
}
|
||||
|
@ -44,7 +44,7 @@ TEST_F(CommandBufferValidationTest, EndedMidRenderPass) {
|
|||
// Error case, command buffer ended mid-pass.
|
||||
{
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
ASSERT_DEVICE_ERROR(
|
||||
encoder.Finish(),
|
||||
HasSubstr("Command buffer recording ended before [RenderPassEncoder] was ended."));
|
||||
|
@ -54,7 +54,7 @@ TEST_F(CommandBufferValidationTest, EndedMidRenderPass) {
|
|||
// should fail too.
|
||||
{
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
ASSERT_DEVICE_ERROR(
|
||||
encoder.Finish(),
|
||||
HasSubstr("Command buffer recording ended before [RenderPassEncoder] was ended."));
|
||||
|
@ -97,12 +97,12 @@ TEST_F(CommandBufferValidationTest, EndedMidComputePass) {
|
|||
|
||||
// Test that a render pass cannot be ended twice
|
||||
TEST_F(CommandBufferValidationTest, RenderPassEndedTwice) {
|
||||
DummyRenderPass dummyRenderPass(device);
|
||||
PlaceholderRenderPass PlaceholderRenderPass(device);
|
||||
|
||||
// Control case, pass is ended once
|
||||
{
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass.End();
|
||||
encoder.Finish();
|
||||
}
|
||||
|
@ -110,7 +110,7 @@ TEST_F(CommandBufferValidationTest, RenderPassEndedTwice) {
|
|||
// Error case, pass ended twice
|
||||
{
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass.End();
|
||||
pass.End();
|
||||
ASSERT_DEVICE_ERROR(
|
||||
|
@ -143,12 +143,12 @@ TEST_F(CommandBufferValidationTest, ComputePassEndedTwice) {
|
|||
|
||||
// Test that beginning a compute pass before ending the previous pass causes an error.
|
||||
TEST_F(CommandBufferValidationTest, BeginComputePassBeforeEndPreviousPass) {
|
||||
DummyRenderPass dummyRenderPass(device);
|
||||
PlaceholderRenderPass PlaceholderRenderPass(device);
|
||||
|
||||
// Beginning a compute pass before ending a render pass causes an error.
|
||||
{
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
wgpu::RenderPassEncoder renderPass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
wgpu::RenderPassEncoder renderPass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
wgpu::ComputePassEncoder computePass = encoder.BeginComputePass();
|
||||
computePass.End();
|
||||
renderPass.End();
|
||||
|
@ -168,13 +168,13 @@ TEST_F(CommandBufferValidationTest, BeginComputePassBeforeEndPreviousPass) {
|
|||
|
||||
// Test that beginning a render pass before ending the previous pass causes an error.
|
||||
TEST_F(CommandBufferValidationTest, BeginRenderPassBeforeEndPreviousPass) {
|
||||
DummyRenderPass dummyRenderPass(device);
|
||||
PlaceholderRenderPass PlaceholderRenderPass(device);
|
||||
|
||||
// Beginning a render pass before ending the render pass causes an error.
|
||||
{
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
wgpu::RenderPassEncoder renderPass1 = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
wgpu::RenderPassEncoder renderPass2 = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
wgpu::RenderPassEncoder renderPass1 = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
wgpu::RenderPassEncoder renderPass2 = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
renderPass2.End();
|
||||
renderPass1.End();
|
||||
ASSERT_DEVICE_ERROR(encoder.Finish());
|
||||
|
@ -184,7 +184,7 @@ TEST_F(CommandBufferValidationTest, BeginRenderPassBeforeEndPreviousPass) {
|
|||
{
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
wgpu::ComputePassEncoder computePass = encoder.BeginComputePass();
|
||||
wgpu::RenderPassEncoder renderPass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
wgpu::RenderPassEncoder renderPass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
renderPass.End();
|
||||
computePass.End();
|
||||
ASSERT_DEVICE_ERROR(encoder.Finish());
|
||||
|
@ -229,12 +229,12 @@ TEST_F(CommandBufferValidationTest, CallsAfterAFailedFinish) {
|
|||
// Test that passes which are de-referenced prior to ending still allow the correct errors to be
|
||||
// produced.
|
||||
TEST_F(CommandBufferValidationTest, PassDereferenced) {
|
||||
DummyRenderPass dummyRenderPass(device);
|
||||
PlaceholderRenderPass PlaceholderRenderPass(device);
|
||||
|
||||
// Control case, command buffer ended after the pass is ended.
|
||||
{
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass.End();
|
||||
encoder.Finish();
|
||||
}
|
||||
|
@ -242,7 +242,7 @@ TEST_F(CommandBufferValidationTest, PassDereferenced) {
|
|||
// Error case, no reference is kept to a render pass.
|
||||
{
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
encoder.BeginRenderPass(&dummyRenderPass);
|
||||
encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
ASSERT_DEVICE_ERROR(
|
||||
encoder.Finish(),
|
||||
HasSubstr("Command buffer recording ended before [RenderPassEncoder] was ended."));
|
||||
|
@ -260,7 +260,7 @@ TEST_F(CommandBufferValidationTest, PassDereferenced) {
|
|||
// Error case, beginning a new pass after failing to end a de-referenced pass.
|
||||
{
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
encoder.BeginRenderPass(&dummyRenderPass);
|
||||
encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
|
||||
pass.End();
|
||||
ASSERT_DEVICE_ERROR(
|
||||
|
@ -301,12 +301,12 @@ TEST_F(CommandBufferValidationTest, DestroyEncoder) {
|
|||
// only way to trigger the destroy call is by losing all references which means we cannot
|
||||
// call finish.
|
||||
DAWN_SKIP_TEST_IF(UsesWire());
|
||||
DummyRenderPass dummyRenderPass(device);
|
||||
PlaceholderRenderPass PlaceholderRenderPass(device);
|
||||
|
||||
// Control case, command buffer ended after the pass is ended.
|
||||
{
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass.End();
|
||||
encoder.Finish();
|
||||
}
|
||||
|
@ -314,7 +314,7 @@ TEST_F(CommandBufferValidationTest, DestroyEncoder) {
|
|||
// Destroyed encoder with encoded commands should emit error on finish.
|
||||
{
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass.End();
|
||||
dawn::native::FromAPI(encoder.Get())->Destroy();
|
||||
ASSERT_DEVICE_ERROR(encoder.Finish(), HasSubstr("Destroyed encoder cannot be finished."));
|
||||
|
@ -323,7 +323,7 @@ TEST_F(CommandBufferValidationTest, DestroyEncoder) {
|
|||
// Destroyed encoder with encoded commands shouldn't emit an error if never finished.
|
||||
{
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass.End();
|
||||
dawn::native::FromAPI(encoder.Get())->Destroy();
|
||||
}
|
||||
|
@ -332,7 +332,7 @@ TEST_F(CommandBufferValidationTest, DestroyEncoder) {
|
|||
{
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
dawn::native::FromAPI(encoder.Get())->Destroy();
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass.End();
|
||||
ASSERT_DEVICE_ERROR(encoder.Finish(), HasSubstr("Destroyed encoder cannot be finished."));
|
||||
}
|
||||
|
@ -341,14 +341,14 @@ TEST_F(CommandBufferValidationTest, DestroyEncoder) {
|
|||
{
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
dawn::native::FromAPI(encoder.Get())->Destroy();
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass.End();
|
||||
}
|
||||
|
||||
// Destroying a finished encoder should not emit any errors.
|
||||
{
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass.End();
|
||||
encoder.Finish();
|
||||
dawn::native::FromAPI(encoder.Get())->Destroy();
|
||||
|
|
|
@ -21,7 +21,7 @@ class DebugMarkerValidationTest : public ValidationTest {};
|
|||
|
||||
// Correct usage of debug markers should succeed in render pass.
|
||||
TEST_F(DebugMarkerValidationTest, RenderSuccess) {
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
{
|
||||
|
@ -39,7 +39,7 @@ TEST_F(DebugMarkerValidationTest, RenderSuccess) {
|
|||
|
||||
// A PushDebugGroup call without a following PopDebugGroup produces an error in render pass.
|
||||
TEST_F(DebugMarkerValidationTest, RenderUnbalancedPush) {
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
{
|
||||
|
@ -56,7 +56,7 @@ TEST_F(DebugMarkerValidationTest, RenderUnbalancedPush) {
|
|||
|
||||
// A PopDebugGroup call without a preceding PushDebugGroup produces an error in render pass.
|
||||
TEST_F(DebugMarkerValidationTest, RenderUnbalancedPop) {
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
{
|
||||
|
@ -224,7 +224,7 @@ TEST_F(DebugMarkerValidationTest, NestedComputeInCommandEncoderIndependent) {
|
|||
|
||||
// It is possible to nested pushes in a render pass in a command encoder.
|
||||
TEST_F(DebugMarkerValidationTest, NestedRenderInCommandEncoder) {
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
encoder.PushDebugGroup("Event Start");
|
||||
|
@ -241,7 +241,7 @@ TEST_F(DebugMarkerValidationTest, NestedRenderInCommandEncoder) {
|
|||
|
||||
// Command encoder and render pass pushes must be balanced independently.
|
||||
TEST_F(DebugMarkerValidationTest, NestedRenderInCommandEncoderIndependent) {
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
encoder.PushDebugGroup("Event Start");
|
||||
|
|
|
@ -72,7 +72,7 @@ class DrawIndirectValidationTest : public ValidationTest {
|
|||
wgpu::Buffer indirectBuffer =
|
||||
utils::CreateBufferFromData<uint32_t>(device, usage, bufferList);
|
||||
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
|
||||
pass.SetPipeline(pipeline);
|
||||
|
|
|
@ -203,7 +203,7 @@ class SetBlendConstantTest : public ValidationTest {};
|
|||
|
||||
// Test to check basic use of SetBlendConstantTest
|
||||
TEST_F(SetBlendConstantTest, Success) {
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
{
|
||||
|
@ -217,7 +217,7 @@ TEST_F(SetBlendConstantTest, Success) {
|
|||
|
||||
// Test that SetBlendConstant allows any value, large, small or negative
|
||||
TEST_F(SetBlendConstantTest, AnyValueAllowed) {
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
{
|
||||
|
@ -233,7 +233,7 @@ class SetStencilReferenceTest : public ValidationTest {};
|
|||
|
||||
// Test to check basic use of SetStencilReferenceTest
|
||||
TEST_F(SetStencilReferenceTest, Success) {
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
{
|
||||
|
@ -246,7 +246,7 @@ TEST_F(SetStencilReferenceTest, Success) {
|
|||
|
||||
// Test that SetStencilReference allows any bit to be set
|
||||
TEST_F(SetStencilReferenceTest, AllBitsAllowed) {
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
{
|
||||
|
|
|
@ -50,7 +50,7 @@ TEST_F(IndexBufferValidationTest, UndefinedIndexFormat) {
|
|||
bufferDesc.size = 256;
|
||||
wgpu::Buffer buffer = device.CreateBuffer(&bufferDesc);
|
||||
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
|
||||
pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Undefined);
|
||||
|
@ -65,7 +65,7 @@ TEST_F(IndexBufferValidationTest, InvalidIndexFormat) {
|
|||
bufferDesc.size = 256;
|
||||
wgpu::Buffer buffer = device.CreateBuffer(&bufferDesc);
|
||||
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
|
||||
pass.SetIndexBuffer(buffer, static_cast<wgpu::IndexFormat>(404));
|
||||
|
@ -80,7 +80,7 @@ TEST_F(IndexBufferValidationTest, IndexBufferOffsetOOBValidation) {
|
|||
bufferDesc.size = 256;
|
||||
wgpu::Buffer buffer = device.CreateBuffer(&bufferDesc);
|
||||
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
// Control case, using the full buffer, with or without an explicit size is valid.
|
||||
{
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
|
@ -232,7 +232,7 @@ TEST_F(IndexBufferValidationTest, InvalidUsage) {
|
|||
wgpu::Buffer copyBuffer =
|
||||
utils::CreateBufferFromData<uint32_t>(device, wgpu::BufferUsage::CopySrc, {0, 1, 2});
|
||||
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
// Control case: using the index buffer is valid.
|
||||
{
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
|
@ -272,7 +272,7 @@ TEST_F(IndexBufferValidationTest, OffsetAlignment) {
|
|||
wgpu::Buffer indexBuffer =
|
||||
utils::CreateBufferFromData<uint32_t>(device, wgpu::BufferUsage::Index, {0, 1, 2});
|
||||
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
// Control cases: index buffer offset is a multiple of the index format size
|
||||
{
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
|
|
|
@ -300,7 +300,7 @@ class MinBufferSizeTestsBase : public ValidationTest {
|
|||
void TestDraw(const wgpu::RenderPipeline& renderPipeline,
|
||||
const std::vector<wgpu::BindGroup>& bindGroups,
|
||||
bool expectation) {
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
|
||||
wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
|
||||
wgpu::RenderPassEncoder renderPassEncoder = commandEncoder.BeginRenderPass(&renderPass);
|
||||
|
|
|
@ -82,7 +82,7 @@ class OcclusionQueryValidationTest : public QuerySetValidationTest {};
|
|||
// Test the occlusionQuerySet in RenderPassDescriptor
|
||||
TEST_F(OcclusionQueryValidationTest, InvalidOcclusionQuerySet) {
|
||||
wgpu::QuerySet occlusionQuerySet = CreateQuerySet(device, wgpu::QueryType::Occlusion, 2);
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
|
||||
// Success
|
||||
{
|
||||
|
@ -100,7 +100,7 @@ TEST_F(OcclusionQueryValidationTest, InvalidOcclusionQuerySet) {
|
|||
// Fail to begin occlusion query if the occlusionQuerySet is not set in RenderPassDescriptor
|
||||
{
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
DummyRenderPass renderPassWithoutOcclusion(device);
|
||||
PlaceholderRenderPass renderPassWithoutOcclusion(device);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassWithoutOcclusion);
|
||||
pass.BeginOcclusionQuery(0);
|
||||
pass.EndOcclusionQuery();
|
||||
|
@ -142,7 +142,7 @@ TEST_F(OcclusionQueryValidationTest, InvalidOcclusionQuerySet) {
|
|||
// Test query index of occlusion query
|
||||
TEST_F(OcclusionQueryValidationTest, InvalidQueryIndex) {
|
||||
wgpu::QuerySet occlusionQuerySet = CreateQuerySet(device, wgpu::QueryType::Occlusion, 2);
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
renderPass.occlusionQuerySet = occlusionQuerySet;
|
||||
|
||||
// Fail to begin occlusion query if the query index exceeds the number of queries in query set
|
||||
|
@ -186,7 +186,7 @@ TEST_F(OcclusionQueryValidationTest, InvalidQueryIndex) {
|
|||
// Test the correspondence between BeginOcclusionQuery and EndOcclusionQuery
|
||||
TEST_F(OcclusionQueryValidationTest, InvalidBeginAndEnd) {
|
||||
wgpu::QuerySet occlusionQuerySet = CreateQuerySet(device, wgpu::QueryType::Occlusion, 2);
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
renderPass.occlusionQuerySet = occlusionQuerySet;
|
||||
|
||||
// Fail to begin an occlusion query without corresponding end operation
|
||||
|
@ -242,7 +242,7 @@ class TimestampQueryValidationTest : public QuerySetValidationTest {
|
|||
void EncodeRenderPassWithTimestampWrites(
|
||||
wgpu::CommandEncoder encoder,
|
||||
const std::vector<wgpu::RenderPassTimestampWrite>& timestampWrites) {
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
renderPass.timestampWriteCount = timestampWrites.size();
|
||||
renderPass.timestampWrites = timestampWrites.data();
|
||||
|
||||
|
@ -290,7 +290,7 @@ TEST_F(TimestampQueryValidationTest, UnnecessaryPipelineStatistics) {
|
|||
TEST_F(TimestampQueryValidationTest, SetOcclusionQueryWithTimestampQuerySet) {
|
||||
// Fail to begin render pass if the type of occlusionQuerySet is not Occlusion
|
||||
wgpu::QuerySet querySet = CreateQuerySet(device, wgpu::QueryType::Timestamp, 1);
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
renderPass.occlusionQuerySet = querySet;
|
||||
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
|
@ -558,7 +558,7 @@ TEST_F(TimestampQueryValidationTest, WriteTimestampOnComputePassEncoder) {
|
|||
|
||||
// Test write timestamp on render pass encoder
|
||||
TEST_F(TimestampQueryValidationTest, WriteTimestampOnRenderPassEncoder) {
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
|
||||
wgpu::QuerySet timestampQuerySet = CreateQuerySet(device, wgpu::QueryType::Timestamp, 2);
|
||||
wgpu::QuerySet occlusionQuerySet = CreateQuerySet(device, wgpu::QueryType::Occlusion, 2);
|
||||
|
@ -707,7 +707,7 @@ TEST_F(PipelineStatisticsQueryValidationTest, BeginRenderPassWithPipelineStatist
|
|||
wgpu::QuerySet querySet =
|
||||
CreateQuerySet(device, wgpu::QueryType::PipelineStatistics, 1,
|
||||
{wgpu::PipelineStatisticName::VertexShaderInvocations});
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
renderPass.occlusionQuerySet = querySet;
|
||||
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
|
|
|
@ -44,7 +44,7 @@ namespace {
|
|||
@group(1) @binding(0) var<uniform> uniforms : Uniforms;
|
||||
|
||||
struct Storage {
|
||||
dummy : array<f32>
|
||||
placeholder : array<f32>
|
||||
}
|
||||
@group(1) @binding(1) var<storage, read_write> ssbo : Storage;
|
||||
|
||||
|
@ -79,7 +79,7 @@ namespace {
|
|||
vertexBuffer = utils::CreateBufferFromData(device, kVertices, sizeof(kVertices),
|
||||
wgpu::BufferUsage::Vertex);
|
||||
|
||||
// Dummy storage buffer.
|
||||
// Placeholder storage buffer.
|
||||
wgpu::Buffer storageBuffer = utils::CreateBufferFromData(
|
||||
device, kVertices, sizeof(kVertices), wgpu::BufferUsage::Storage);
|
||||
|
||||
|
@ -125,7 +125,7 @@ namespace {
|
|||
|
||||
// Test creating and encoding an empty render bundle.
|
||||
TEST_F(RenderBundleValidationTest, Empty) {
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
|
||||
utils::ComboRenderBundleEncoderDescriptor desc = {};
|
||||
desc.colorFormatsCount = 1;
|
||||
|
@ -145,7 +145,7 @@ TEST_F(RenderBundleValidationTest, Empty) {
|
|||
// This is a regression test for error render bundle encoders containing no commands would
|
||||
// produce non-error render bundles.
|
||||
TEST_F(RenderBundleValidationTest, EmptyErrorEncoderProducesErrorBundle) {
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
|
||||
utils::ComboRenderBundleEncoderDescriptor desc = {};
|
||||
// Having 0 attachments is invalid!
|
||||
|
@ -165,7 +165,7 @@ TEST_F(RenderBundleValidationTest, EmptyErrorEncoderProducesErrorBundle) {
|
|||
|
||||
// Test executing zero render bundles.
|
||||
TEST_F(RenderBundleValidationTest, ZeroBundles) {
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
|
||||
wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
|
||||
wgpu::RenderPassEncoder pass = commandEncoder.BeginRenderPass(&renderPass);
|
||||
|
@ -176,7 +176,7 @@ TEST_F(RenderBundleValidationTest, ZeroBundles) {
|
|||
|
||||
// Test successfully creating and encoding a render bundle into a command buffer.
|
||||
TEST_F(RenderBundleValidationTest, SimpleSuccess) {
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
|
||||
utils::ComboRenderBundleEncoderDescriptor desc = {};
|
||||
desc.colorFormatsCount = 1;
|
||||
|
@ -199,7 +199,7 @@ TEST_F(RenderBundleValidationTest, SimpleSuccess) {
|
|||
|
||||
// Test that render bundle debug groups must be well nested.
|
||||
TEST_F(RenderBundleValidationTest, DebugGroups) {
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
|
||||
utils::ComboRenderBundleEncoderDescriptor desc = {};
|
||||
desc.colorFormatsCount = 1;
|
||||
|
@ -258,7 +258,7 @@ TEST_F(RenderBundleValidationTest, DebugGroups) {
|
|||
|
||||
// Test render bundles do not inherit command buffer state
|
||||
TEST_F(RenderBundleValidationTest, StateInheritance) {
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
|
||||
utils::ComboRenderBundleEncoderDescriptor desc = {};
|
||||
desc.colorFormatsCount = 1;
|
||||
|
@ -343,7 +343,7 @@ TEST_F(RenderBundleValidationTest, StateInheritance) {
|
|||
|
||||
// Test render bundles do not persist command buffer state
|
||||
TEST_F(RenderBundleValidationTest, StatePersistence) {
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
|
||||
utils::ComboRenderBundleEncoderDescriptor desc = {};
|
||||
desc.colorFormatsCount = 1;
|
||||
|
@ -428,7 +428,7 @@ TEST_F(RenderBundleValidationTest, StatePersistence) {
|
|||
|
||||
// Test executing render bundles clears command buffer state
|
||||
TEST_F(RenderBundleValidationTest, ClearsState) {
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
|
||||
utils::ComboRenderBundleEncoderDescriptor desc = {};
|
||||
desc.colorFormatsCount = 1;
|
||||
|
@ -520,7 +520,7 @@ TEST_F(RenderBundleValidationTest, ClearsState) {
|
|||
|
||||
// Test creating and encoding multiple render bundles.
|
||||
TEST_F(RenderBundleValidationTest, MultipleBundles) {
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
|
||||
utils::ComboRenderBundleEncoderDescriptor desc = {};
|
||||
desc.colorFormatsCount = 1;
|
||||
|
@ -553,7 +553,7 @@ TEST_F(RenderBundleValidationTest, MultipleBundles) {
|
|||
|
||||
// Test that is is valid to execute a render bundle more than once.
|
||||
TEST_F(RenderBundleValidationTest, ExecuteMultipleTimes) {
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
|
||||
utils::ComboRenderBundleEncoderDescriptor desc = {};
|
||||
desc.colorFormatsCount = 1;
|
||||
|
@ -696,7 +696,7 @@ TEST_F(RenderBundleValidationTest, DepthStencilReadOnly) {
|
|||
}
|
||||
// Test that resource usages are validated inside render bundles.
|
||||
TEST_F(RenderBundleValidationTest, UsageTracking) {
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
|
||||
utils::ComboRenderBundleEncoderDescriptor desc = {};
|
||||
desc.colorFormatsCount = 1;
|
||||
|
|
|
@ -86,8 +86,8 @@ namespace {
|
|||
CreateBuffer(4, wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Index);
|
||||
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
DummyRenderPass dummyRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
PlaceholderRenderPass PlaceholderRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
|
||||
pass.SetVertexBuffer(0, buffer);
|
||||
pass.End();
|
||||
|
@ -130,8 +130,8 @@ namespace {
|
|||
|
||||
// It is invalid to use the buffer as both index and storage in render pass
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
DummyRenderPass dummyRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
PlaceholderRenderPass PlaceholderRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
|
||||
pass.SetBindGroup(0, bg);
|
||||
pass.End();
|
||||
|
@ -195,8 +195,8 @@ namespace {
|
|||
{
|
||||
// It is valid to use multiple storage usages on the same buffer in render pass
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
DummyRenderPass dummyRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
PlaceholderRenderPass PlaceholderRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass.SetBindGroup(0, bg);
|
||||
pass.End();
|
||||
encoder.Finish();
|
||||
|
@ -235,14 +235,14 @@ namespace {
|
|||
|
||||
// Use these two buffers as both index and storage in different render passes
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
DummyRenderPass dummyRenderPass(device);
|
||||
PlaceholderRenderPass PlaceholderRenderPass(device);
|
||||
|
||||
wgpu::RenderPassEncoder pass0 = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
wgpu::RenderPassEncoder pass0 = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass0.SetIndexBuffer(buffer0, wgpu::IndexFormat::Uint32);
|
||||
pass0.SetBindGroup(0, bg1);
|
||||
pass0.End();
|
||||
|
||||
wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass1.SetIndexBuffer(buffer1, wgpu::IndexFormat::Uint32);
|
||||
pass1.SetBindGroup(0, bg0);
|
||||
pass1.End();
|
||||
|
@ -297,8 +297,8 @@ namespace {
|
|||
pass0.SetBindGroup(0, bg0);
|
||||
pass0.End();
|
||||
|
||||
DummyRenderPass dummyRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
PlaceholderRenderPass PlaceholderRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass1.SetBindGroup(1, bg1);
|
||||
pass1.End();
|
||||
|
||||
|
@ -325,8 +325,8 @@ namespace {
|
|||
// It is not allowed to use the same buffer as both readable and writable in different
|
||||
// draws within the same render pass.
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
DummyRenderPass dummyRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
PlaceholderRenderPass PlaceholderRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass.SetPipeline(rp);
|
||||
|
||||
pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
|
||||
|
@ -392,8 +392,8 @@ namespace {
|
|||
// It is invalid to use the same buffer as both readable and writable usages in a single
|
||||
// draw
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
DummyRenderPass dummyRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
PlaceholderRenderPass PlaceholderRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass.SetPipeline(rp);
|
||||
|
||||
pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
|
||||
|
@ -455,8 +455,8 @@ namespace {
|
|||
{
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
encoder.CopyBufferToBuffer(bufferSrc, 0, bufferDst, 0, 4);
|
||||
DummyRenderPass dummyRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
PlaceholderRenderPass PlaceholderRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass.SetBindGroup(0, bg0);
|
||||
pass.End();
|
||||
encoder.Finish();
|
||||
|
@ -492,14 +492,14 @@ namespace {
|
|||
device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
|
||||
wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer0}});
|
||||
|
||||
DummyRenderPass dummyRenderPass(device);
|
||||
PlaceholderRenderPass PlaceholderRenderPass(device);
|
||||
|
||||
// Set index buffer twice. The second one overwrites the first one. No buffer is used as
|
||||
// both read and write in the same pass. But the overwritten index buffer (buffer0) still
|
||||
// take effect during resource tracking.
|
||||
{
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass.SetIndexBuffer(buffer0, wgpu::IndexFormat::Uint32);
|
||||
pass.SetIndexBuffer(buffer1, wgpu::IndexFormat::Uint32);
|
||||
pass.SetBindGroup(0, bg);
|
||||
|
@ -511,7 +511,7 @@ namespace {
|
|||
// read and write in the same pass
|
||||
{
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass.SetIndexBuffer(buffer1, wgpu::IndexFormat::Uint32);
|
||||
pass.SetIndexBuffer(buffer0, wgpu::IndexFormat::Uint32);
|
||||
pass.SetBindGroup(0, bg);
|
||||
|
@ -524,7 +524,7 @@ namespace {
|
|||
// (buffer0) still take effect during resource tracking.
|
||||
{
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass.SetVertexBuffer(0, buffer0);
|
||||
pass.SetVertexBuffer(0, buffer1);
|
||||
pass.SetBindGroup(0, bg);
|
||||
|
@ -536,7 +536,7 @@ namespace {
|
|||
// buffer0 is used as both read and write in the same pass
|
||||
{
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass.SetVertexBuffer(0, buffer1);
|
||||
pass.SetVertexBuffer(0, buffer0);
|
||||
pass.SetBindGroup(0, bg);
|
||||
|
@ -562,14 +562,14 @@ namespace {
|
|||
wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl, {{0, buffer0}});
|
||||
wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl, {{0, buffer1}});
|
||||
|
||||
DummyRenderPass dummyRenderPass(device);
|
||||
PlaceholderRenderPass PlaceholderRenderPass(device);
|
||||
|
||||
// Set bind group on the same index twice. The second one overwrites the first one.
|
||||
// No buffer is used as both read and write in the same pass. But the overwritten
|
||||
// bind group still take effect during resource tracking.
|
||||
{
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass.SetIndexBuffer(buffer0, wgpu::IndexFormat::Uint32);
|
||||
pass.SetBindGroup(0, bg0);
|
||||
pass.SetBindGroup(0, bg1);
|
||||
|
@ -581,7 +581,7 @@ namespace {
|
|||
// buffer0 is used as both read and write in the same pass
|
||||
{
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass.SetIndexBuffer(buffer0, wgpu::IndexFormat::Uint32);
|
||||
pass.SetBindGroup(0, bg1);
|
||||
pass.SetBindGroup(0, bg0);
|
||||
|
@ -656,8 +656,8 @@ namespace {
|
|||
|
||||
// These two bindings are invisible in render pass. But we still track these bindings.
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
DummyRenderPass dummyRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
PlaceholderRenderPass PlaceholderRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass.SetBindGroup(0, bg);
|
||||
pass.End();
|
||||
ASSERT_DEVICE_ERROR(encoder.Finish());
|
||||
|
@ -702,8 +702,8 @@ namespace {
|
|||
// Buffer usage in compute stage in bind group conflicts with index buffer. And binding
|
||||
// for compute stage is not visible in render pass. But we still track this binding.
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
DummyRenderPass dummyRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
PlaceholderRenderPass PlaceholderRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
|
||||
pass.SetBindGroup(0, bg);
|
||||
pass.End();
|
||||
|
@ -777,8 +777,8 @@ namespace {
|
|||
// Resource in bg1 conflicts with resources used in bg0. However, bindings in bg1 is
|
||||
// not used in pipeline. But we still track this binding.
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
DummyRenderPass dummyRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
PlaceholderRenderPass PlaceholderRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass.SetBindGroup(0, bg0);
|
||||
pass.SetBindGroup(1, bg1);
|
||||
pass.SetPipeline(rp);
|
||||
|
@ -963,8 +963,8 @@ namespace {
|
|||
wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl, {{0, view}});
|
||||
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
DummyRenderPass dummyRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
PlaceholderRenderPass PlaceholderRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass.SetBindGroup(0, bg);
|
||||
pass.SetBindGroup(1, bg1);
|
||||
pass.End();
|
||||
|
@ -1155,8 +1155,8 @@ namespace {
|
|||
pass0.SetBindGroup(0, writeBG);
|
||||
pass0.End();
|
||||
|
||||
DummyRenderPass dummyRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
PlaceholderRenderPass PlaceholderRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass1.SetBindGroup(0, readBG);
|
||||
pass1.End();
|
||||
|
||||
|
@ -1190,8 +1190,8 @@ namespace {
|
|||
// It is not allowed to use the same texture as both readable and writable in different
|
||||
// draws within the same render pass.
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
DummyRenderPass dummyRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
PlaceholderRenderPass PlaceholderRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass.SetPipeline(rp);
|
||||
|
||||
pass.SetBindGroup(0, sampledBG);
|
||||
|
@ -1262,8 +1262,8 @@ namespace {
|
|||
// It is invalid to use the same texture as both readable and writable usages in a
|
||||
// single draw
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
DummyRenderPass dummyRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
PlaceholderRenderPass PlaceholderRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass.SetPipeline(rp);
|
||||
|
||||
pass.SetBindGroup(0, sampledBG);
|
||||
|
@ -1470,8 +1470,8 @@ namespace {
|
|||
|
||||
// These two bindings are invisible in render pass. But we still track these bindings.
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
DummyRenderPass dummyRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
PlaceholderRenderPass PlaceholderRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass.SetBindGroup(0, bg);
|
||||
pass.End();
|
||||
ASSERT_DEVICE_ERROR(encoder.Finish());
|
||||
|
@ -1595,8 +1595,8 @@ namespace {
|
|||
// Texture binding in readBG conflicts with texture binding in writeBG. The binding
|
||||
// in writeBG is not used in pipeline. But we still track this binding.
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
DummyRenderPass dummyRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
PlaceholderRenderPass PlaceholderRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass.SetBindGroup(0, readBG);
|
||||
pass.SetBindGroup(1, writeBG);
|
||||
pass.SetPipeline(rp);
|
||||
|
@ -1644,8 +1644,8 @@ namespace {
|
|||
// Test that indirect + readonly is allowed in the same render pass.
|
||||
{
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
DummyRenderPass dummyRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
PlaceholderRenderPass PlaceholderRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass.SetPipeline(rp);
|
||||
pass.SetBindGroup(0, readBG);
|
||||
pass.DrawIndirect(buffer, 0);
|
||||
|
@ -1656,8 +1656,8 @@ namespace {
|
|||
// Test that indirect + writable is disallowed in the same render pass.
|
||||
{
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
DummyRenderPass dummyRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&dummyRenderPass);
|
||||
PlaceholderRenderPass PlaceholderRenderPass(device);
|
||||
wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
|
||||
pass.SetPipeline(rp);
|
||||
pass.SetBindGroup(0, writeBG);
|
||||
pass.DrawIndirect(buffer, 0);
|
||||
|
@@ -39,7 +39,7 @@ class UnsafeAPIValidationTest : public ValidationTest {
 // Check that pipeline overridable constants are disallowed as part of unsafe APIs.
 // TODO(dawn:1041) Remove when implementation for all backend is added
 TEST_F(UnsafeAPIValidationTest, PipelineOverridableConstants) {
-    // Create the dummy compute pipeline.
+    // Create the placeholder compute pipeline.
     wgpu::ComputePipelineDescriptor pipelineDescBase;
     pipelineDescBase.compute.entryPoint = "main";
@@ -252,7 +252,7 @@ void ValidationTest::OnDeviceLost(WGPUDeviceLostReason reason,
     ASSERT(false);
 }

-ValidationTest::DummyRenderPass::DummyRenderPass(const wgpu::Device& device)
+ValidationTest::PlaceholderRenderPass::PlaceholderRenderPass(const wgpu::Device& device)
     : attachmentFormat(wgpu::TextureFormat::RGBA8Unorm), width(400), height(400) {
     wgpu::TextureDescriptor descriptor;
     descriptor.dimension = wgpu::TextureDimension::e2D;
@@ -115,9 +115,9 @@ class ValidationTest : public testing::Test {

     // Helper functions to create objects to test validation.

-    struct DummyRenderPass : public wgpu::RenderPassDescriptor {
+    struct PlaceholderRenderPass : public wgpu::RenderPassDescriptor {
      public:
-        explicit DummyRenderPass(const wgpu::Device& device);
+        explicit PlaceholderRenderPass(const wgpu::Device& device);
         wgpu::Texture attachment;
         wgpu::TextureFormat attachmentFormat;
         uint32_t width;
@ -25,7 +25,7 @@ class VertexBufferValidationTest : public ValidationTest {
|
|||
void SetUp() override {
|
||||
ValidationTest::SetUp();
|
||||
|
||||
// dummy vertex shader module
|
||||
// Placeholder vertex shader module
|
||||
vsModule = utils::CreateShaderModule(device, R"(
|
||||
@stage(vertex) fn main() -> @builtin(position) vec4<f32> {
|
||||
return vec4<f32>(0.0, 0.0, 0.0, 0.0);
|
||||
|
@ -106,7 +106,7 @@ class VertexBufferValidationTest : public ValidationTest {
|
|||
|
||||
// Check that vertex buffers still count as bound if we switch the pipeline.
|
||||
TEST_F(VertexBufferValidationTest, VertexBuffersInheritedBetweenPipelines) {
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
wgpu::ShaderModule vsModule2 = MakeVertexShader(2);
|
||||
wgpu::ShaderModule vsModule1 = MakeVertexShader(1);
|
||||
|
||||
|
@ -143,7 +143,7 @@ TEST_F(VertexBufferValidationTest, VertexBuffersInheritedBetweenPipelines) {
|
|||
|
||||
// Check that vertex buffers that are set are reset between render passes.
|
||||
TEST_F(VertexBufferValidationTest, VertexBuffersNotInheritedBetweenRenderPasses) {
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
wgpu::ShaderModule vsModule2 = MakeVertexShader(2);
|
||||
wgpu::ShaderModule vsModule1 = MakeVertexShader(1);
|
||||
|
||||
|
@ -195,7 +195,7 @@ TEST_F(VertexBufferValidationTest, VertexBuffersNotInheritedBetweenRenderPasses)
|
|||
TEST_F(VertexBufferValidationTest, VertexBufferSlotValidation) {
|
||||
wgpu::Buffer buffer = MakeVertexBuffer();
|
||||
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
|
||||
// Control case: using the last vertex buffer slot in render passes is ok.
|
||||
{
|
||||
|
@ -238,7 +238,7 @@ TEST_F(VertexBufferValidationTest, VertexBufferSlotValidation) {
|
|||
TEST_F(VertexBufferValidationTest, VertexBufferOffsetOOBValidation) {
|
||||
wgpu::Buffer buffer = MakeVertexBuffer();
|
||||
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
// Control case, using the full buffer, with or without an explicit size is valid.
|
||||
{
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
|
@ -312,7 +312,7 @@ TEST_F(VertexBufferValidationTest, InvalidUsage) {
|
|||
wgpu::Buffer indexBuffer =
|
||||
utils::CreateBufferFromData<uint32_t>(device, wgpu::BufferUsage::Index, {0, 0, 0});
|
||||
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
// Control case: using the vertex buffer is valid.
|
||||
{
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
|
@ -351,7 +351,7 @@ TEST_F(VertexBufferValidationTest, InvalidUsage) {
|
|||
TEST_F(VertexBufferValidationTest, OffsetAlignment) {
|
||||
wgpu::Buffer vertexBuffer = MakeVertexBuffer();
|
||||
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
// Control cases: vertex buffer offset is a multiple of 4
|
||||
{
|
||||
wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
|
||||
|
@ -375,7 +375,7 @@ TEST_F(VertexBufferValidationTest, OffsetAlignment) {
|
|||
|
||||
// Check vertex buffer stride requirements for draw command.
|
||||
TEST_F(VertexBufferValidationTest, DrawStrideLimitsVertex) {
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
|
||||
// Create a buffer of size 28, containing 4 float32 elements, array stride size = 8
|
||||
// The last element doesn't have the full stride size
|
||||
|
@ -506,7 +506,7 @@ TEST_F(VertexBufferValidationTest, DrawStrideLimitsVertex) {
|
|||
|
||||
// Check instance buffer stride requirements with instanced attributes for draw command.
|
||||
TEST_F(VertexBufferValidationTest, DrawStrideLimitsInstance) {
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
|
||||
// Create a buffer of size 28, containing 4 float32 elements, array stride size = 8
|
||||
// The last element doesn't have the full stride size
|
||||
|
@ -637,7 +637,7 @@ TEST_F(VertexBufferValidationTest, DrawStrideLimitsInstance) {
|
|||
|
||||
// Check vertex buffer stride requirements with instanced attributes for draw indexed command.
|
||||
TEST_F(VertexBufferValidationTest, DrawIndexedStrideLimitsInstance) {
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
|
||||
// Create a buffer of size 28, containing 4 float32 elements, array stride size = 8
|
||||
// The last element doesn't have the full stride size
|
||||
|
@ -778,7 +778,7 @@ TEST_F(VertexBufferValidationTest, DrawIndexedStrideLimitsInstance) {
|
|||
|
||||
// Check last stride is computed correctly for vertex buffer with multiple attributes.
|
||||
TEST_F(VertexBufferValidationTest, DrawStrideLimitsVertexMultipleAttributes) {
|
||||
DummyRenderPass renderPass(device);
|
||||
PlaceholderRenderPass renderPass(device);
|
||||
|
||||
// Create a buffer of size 44, array stride size = 12
|
||||
wgpu::BufferDescriptor descriptor;
|
||||
|
|
|
@ -43,7 +43,7 @@ class VertexStateTest : public ValidationTest {
|
|||
}
|
||||
}
|
||||
|
||||
const char* kDummyVertexShader = R"(
|
||||
const char* kPlaceholderVertexShader = R"(
|
||||
@stage(vertex) fn main() -> @builtin(position) vec4<f32> {
|
||||
return vec4<f32>(0.0, 0.0, 0.0, 0.0);
|
||||
}
|
||||
|
@ -53,7 +53,7 @@ class VertexStateTest : public ValidationTest {
|
|||
// Check an empty vertex input is valid
|
||||
TEST_F(VertexStateTest, EmptyIsOk) {
|
||||
utils::ComboVertexState state;
|
||||
CreatePipeline(true, state, kDummyVertexShader);
|
||||
CreatePipeline(true, state, kPlaceholderVertexShader);
|
||||
}
|
||||
|
||||
// Check null buffer is valid
|
||||
|
@ -64,7 +64,7 @@ TEST_F(VertexStateTest, NullBufferIsOk) {
|
|||
state.cVertexBuffers[0].arrayStride = 0;
|
||||
state.cVertexBuffers[0].attributeCount = 0;
|
||||
state.cVertexBuffers[0].attributes = nullptr;
|
||||
CreatePipeline(true, state, kDummyVertexShader);
|
||||
CreatePipeline(true, state, kPlaceholderVertexShader);
|
||||
|
||||
// One null buffer (buffer[0]) followed by a buffer (buffer[1]) is OK
|
||||
state.vertexBufferCount = 2;
|
||||
|
@ -72,7 +72,7 @@ TEST_F(VertexStateTest, NullBufferIsOk) {
|
|||
state.cVertexBuffers[1].attributeCount = 1;
|
||||
state.cVertexBuffers[1].attributes = &state.cAttributes[0];
|
||||
state.cAttributes[0].shaderLocation = 0;
|
||||
CreatePipeline(true, state, kDummyVertexShader);
|
||||
CreatePipeline(true, state, kPlaceholderVertexShader);
|
||||
|
||||
// Null buffer (buffer[2]) sitting between buffers (buffer[1] and buffer[3]) is OK
|
||||
state.vertexBufferCount = 4;
|
||||
|
@ -81,7 +81,7 @@ TEST_F(VertexStateTest, NullBufferIsOk) {
|
|||
state.cVertexBuffers[3].attributeCount = 1;
|
||||
state.cVertexBuffers[3].attributes = &state.cAttributes[1];
|
||||
state.cAttributes[1].shaderLocation = 1;
|
||||
CreatePipeline(true, state, kDummyVertexShader);
|
||||
CreatePipeline(true, state, kPlaceholderVertexShader);
|
||||
}
|
||||
|
||||
// Check validation that pipeline vertex buffers are backed by attributes in the vertex input
|
||||
|
@ -130,11 +130,11 @@ TEST_F(VertexStateTest, StrideZero) {
|
|||
state.vertexBufferCount = 1;
|
||||
state.cVertexBuffers[0].arrayStride = 0;
|
||||
state.cVertexBuffers[0].attributeCount = 1;
|
||||
CreatePipeline(true, state, kDummyVertexShader);
|
||||
CreatePipeline(true, state, kPlaceholderVertexShader);
|
||||
|
||||
// Works ok with attributes at a large-ish offset
|
||||
state.cAttributes[0].offset = 128;
|
||||
CreatePipeline(true, state, kDummyVertexShader);
|
||||
CreatePipeline(true, state, kPlaceholderVertexShader);
|
||||
}
|
||||
|
||||
// Check validation that vertex attribute offset should be within vertex buffer arrayStride,
|
||||
|
@ -148,15 +148,15 @@ TEST_F(VertexStateTest, SetOffsetOutOfBounds) {
|
|||
state.cAttributes[0].shaderLocation = 0;
|
||||
state.cAttributes[1].shaderLocation = 1;
|
||||
state.cAttributes[1].offset = sizeof(float);
|
||||
CreatePipeline(true, state, kDummyVertexShader);
|
||||
CreatePipeline(true, state, kPlaceholderVertexShader);
|
||||
|
||||
// Test vertex attribute offset exceed vertex buffer arrayStride range
|
||||
state.cVertexBuffers[0].arrayStride = sizeof(float);
|
||||
CreatePipeline(false, state, kDummyVertexShader);
|
||||
CreatePipeline(false, state, kPlaceholderVertexShader);
|
||||
|
||||
// It's OK if arrayStride is zero
|
||||
state.cVertexBuffers[0].arrayStride = 0;
|
||||
CreatePipeline(true, state, kDummyVertexShader);
|
||||
CreatePipeline(true, state, kPlaceholderVertexShader);
|
||||
}
|
||||
|
||||
// Check out of bounds condition on total number of vertex buffers
|
||||
|
@ -169,11 +169,11 @@ TEST_F(VertexStateTest, SetVertexBuffersNumLimit) {
|
|||
state.cVertexBuffers[i].attributes = &state.cAttributes[i];
|
||||
state.cAttributes[i].shaderLocation = i;
|
||||
}
|
||||
CreatePipeline(true, state, kDummyVertexShader);
|
||||
CreatePipeline(true, state, kPlaceholderVertexShader);
|
||||
|
||||
// Test vertex buffer number exceed the limit
|
||||
state.vertexBufferCount = kMaxVertexBuffers + 1;
|
||||
CreatePipeline(false, state, kDummyVertexShader);
|
||||
CreatePipeline(false, state, kPlaceholderVertexShader);
|
||||
}
|
||||
|
||||
// Check out of bounds condition on total number of vertex attributes
|
||||
|
@ -185,12 +185,12 @@ TEST_F(VertexStateTest, SetVertexAttributesNumLimit) {
|
|||
for (uint32_t i = 0; i < kMaxVertexAttributes; ++i) {
|
||||
state.cAttributes[i].shaderLocation = i;
|
||||
}
|
||||
CreatePipeline(true, state, kDummyVertexShader);
|
||||
CreatePipeline(true, state, kPlaceholderVertexShader);
|
||||
|
||||
// Test vertex attribute number exceed the limit
|
||||
state.cVertexBuffers[1].attributeCount = 1;
|
||||
state.cVertexBuffers[1].attributes = &state.cAttributes[kMaxVertexAttributes - 1];
|
||||
CreatePipeline(false, state, kDummyVertexShader);
|
||||
CreatePipeline(false, state, kPlaceholderVertexShader);
|
||||
}
|
||||
|
||||
// Check out of bounds condition on input arrayStride
|
||||
|
@ -200,11 +200,11 @@ TEST_F(VertexStateTest, SetInputStrideOutOfBounds) {
|
|||
state.vertexBufferCount = 1;
|
||||
state.cVertexBuffers[0].arrayStride = kMaxVertexBufferArrayStride;
|
||||
state.cVertexBuffers[0].attributeCount = 1;
|
||||
CreatePipeline(true, state, kDummyVertexShader);
|
||||
CreatePipeline(true, state, kPlaceholderVertexShader);
|
||||
|
||||
// Test input arrayStride OOB
|
||||
state.cVertexBuffers[0].arrayStride = kMaxVertexBufferArrayStride + 1;
|
||||
CreatePipeline(false, state, kDummyVertexShader);
|
||||
CreatePipeline(false, state, kPlaceholderVertexShader);
|
||||
}
|
||||
|
||||
// Check multiple of 4 bytes constraint on input arrayStride
|
||||
|
@ -214,11 +214,11 @@ TEST_F(VertexStateTest, SetInputStrideNotAligned) {
|
|||
state.vertexBufferCount = 1;
|
||||
state.cVertexBuffers[0].arrayStride = 4;
|
||||
state.cVertexBuffers[0].attributeCount = 1;
|
||||
CreatePipeline(true, state, kDummyVertexShader);
|
||||
CreatePipeline(true, state, kPlaceholderVertexShader);
|
||||
|
||||
// Test input arrayStride not multiple of 4 bytes
|
||||
state.cVertexBuffers[0].arrayStride = 2;
|
||||
CreatePipeline(false, state, kDummyVertexShader);
|
||||
CreatePipeline(false, state, kPlaceholderVertexShader);
|
||||
}
|
||||
|
||||
// Test that we cannot set an already set attribute
|
||||
|
@ -228,13 +228,13 @@ TEST_F(VertexStateTest, AlreadySetAttribute) {
|
|||
state.vertexBufferCount = 1;
|
||||
state.cVertexBuffers[0].attributeCount = 1;
|
||||
state.cAttributes[0].shaderLocation = 0;
|
||||
CreatePipeline(true, state, kDummyVertexShader);
|
||||
CreatePipeline(true, state, kPlaceholderVertexShader);
|
||||
|
||||
// Oh no, attribute 0 is set twice
|
||||
state.cVertexBuffers[0].attributeCount = 2;
|
||||
state.cAttributes[0].shaderLocation = 0;
|
||||
state.cAttributes[1].shaderLocation = 0;
|
||||
CreatePipeline(false, state, kDummyVertexShader);
|
||||
CreatePipeline(false, state, kPlaceholderVertexShader);
|
||||
}
|
||||
|
||||
// Test that a arrayStride of 0 is valid
|
||||
|
@ -246,11 +246,11 @@ TEST_F(VertexStateTest, SetSameShaderLocation) {
|
|||
state.cAttributes[0].shaderLocation = 0;
|
||||
state.cAttributes[1].shaderLocation = 1;
|
||||
state.cAttributes[1].offset = sizeof(float);
|
||||
CreatePipeline(true, state, kDummyVertexShader);
|
||||
CreatePipeline(true, state, kPlaceholderVertexShader);
|
||||
|
||||
// Test same shader location in two attributes in the same buffer
|
||||
state.cAttributes[1].shaderLocation = 0;
|
||||
CreatePipeline(false, state, kDummyVertexShader);
|
||||
CreatePipeline(false, state, kPlaceholderVertexShader);
|
||||
|
||||
// Test same shader location in two attributes in different buffers
|
||||
state.vertexBufferCount = 2;
|
||||
|
@ -259,7 +259,7 @@ TEST_F(VertexStateTest, SetSameShaderLocation) {
|
|||
state.cVertexBuffers[1].attributeCount = 1;
|
||||
state.cVertexBuffers[1].attributes = &state.cAttributes[1];
|
||||
state.cAttributes[1].shaderLocation = 0;
|
||||
CreatePipeline(false, state, kDummyVertexShader);
|
||||
CreatePipeline(false, state, kPlaceholderVertexShader);
|
||||
}
|
||||
|
||||
// Check out of bounds condition on attribute shader location
|
||||
|
@ -269,11 +269,11 @@ TEST_F(VertexStateTest, SetAttributeLocationOutOfBounds) {
|
|||
state.vertexBufferCount = 1;
|
||||
state.cVertexBuffers[0].attributeCount = 1;
|
||||
state.cAttributes[0].shaderLocation = kMaxVertexAttributes - 1;
|
||||
CreatePipeline(true, state, kDummyVertexShader);
|
||||
CreatePipeline(true, state, kPlaceholderVertexShader);
|
||||
|
||||
// Test attribute location OOB
|
||||
state.cAttributes[0].shaderLocation = kMaxVertexAttributes;
|
||||
CreatePipeline(false, state, kDummyVertexShader);
|
||||
CreatePipeline(false, state, kPlaceholderVertexShader);
|
||||
}
|
||||
|
||||
// Check attribute offset out of bounds
|
||||
|
@ -283,11 +283,11 @@ TEST_F(VertexStateTest, SetAttributeOffsetOutOfBounds) {
|
|||
state.vertexBufferCount = 1;
|
||||
state.cVertexBuffers[0].attributeCount = 1;
|
||||
state.cAttributes[0].offset = kMaxVertexBufferArrayStride - sizeof(wgpu::VertexFormat::Float32);
|
||||
CreatePipeline(true, state, kDummyVertexShader);
|
||||
CreatePipeline(true, state, kPlaceholderVertexShader);
|
||||
|
||||
// Test attribute offset out of bounds
|
||||
state.cAttributes[0].offset = kMaxVertexBufferArrayStride - 1;
|
||||
CreatePipeline(false, state, kDummyVertexShader);
|
||||
CreatePipeline(false, state, kPlaceholderVertexShader);
|
||||
}
|
||||
|
||||
// Check the min(4, formatSize) alignment constraint for the offset.
|
||||
|
@ -300,34 +300,34 @@ TEST_F(VertexStateTest, SetOffsetNotAligned) {
|
|||
// Test that for small formats, the offset must be aligned to the format size.
|
||||
state.cAttributes[0].format = wgpu::VertexFormat::Float32;
|
||||
state.cAttributes[0].offset = 4;
|
||||
CreatePipeline(true, state, kDummyVertexShader);
|
||||
CreatePipeline(true, state, kPlaceholderVertexShader);
|
||||
state.cAttributes[0].offset = 2;
|
||||
CreatePipeline(false, state, kDummyVertexShader);
|
||||
CreatePipeline(false, state, kPlaceholderVertexShader);
|
||||
|
||||
state.cAttributes[0].format = wgpu::VertexFormat::Snorm16x2;
|
||||
state.cAttributes[0].offset = 4;
|
||||
CreatePipeline(true, state, kDummyVertexShader);
|
||||
CreatePipeline(true, state, kPlaceholderVertexShader);
|
||||
state.cAttributes[0].offset = 2;
|
||||
CreatePipeline(false, state, kDummyVertexShader);
|
||||
CreatePipeline(false, state, kPlaceholderVertexShader);
|
||||
|
||||
state.cAttributes[0].format = wgpu::VertexFormat::Unorm8x2;
|
||||
state.cAttributes[0].offset = 2;
|
||||
CreatePipeline(true, state, kDummyVertexShader);
|
||||
CreatePipeline(true, state, kPlaceholderVertexShader);
|
||||
state.cAttributes[0].offset = 1;
|
||||
CreatePipeline(false, state, kDummyVertexShader);
|
||||
CreatePipeline(false, state, kPlaceholderVertexShader);
|
||||
|
||||
// Test that for large formts the offset only needs to be aligned to 4.
|
||||
state.cAttributes[0].format = wgpu::VertexFormat::Snorm16x4;
|
||||
state.cAttributes[0].offset = 4;
|
||||
CreatePipeline(true, state, kDummyVertexShader);
|
||||
CreatePipeline(true, state, kPlaceholderVertexShader);
|
||||
|
||||
state.cAttributes[0].format = wgpu::VertexFormat::Uint32x3;
|
||||
state.cAttributes[0].offset = 4;
|
||||
CreatePipeline(true, state, kDummyVertexShader);
|
||||
CreatePipeline(true, state, kPlaceholderVertexShader);
|
||||
|
||||
state.cAttributes[0].format = wgpu::VertexFormat::Sint32x4;
|
||||
state.cAttributes[0].offset = 4;
|
||||
CreatePipeline(true, state, kDummyVertexShader);
|
||||
CreatePipeline(true, state, kPlaceholderVertexShader);
|
||||
}
|
||||
|
||||
// Check attribute offset overflow
|
||||
|
@ -336,7 +336,7 @@ TEST_F(VertexStateTest, SetAttributeOffsetOverflow) {
|
|||
state.vertexBufferCount = 1;
|
||||
state.cVertexBuffers[0].attributeCount = 1;
|
||||
state.cAttributes[0].offset = std::numeric_limits<uint32_t>::max();
|
||||
CreatePipeline(false, state, kDummyVertexShader);
|
||||
CreatePipeline(false, state, kPlaceholderVertexShader);
|
||||
}
|
||||
|
||||
// Check for some potential underflow in the vertex input validation
|
||||
|
@ -346,7 +346,7 @@ TEST_F(VertexStateTest, VertexFormatLargerThanNonZeroStride) {
|
|||
state.cVertexBuffers[0].arrayStride = 4;
|
||||
state.cVertexBuffers[0].attributeCount = 1;
|
||||
state.cAttributes[0].format = wgpu::VertexFormat::Float32x4;
|
||||
CreatePipeline(false, state, kDummyVertexShader);
|
||||
CreatePipeline(false, state, kPlaceholderVertexShader);
|
||||
}
|
||||
|
||||
// Check that the vertex format base type must match the shader's variable base type.
|
||||
|
|
|
@ -224,10 +224,10 @@ namespace {
|
|||
|
||||
// Test copying from a buffer to a multi-planar format fails.
|
||||
TEST_F(VideoViewsValidation, B2TCopyAllAspectsFails) {
|
||||
std::vector<uint8_t> dummyData(4, 0);
|
||||
std::vector<uint8_t> placeholderData(4, 0);
|
||||
|
||||
wgpu::Buffer srcBuffer = utils::CreateBufferFromData(
|
||||
device, dummyData.data(), dummyData.size(), wgpu::BufferUsage::CopySrc);
|
||||
device, placeholderData.data(), placeholderData.size(), wgpu::BufferUsage::CopySrc);
|
||||
|
||||
wgpu::Texture dstTexture = CreateVideoTextureForTest(
|
||||
wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
|
||||
|
@ -245,10 +245,10 @@ namespace {
|
|||
|
||||
// Test copying from a buffer to a multi-planar format per plane fails.
|
||||
TEST_F(VideoViewsValidation, B2TCopyPlaneAspectsFails) {
|
||||
std::vector<uint8_t> dummyData(4, 0);
|
||||
std::vector<uint8_t> placeholderData(4, 0);
|
||||
|
||||
wgpu::Buffer srcBuffer = utils::CreateBufferFromData(
|
||||
device, dummyData.data(), dummyData.size(), wgpu::BufferUsage::CopySrc);
|
||||
device, placeholderData.data(), placeholderData.size(), wgpu::BufferUsage::CopySrc);
|
||||
|
||||
wgpu::Texture dstTexture = CreateVideoTextureForTest(
|
||||
wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
|
||||
|
@ -311,13 +311,14 @@ namespace {
|
|||
wgpu::ImageCopyTexture imageCopyTexture =
|
||||
utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
|
||||
|
||||
std::vector<uint8_t> dummyData(4, 0);
|
||||
std::vector<uint8_t> placeholderData(4, 0);
|
||||
wgpu::Extent3D writeSize = {1, 1, 1};
|
||||
|
||||
wgpu::Queue queue = device.GetQueue();
|
||||
|
||||
ASSERT_DEVICE_ERROR(queue.WriteTexture(&imageCopyTexture, dummyData.data(),
|
||||
dummyData.size(), &textureDataLayout, &writeSize));
|
||||
ASSERT_DEVICE_ERROR(queue.WriteTexture(&imageCopyTexture, placeholderData.data(),
|
||||
placeholderData.size(), &textureDataLayout,
|
||||
&writeSize));
|
||||
}
|
||||
|
||||
// Tests writing into a multi-planar format per plane fails.
|
||||
|
@ -329,13 +330,14 @@ namespace {
|
|||
wgpu::ImageCopyTexture imageCopyTexture =
|
||||
utils::CreateImageCopyTexture(texture, 0, {0, 0, 0}, wgpu::TextureAspect::Plane0Only);
|
||||
|
||||
std::vector<uint8_t> dummmyData(4, 0);
|
||||
std::vector<uint8_t> placeholderData(4, 0);
|
||||
wgpu::Extent3D writeSize = {1, 1, 1};
|
||||
|
||||
wgpu::Queue queue = device.GetQueue();
|
||||
|
||||
ASSERT_DEVICE_ERROR(queue.WriteTexture(&imageCopyTexture, dummmyData.data(),
|
||||
dummmyData.size(), &textureDataLayout, &writeSize));
|
||||
ASSERT_DEVICE_ERROR(queue.WriteTexture(&imageCopyTexture, placeholderData.data(),
|
||||
placeholderData.size(), &textureDataLayout,
|
||||
&writeSize));
|
||||
}
|
||||
|
||||
} // anonymous namespace
|
||||
|
|
|
@ -166,13 +166,13 @@ TEST_F(WireArgumentTests, CStringArgument) {
|
|||
|
||||
wgpuDeviceCreateRenderPipeline(device, &pipelineDescriptor);
|
||||
|
||||
WGPURenderPipeline apiDummyPipeline = api.GetNewRenderPipeline();
|
||||
WGPURenderPipeline apiPlaceholderPipeline = api.GetNewRenderPipeline();
|
||||
EXPECT_CALL(api,
|
||||
DeviceCreateRenderPipeline(
|
||||
apiDevice, MatchesLambda([](const WGPURenderPipelineDescriptor* desc) -> bool {
|
||||
return desc->vertex.entryPoint == std::string("main");
|
||||
})))
|
||||
.WillOnce(Return(apiDummyPipeline));
|
||||
.WillOnce(Return(apiPlaceholderPipeline));
|
||||
|
||||
FlushClient();
|
||||
}
|
||||
|
@ -248,7 +248,7 @@ TEST_F(WireArgumentTests, StructureOfValuesArgument) {
|
|||
|
||||
wgpuDeviceCreateSampler(device, &descriptor);
|
||||
|
||||
WGPUSampler apiDummySampler = api.GetNewSampler();
|
||||
WGPUSampler apiPlaceholderSampler = api.GetNewSampler();
|
||||
EXPECT_CALL(api, DeviceCreateSampler(
|
||||
apiDevice, MatchesLambda([](const WGPUSamplerDescriptor* desc) -> bool {
|
||||
return desc->nextInChain == nullptr &&
|
||||
|
@ -261,7 +261,7 @@ TEST_F(WireArgumentTests, StructureOfValuesArgument) {
|
|||
desc->compare == WGPUCompareFunction_Never &&
|
||||
desc->lodMinClamp == kLodMin && desc->lodMaxClamp == kLodMax;
|
||||
})))
|
||||
.WillOnce(Return(apiDummySampler));
|
||||
.WillOnce(Return(apiPlaceholderSampler));
|
||||
|
||||
FlushClient();
|
||||
}
|
||||
|
@ -282,7 +282,7 @@ TEST_F(WireArgumentTests, StructureOfObjectArrayArgument) {
|
|||
|
||||
wgpuDeviceCreatePipelineLayout(device, &descriptor);
|
||||
|
||||
WGPUPipelineLayout apiDummyLayout = api.GetNewPipelineLayout();
|
||||
WGPUPipelineLayout apiPlaceholderLayout = api.GetNewPipelineLayout();
|
||||
EXPECT_CALL(api, DeviceCreatePipelineLayout(
|
||||
apiDevice,
|
||||
MatchesLambda([apiBgl](const WGPUPipelineLayoutDescriptor* desc) -> bool {
|
||||
|
@ -290,7 +290,7 @@ TEST_F(WireArgumentTests, StructureOfObjectArrayArgument) {
|
|||
desc->bindGroupLayoutCount == 1 &&
|
||||
desc->bindGroupLayouts[0] == apiBgl;
|
||||
})))
|
||||
.WillOnce(Return(apiDummyLayout));
|
||||
.WillOnce(Return(apiPlaceholderLayout));
|
||||
|
||||
FlushClient();
|
||||
}
|
||||
|
|
|
@@ -29,7 +29,7 @@ namespace {
 // Test that commands are not received if the client disconnects.
 TEST_F(WireDisconnectTests, CommandsAfterDisconnect) {
-    // Sanity check that commands work at all.
+    // Check that commands work at all.
     wgpuDeviceCreateCommandEncoder(device, nullptr);

     WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();

@@ -49,7 +49,7 @@ TEST_F(WireDisconnectTests, CommandsAfterDisconnect) {
 // Test that commands that are serialized before a disconnect but flushed
 // after are received.
 TEST_F(WireDisconnectTests, FlushAfterDisconnect) {
-    // Sanity check that commands work at all.
+    // Check that commands work at all.
     wgpuDeviceCreateCommandEncoder(device, nullptr);

     // Disconnect.
@@ -38,8 +38,8 @@ TEST_F(WireInjectTextureTests, CallAfterReserveInject) {
                                                reservation.deviceId, reservation.deviceGeneration));

     wgpuTextureCreateView(reservation.texture, nullptr);
-    WGPUTextureView apiDummyView = api.GetNewTextureView();
-    EXPECT_CALL(api, TextureCreateView(apiTexture, nullptr)).WillOnce(Return(apiDummyView));
+    WGPUTextureView apiPlaceholderView = api.GetNewTextureView();
+    EXPECT_CALL(api, TextureCreateView(apiTexture, nullptr)).WillOnce(Return(apiPlaceholderView));
     FlushClient();
 }

@@ -49,7 +49,7 @@ TEST_F(WireOptionalTests, OptionalObjectValue) {
     wgpuDeviceCreateBindGroup(device, &bgDesc);

-    WGPUBindGroup apiDummyBindGroup = api.GetNewBindGroup();
+    WGPUBindGroup apiPlaceholderBindGroup = api.GetNewBindGroup();
     EXPECT_CALL(api, DeviceCreateBindGroup(
                          apiDevice, MatchesLambda([](const WGPUBindGroupDescriptor* desc) -> bool {
                              return desc->nextInChain == nullptr && desc->entryCount == 1 &&

@@ -58,7 +58,7 @@ TEST_F(WireOptionalTests, OptionalObjectValue) {
                                     desc->entries[0].buffer == nullptr &&
                                     desc->entries[0].textureView == nullptr;
                          })))
-        .WillOnce(Return(apiDummyBindGroup));
+        .WillOnce(Return(apiPlaceholderBindGroup));

     FlushClient();
 }

@@ -138,7 +138,7 @@ TEST_F(WireOptionalTests, OptionalStructPointer) {
     pipelineDescriptor.depthStencil = &depthStencilState;
     wgpuDeviceCreateRenderPipeline(device, &pipelineDescriptor);

-    WGPURenderPipeline apiDummyPipeline = api.GetNewRenderPipeline();
+    WGPURenderPipeline apiPlaceholderPipeline = api.GetNewRenderPipeline();
     EXPECT_CALL(
         api,
         DeviceCreateRenderPipeline(

@@ -161,7 +161,7 @@ TEST_F(WireOptionalTests, OptionalStructPointer) {
                desc->depthStencil->depthBiasSlopeScale == 0.0 &&
                desc->depthStencil->depthBiasClamp == 0.0;
            })))
-        .WillOnce(Return(apiDummyPipeline));
+        .WillOnce(Return(apiPlaceholderPipeline));

     FlushClient();

@@ -173,7 +173,7 @@ TEST_F(WireOptionalTests, OptionalStructPointer) {
            apiDevice, MatchesLambda([](const WGPURenderPipelineDescriptor* desc) -> bool {
                return desc->depthStencil == nullptr;
            })))
-        .WillOnce(Return(apiDummyPipeline));
+        .WillOnce(Return(apiPlaceholderPipeline));

     FlushClient();
 }
@ -96,11 +96,11 @@ class D3D12DescriptorHeapTests : public DawnTest {
|
|||
wgpu::ShaderModule mSimpleFSModule;
|
||||
};
|
||||
|
||||
class DummyStagingDescriptorAllocator {
|
||||
class PlaceholderStagingDescriptorAllocator {
|
||||
public:
|
||||
DummyStagingDescriptorAllocator(Device* device,
|
||||
uint32_t descriptorCount,
|
||||
uint32_t allocationsPerHeap)
|
||||
PlaceholderStagingDescriptorAllocator(Device* device,
|
||||
uint32_t descriptorCount,
|
||||
uint32_t allocationsPerHeap)
|
||||
: mAllocator(device,
|
||||
descriptorCount,
|
||||
allocationsPerHeap * descriptorCount,
|
||||
|
@ -899,7 +899,8 @@ TEST_P(D3D12DescriptorHeapTests, EncodeManyUBOAndSamplers) {
|
|||
TEST_P(D3D12DescriptorHeapTests, Single) {
|
||||
constexpr uint32_t kDescriptorCount = 4;
|
||||
constexpr uint32_t kAllocationsPerHeap = 3;
|
||||
DummyStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount, kAllocationsPerHeap);
|
||||
PlaceholderStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount,
|
||||
kAllocationsPerHeap);
|
||||
|
||||
CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
|
||||
EXPECT_EQ(allocation.GetHeapIndex(), 0u);
|
||||
|
@ -914,7 +915,8 @@ TEST_P(D3D12DescriptorHeapTests, Single) {
|
|||
TEST_P(D3D12DescriptorHeapTests, Sequential) {
|
||||
constexpr uint32_t kDescriptorCount = 4;
|
||||
constexpr uint32_t kAllocationsPerHeap = 3;
|
||||
DummyStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount, kAllocationsPerHeap);
|
||||
PlaceholderStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount,
|
||||
kAllocationsPerHeap);
|
||||
|
||||
// Allocate |kNumOfHeaps| worth.
|
||||
constexpr uint32_t kNumOfHeaps = 2;
|
||||
|
@ -944,7 +946,8 @@ TEST_P(D3D12DescriptorHeapTests, Sequential) {
|
|||
TEST_P(D3D12DescriptorHeapTests, ReuseFreedHeaps) {
|
||||
constexpr uint32_t kDescriptorCount = 4;
|
||||
constexpr uint32_t kAllocationsPerHeap = 25;
|
||||
DummyStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount, kAllocationsPerHeap);
|
||||
PlaceholderStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount,
|
||||
kAllocationsPerHeap);
|
||||
|
||||
constexpr uint32_t kNumofHeaps = 10;
|
||||
|
||||
|
@ -987,7 +990,8 @@ TEST_P(D3D12DescriptorHeapTests, ReuseFreedHeaps) {
|
|||
TEST_P(D3D12DescriptorHeapTests, AllocateDeallocateMany) {
|
||||
constexpr uint32_t kDescriptorCount = 4;
|
||||
constexpr uint32_t kAllocationsPerHeap = 25;
|
||||
DummyStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount, kAllocationsPerHeap);
|
||||
PlaceholderStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount,
|
||||
kAllocationsPerHeap);
|
||||
|
||||
std::list<CPUDescriptorHeapAllocation> list3;
|
||||
std::list<CPUDescriptorHeapAllocation> list5;
|
||||
|
|
|
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-add_library(dawn_utils STATIC ${DAWN_DUMMY_FILE})
+add_library(dawn_utils STATIC ${DAWN_PLACEHOLDER_FILE})
 common_compile_options(dawn_utils)
 target_sources(dawn_utils PRIVATE
     "ComboRenderBundleEncoderDescriptor.cpp"
@@ -19,7 +19,7 @@

 // NOTE: This must be included before GLFW/glfw3.h because the latter will
 // include <vulkan/vulkan.h> and "common/vulkan_platform.h" wants to be
-// the first header to do so for sanity reasons (e.g. undefining weird
+// the first header to do so for validity reasons (e.g. undefining weird
 // macros on Windows and Linux).
 // clang-format off
 #include "dawn/common/vulkan_platform.h"
@@ -22,7 +22,7 @@
 namespace utils {

     // The returned CALayer is autoreleased.
-    void* CreateDummyCALayer();
+    void* CreatePlaceholderCALayer();

 }  // namespace utils

@@ -18,7 +18,7 @@

 namespace utils {

-    void* CreateDummyCALayer() {
+    void* CreatePlaceholderCALayer() {
         return [CALayer layer];
     }

@@ -18,7 +18,7 @@ DawnJSONGenerator(
     RESULT_VARIABLE "DAWN_WIRE_GEN_SOURCES"
 )

-add_library(dawn_wire ${DAWN_DUMMY_FILE})
+add_library(dawn_wire ${DAWN_PLACEHOLDER_FILE})
 common_compile_options(dawn_wire)

 target_compile_definitions(dawn_wire PRIVATE "DAWN_WIRE_IMPLEMENTATION")
@@ -3410,8 +3410,8 @@ TEST_F(SpvModuleScopeVarParserTest, RegisterInputOutputVars) {

     %300 = OpFunction %void None %voidfn
     %entry_300 = OpLabel
-    %dummy_300_1 = OpFunctionCall %void %100
-    %dummy_300_2 = OpFunctionCall %void %200
+    %placeholder_300_1 = OpFunctionCall %void %100
+    %placeholder_300_2 = OpFunctionCall %void %200
     OpReturn
     OpFunctionEnd

@@ -3424,21 +3424,21 @@ TEST_F(SpvModuleScopeVarParserTest, RegisterInputOutputVars) {
     ; Call %100
     %1100 = OpFunction %void None %voidfn
     %entry_1100 = OpLabel
-    %dummy_1100_1 = OpFunctionCall %void %100
+    %placeholder_1100_1 = OpFunctionCall %void %100
     OpReturn
     OpFunctionEnd

     ; Call %200
     %1200 = OpFunction %void None %voidfn
     %entry_1200 = OpLabel
-    %dummy_1200_1 = OpFunctionCall %void %200
+    %placeholder_1200_1 = OpFunctionCall %void %200
     OpReturn
     OpFunctionEnd

     ; Call %300
     %1300 = OpFunction %void None %voidfn
     %entry_1300 = OpLabel
-    %dummy_1300_1 = OpFunctionCall %void %300
+    %placeholder_1300_1 = OpFunctionCall %void %300
     OpReturn
     OpFunctionEnd

@@ -5,9 +5,9 @@ struct Scene {
 struct Material {
   vDiffuseColor : vec4<f32>,
   vAmbientColor : vec3<f32>,
-  dummy: f32,
+  placeholder: f32,
   vEmissiveColor : vec3<f32>,
-  dummy2: f32,
+  placeholder2: f32,
 };

 struct Mesh {
@@ -11,9 +11,9 @@ struct Scene {
 struct Material {
   vec4 vDiffuseColor;
   vec3 vAmbientColor;
-  float dummy;
+  float placeholder;
   vec3 vEmissiveColor;
-  float dummy2;
+  float placeholder2;
 };

 struct Mesh {

@@ -29,9 +29,9 @@ layout(binding = 0) uniform Scene_1 {
 layout(binding = 1) uniform Material_1 {
   vec4 vDiffuseColor;
   vec3 vAmbientColor;
-  float dummy;
+  float placeholder;
   vec3 vEmissiveColor;
-  float dummy2;
+  float placeholder2;
 } x_49;

 layout(binding = 2) uniform Mesh_1 {
@@ -19,9 +19,9 @@ struct Scene {
 struct Material {
   /* 0x0000 */ float4 vDiffuseColor;
   /* 0x0010 */ packed_float3 vAmbientColor;
-  /* 0x001c */ float dummy;
+  /* 0x001c */ float placeholder;
   /* 0x0020 */ packed_float3 vEmissiveColor;
-  /* 0x002c */ float dummy2;
+  /* 0x002c */ float placeholder2;
 };

 struct Mesh {

@@ -151,4 +151,3 @@ fragment tint_symbol_3 tint_symbol(const constant Scene* tint_symbol_24 [[buffer
   wrapper_result.glFragColor_1 = inner_result.glFragColor_1;
   return wrapper_result;
 }
-
@@ -19,9 +19,9 @@
 OpName %Material "Material"
 OpMemberName %Material 0 "vDiffuseColor"
 OpMemberName %Material 1 "vAmbientColor"
-OpMemberName %Material 2 "dummy"
+OpMemberName %Material 2 "placeholder"
 OpMemberName %Material 3 "vEmissiveColor"
-OpMemberName %Material 4 "dummy2"
+OpMemberName %Material 4 "placeholder2"
 OpName %x_49 "x_49"
 OpName %Mesh "Mesh"
 OpMemberName %Mesh 0 "visibility"
@@ -5,9 +5,9 @@ struct Scene {
 struct Material {
   vDiffuseColor : vec4<f32>,
   vAmbientColor : vec3<f32>,
-  dummy : f32,
+  placeholder : f32,
   vEmissiveColor : vec3<f32>,
-  dummy2 : f32,
+  placeholder2 : f32,
 }

 struct Mesh {
@@ -35,20 +35,20 @@ def compile_src(out_dir):
         shutil.rmtree(out_dir)

     run_tsc_ignore_errors([
-        '--project',
-        os.path.join(webgpu_cts_root_dir, 'tsconfig.json'),
-        '--outDir',
+        "--project",
+        os.path.join(webgpu_cts_root_dir, "tsconfig.json"),
+        "--outDir",
         out_dir,
-        '--noEmit',
-        'false',
-        '--noEmitOnError',
-        'false',
-        '--declaration',
-        'false',
-        '--sourceMap',
-        'false',
-        '--target',
-        'ES2017',
+        "--noEmit",
+        "false",
+        "--noEmitOnError",
+        "false",
+        "--declaration",
+        "false",
+        "--sourceMap",
+        "false",
+        "--target",
+        "ES2017",
     ])


@@ -59,43 +59,43 @@ def compile_src_for_node(out_dir, additional_args=None, clean=True):
         shutil.rmtree(out_dir)

     args = [
-        '--project',
-        os.path.join(webgpu_cts_root_dir, 'node.tsconfig.json'),
-        '--outDir',
+        "--project",
+        os.path.join(webgpu_cts_root_dir, "node.tsconfig.json"),
+        "--outDir",
        out_dir,
-        '--noEmit',
-        'false',
-        '--noEmitOnError',
-        'false',
-        '--declaration',
-        'false',
-        '--sourceMap',
-        'false',
-        '--target',
-        'ES6',
+        "--noEmit",
+        "false",
+        "--noEmitOnError",
+        "false",
+        "--declaration",
+        "false",
+        "--sourceMap",
+        "false",
+        "--target",
+        "ES6",
     ]
     args.extend(additional_args)

     run_tsc_ignore_errors(args)


-if __name__ == '__main__':
+if __name__ == "__main__":
     if len(sys.argv) != 2:
-        print('Usage: compile_src.py GEN_DIR')
+        print("Usage: compile_src.py GEN_DIR")
         sys.exit(1)

     gen_dir = sys.argv[1]

     # Compile the CTS src.
-    compile_src(os.path.join(gen_dir, 'src'))
-    compile_src_for_node(os.path.join(gen_dir, 'src-node'))
+    compile_src(os.path.join(gen_dir, "src"))
+    compile_src_for_node(os.path.join(gen_dir, "src-node"))

-    # Run gen_listings.js to overwrite the dummy src/webgpu/listings.js created
+    # Run gen_listings.js to overwrite the placeholder src/webgpu/listings.js created
     # from transpiling src/
     RunNode([
-        os.path.join(gen_dir, 'src-node', 'common', 'tools',
-                     'gen_listings.js'),
-        '--no-validate',
-        os.path.join(gen_dir, 'src'),
-        os.path.join(gen_dir, 'src-node', 'webgpu'),
+        os.path.join(gen_dir, "src-node", "common", "tools",
+                     "gen_listings.js"),
+        "--no-validate",
+        os.path.join(gen_dir, "src"),
+        os.path.join(gen_dir, "src-node", "webgpu"),
     ])