Initial commit of all the NXT integration.

More like squashed history, contributors were:
 - Kai Ninomiya
 - Corentin Wallez
Author: Corentin Wallez
Date:   2017-04-20 14:38:20 -04:00
Commit: f07e3bd4c9
134 changed files with 24658 additions and 0 deletions

.gitmodules (vendored, new file)
@@ -0,0 +1,27 @@
[submodule "external/glfw"]
path = third_party/glfw
url = https://github.com/glfw/glfw.git
[submodule "external/googletest"]
path = third_party/googletest
url = https://github.com/google/googletest.git
[submodule "external/glslang"]
path = third_party/glslang
url = https://github.com/google/glslang.git
[submodule "external/shaderc"]
path = third_party/shaderc
url = https://github.com/google/shaderc.git
[submodule "external/spirv-tools"]
path = third_party/spirv-tools
url = https://github.com/KhronosGroup/SPIRV-Tools.git
[submodule "external/spirv-headers"]
path = third_party/spirv-headers
url = https://github.com/KhronosGroup/SPIRV-Headers.git
[submodule "external/spirv-cross"]
path = third_party/spirv-cross
url = https://github.com/KhronosGroup/SPIRV-Cross.git
[submodule "external/stb"]
path = third_party/stb
url = https://github.com/nothings/stb.git
[submodule "external/glm"]
path = third_party/glm
url = https://github.com/g-truc/glm.git

AUTHORS (new file)
@@ -0,0 +1,6 @@
# This is the list of NXT authors for copyright purposes.
#
# This does not necessarily list everyone who has contributed code, since in
# some cases, their employer may be the copyright holder. To see the full list
# of contributors, see the revision history in source control.
Google Inc.

CMakeLists.txt (new file)
@@ -0,0 +1,75 @@
# Copyright 2017 The NXT Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
cmake_minimum_required(VERSION 2.8)
project(nxt C CXX)
function(SetCXX14 Target)
if(MSVC)
set_property(TARGET ${Target} APPEND PROPERTY COMPILE_OPTIONS "/std:c++14")
else()
set_property(TARGET ${Target} APPEND PROPERTY COMPILE_OPTIONS "-std=c++14")
endif()
endfunction()
function(SetPIC Target)
if(MSVC)
else()
set_property(TARGET ${Target} APPEND PROPERTY COMPILE_OPTIONS "-fPIC")
endif()
endfunction()
add_subdirectory(third_party)
add_subdirectory(generator)
set(INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/src/include)
Generate(
LIB_NAME nxt
LIB_TYPE SHARED
PRINT_NAME libNXT
COMMAND_LINE_ARGS
${GENERATOR_COMMON_ARGS}
-T nxt
)
target_include_directories(nxt PUBLIC ${GENERATED_DIR})
Generate(
LIB_NAME nxtcpp
LIB_TYPE SHARED
PRINT_NAME libNXT++
COMMAND_LINE_ARGS
${GENERATOR_COMMON_ARGS}
-T nxtcpp
)
target_include_directories(nxtcpp PUBLIC ${GENERATED_DIR} PUBLIC ${INCLUDE_DIR})
target_link_libraries(nxtcpp nxt)
SetCXX14(nxtcpp)
Generate(
LIB_NAME mock_nxt
LIB_TYPE SHARED
PRINT_NAME libMockNXT
COMMAND_LINE_ARGS
${GENERATOR_COMMON_ARGS}
-T mock_nxt
)
target_include_directories(mock_nxt PUBLIC ${GENERATED_DIR})
target_link_libraries(mock_nxt nxt gtest)
SetCXX14(mock_nxt)
add_subdirectory(src/backend)
add_subdirectory(src/wire)
add_subdirectory(src/tests)
add_subdirectory(examples)

CONTRIBUTING.md (new file)
@@ -0,0 +1,24 @@
# How to contribute
We'd love to accept your patches and contributions to this project. There are
just a few small guidelines you need to follow.
## Contributor License Agreement
Contributions to this project must be accompanied by a Contributor License
Agreement. You (or your employer) retain the copyright to your contribution;
this simply gives us permission to use and redistribute your contributions as
part of the project. Head over to <https://cla.developers.google.com/> to see
your current agreements on file or to sign a new one.
You generally only need to submit a CLA once, so if you've already submitted one
(even if it was for a different project), you probably don't need to do it
again.
## Code reviews
All submissions, including submissions by project members, require review. We
use GitHub pull requests for this purpose. Consult
[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
information on using pull requests.

LICENSE (new file)
@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

README.md (new file)
@@ -0,0 +1,121 @@
# NXT, standalone part
NXT is an unoriginal name for Chromium's investigation and prototyping of a NeXT generation graphics API for the Web. This repository contains a native library implementing NXT on multiple backends, as well as some code generators used for the integration in Chromium. NXT is not an official Google product.
We focused our efforts on two axes:
- An investigation of the constraints coming from the Web and in particular portability, for which we looked at the intersection of the designs of D3D12, Metal, Vulkan, OpenGL and D3D11. See links to some of our investigations below.
- A prototype API inspired by all of D3D12, Metal and Vulkan, but none in particular. The API works on two backends, OpenGL and Metal, and is usable from native code (think WebAssembly) and from JavaScript inside of Chrome. Our focus was not to have a complete API but to show the breadth of potential usage.
We're making our investigation and prototype public to provide another example for the upcoming discussion in the “GPU for the Web” W3C community group.
NXT currently has the following features:
- Command buffers, graphics and compute pipelines
- Textures, samplers, vertex / index / uniform / storage buffers.
- Descriptor sets (called bind groups) and push constants
- SPIRV for the shading language
- Validation
NXT is missing a lot of things to be usable for anything other than prototyping:
- Render-targets / render passes
- Most of the fixed function pipeline state
- Barriers / resource transitions and GPU - CPU synchronization
- Buffer mapping
- ...
We chose to use SPIRV in our prototype because it was the only shading language with translators to the other shading languages, thanks to [SPIRV-Cross](https://github.com/KhronosGroup/SPIRV-Cross), which saved us a ton of work. SPIRV-Cross doesn't have an HLSL backend, so we didn't attempt a D3D12 backend (and D3D11 was too limiting for our prototype API). Only the Metal and OpenGL backends are functional at this time. The OpenGL backend let us integrate into Chromium very easily.
## Links
Some of the investigations we made on the design of potential backend APIs:
- [Binding model investigation](https://drive.google.com/open?id=1_xeTnk6DlN7YmePQQAlnHndA043rgwBzUYtatk5y7kQ)
- [Data uploads investigation](https://drive.google.com/open?id=1Mi9l14zG8HzJ5Z6107SdPhON0mq4d-3SUI8iS631nek)
- [Resource creation investigation](https://drive.google.com/open?id=1hK1SkTFkXJXPjyla0EEl1fOIwJSc6T41AV2mGiovyFU)
- [Vertex setup investigation](https://drive.google.com/open?id=1SIUpdg-6Xm5FFF1ktdBfnR5oRKjyPAfXir7Drui4cYM)
[Another presentation](https://drive.google.com/open?id=1mLQEM__twfivV7nJLDBIomS9pegOYkJQWyM6lTse4PQ) about our work with more details on the architecture of the prototype, and a [video](https://youtu.be/ThlZ5K4hJvo) of the demo we showed.
TODO: add a link to the NXT-chromium repo once it is uploaded.
## Key elements of the prototype's architecture
### Builder pattern for object creation
In NXT, object creation is done through builder objects that gather initialization parameters with a fluent interface and return the initialized object when GetResult() is called.
In addition to the improved type-safety and subjective prettiness compared to giant constructors, this style enables additional optimizations. For example, it removes the need to check whether an object is still being built, and it allows a backend to forget parameters it doesn't care about.
Here's an example of buffer creation:
```cpp
nxt::Buffer buffer = device.CreateBufferBuilder()
.SetUsage(nxt::BufferUsageBit::Uniform | nxt::BufferUsageBit::Mapped)
.SetSize(42)
.GetResult();
```
### The wire client-server and error handling
What we call the wire is an API command buffer for NXT. The name was chosen to avoid conflict with the “command buffer” concept in graphics APIs.
Originally OpenGL was designed as a client-server architecture with asynchronous errors and objects that could be used by the client before they were created by the server. Over time more client state-tracking was added, but the core asynchronous structure remained. This enabled OpenGL ES 2 / WebGL to be implemented in Chromium, in which the web page and the GPU driver live in different processes. In addition to security, this separation helps with performance in CPU-bound WebGL apps.
For this reason we built NXT as a network-transparent API so that it could integrate nicely in the Chromium architecture, and we believe any next-generation Web API would have to be network-transparent too.
In NXT, as in OpenGL, API objects can be used immediately after they have been created on the client, even if the server hasn't seen the creation command yet. If object creation succeeds, everything happens transparently; otherwise the object is tagged as being an error. NXT calls with error-tagged objects follow these rules:
- Functions result in a no-op.
- Functions returning an object return an error value.
- Builder methods mark the builder as an error value.
The idea is that a whole bunch of object creation can be done when the application loads, then all the objects checked once for any error. The concept presented above is similar to [promise pipelining](http://www.erights.org/elib/distrib/pipeline.html) and to the [Maybe monad](https://en.wikipedia.org/wiki/Monad_(functional_programming)#The_Maybe_monad).
Currently the wire only has client to server communication and there is no way to know the error status of objects or read API data like the content of buffers. In our prototype the wire is responsible for object lifetime validation.
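Below is a minimal sketch of the error-tagging rules, reusing the builder API from the earlier example. It assumes, purely for illustration, that a zero-sized buffer is rejected by validation; the exact rule is not the point.
```cpp
// Sketch only: shows how an error propagates through the builder pattern.
// Assume (for illustration) that SetSize(0) is invalid.
nxt::Buffer maybeBroken = device.CreateBufferBuilder()
    .SetUsage(nxt::BufferUsageBit::Uniform)
    .SetSize(0)      // hypothetical invalid argument: the builder is now an error value
    .GetResult();    // returns an error-tagged nxt::Buffer instead of a valid object

// Every call involving the error-tagged buffer is a no-op; the application keeps
// running and only needs to check for errors once, after all objects are created.
uint32_t zero = 0;
maybeBroken.SetSubData(0, 1, &zero);
```
This is the same "create everything up front, check once" flow that makes the promise-pipelining comparison above apt.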
### Code generation
Our prototype heavily relies on code generation. It greatly improved iteration time on the API, as the generators kept the JavaScript bindings, IDL files, wire, C++ bindings and friends up to date. But it reduced flexibility in the API shape, as adding or changing the shape required modifying all generators in non-trivial ways.
For example, NXT can only return objects, which prevents mapping buffers or even reading back single pixel values. There is currently no way to know the error status on the client side. These improvements, and more, are planned, and contributions are welcome.
Other generators include:
- A C-header with the definition of nxtProcTable, the "real" underlying NXT API exposed by the backends (a sketch of what this boils down to follows this list)
- Glue code generating nxtProcTable for a backend with simple validation included (enum value checks etc.)
- A mock API for testing
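To give an idea of what the proc table boils down to, here is a hypothetical excerpt. It is not the actual generated header: the typedefs and member names below are guesses inferred from `examples/HelloTriangle.c`, which shows `nxtProcTable`, `nxtSetProcs`, and entry points such as `nxtDeviceCreateQueueBuilder`.
```cpp
// Hypothetical sketch of a generated proc-table header, for illustration only.
// The real header is produced by the generator from next.json.
typedef struct nxtDeviceImpl* nxtDevice;
typedef struct nxtQueueImpl* nxtQueue;
typedef struct nxtQueueBuilderImpl* nxtQueueBuilder;

typedef nxtQueueBuilder (*nxtProcDeviceCreateQueueBuilder)(nxtDevice device);
typedef nxtQueue (*nxtProcQueueBuilderGetResult)(nxtQueueBuilder builder);

typedef struct nxtProcTable_s {
    // One function pointer per API entry point; a backend fills these in and the
    // application installs the table with nxtSetProcs(&procs).
    nxtProcDeviceCreateQueueBuilder deviceCreateQueueBuilder;
    nxtProcQueueBuilderGetResult    queueBuilderGetResult;
    // ...one member per method described in next.json
} nxtProcTable;
```
A backend (OpenGL or Metal) fills in a table like this with its own implementations, so the same front-end code and examples can run against either backend.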
## Structure of the code
Here are the main files and directories:
```
/next.json - the JSON file describing the API that is used by the code generators
/examples - example code that was also used for end2end testing (it is not possible to do automated testing without being able to read back data)
/generator - The code generator and its templates
/generator/templates - The code generator templates
/generator/templates/blink - Templates used in the integration with Chromium
/src - Non-generator code for the ANGLE-like library
/src/backend
/src/backend/common - Handles all the state tracking and validation
/src/backend/metal - the Metal backend
/src/backend/opengl - the OpenGL backend
/src/wire - Glue code and interfaces for the wire
/third_party - external dependencies
```
## Getting and building the code
NXT standalone is a CMake project with git submodules. To download and build it, do the following:
```sh
git clone --recursive <insert github git repo url here>
cd <directory name>
mkdir build && cd build
cmake ..
make
# Run executables in examples/, --help will provide the options to choose the backend (compute only works on Metal on OSX) and the command buffer.
```
It is currently known to compile on Linux and OSX, and has some warnings on Windows when using MSVC (it doesn't handle code reachability in `enum class` switches correctly).

examples/Animometer.cpp (new file)
@@ -0,0 +1,163 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "Utils.h"
#include <cstdlib>
#include <cstdio>
#include <unistd.h>
#include <vector>
nxt::Device device;
nxt::Queue queue;
nxt::Pipeline pipeline;
float RandomFloat(float min, float max) {
float zeroOne = rand() / float(RAND_MAX);
return zeroOne * (max - min) + min;
}
struct ShaderData {
float scale;
float time;
float offsetX;
float offsetY;
float scalar;
float scalarOffset;
};
static std::vector<ShaderData> shaderData;
void init() {
nxtProcTable procs;
GetProcTableAndDevice(&procs, &device);
nxtSetProcs(&procs);
queue = device.CreateQueueBuilder().GetResult();
nxt::ShaderModule vsModule = CreateShaderModule(device, nxt::ShaderStage::Vertex, R"(
#version 450
layout(push_constant) uniform ConstantsBlock {
float scale;
float time;
float offsetX;
float offsetY;
float scalar;
float scalarOffset;
} c;
out vec4 v_color;
const vec4 positions[3] = vec4[3](
vec4( 0.0f, 0.1f, 0.0f, 1.0f),
vec4(-0.1f, -0.1f, 0.0f, 1.0f),
vec4( 0.1f, -0.1f, 0.0f, 1.0f)
);
const vec4 colors[3] = vec4[3](
vec4(1.0f, 0.0f, 0.0f, 1.0f),
vec4(0.0f, 1.0f, 0.0f, 1.0f),
vec4(0.0f, 0.0f, 1.0f, 1.0f)
);
void main() {
vec4 position = positions[gl_VertexIndex];
vec4 color = colors[gl_VertexIndex];
float fade = mod(c.scalarOffset + c.time * c.scalar / 10.0, 1.0);
if (fade < 0.5) {
fade = fade * 2.0;
} else {
fade = (1.0 - fade) * 2.0;
}
float xpos = position.x * c.scale;
float ypos = position.y * c.scale;
float angle = 3.14159 * 2.0 * fade;
float xrot = xpos * cos(angle) - ypos * sin(angle);
float yrot = xpos * sin(angle) + ypos * cos(angle);
xpos = xrot + c.offsetX;
ypos = yrot + c.offsetY;
v_color = vec4(fade, 1.0 - fade, 0.0, 1.0) + color;
gl_Position = vec4(xpos, ypos, 0.0, 1.0);
})"
);
nxt::ShaderModule fsModule = CreateShaderModule(device, nxt::ShaderStage::Fragment, R"(
#version 450
out vec4 fragColor;
in vec4 v_color;
void main() {
fragColor = v_color;
})"
);
pipeline = device.CreatePipelineBuilder()
.SetStage(nxt::ShaderStage::Vertex, vsModule, "main")
.SetStage(nxt::ShaderStage::Fragment, fsModule, "main")
.GetResult();
shaderData.resize(10000);
for (auto& data : shaderData) {
data.scale = RandomFloat(0.2, 0.4);
data.time = 0.0;
data.offsetX = RandomFloat(-0.9, 0.9);
data.offsetY = RandomFloat(-0.9, 0.9);
data.scalar = RandomFloat(0.5, 2.0);
data.scalarOffset = RandomFloat(0.0, 10.0);
}
}
void frame() {
static int f = 0;
f++;
size_t i = 0;
std::vector<nxt::CommandBuffer> commands(50);
for (int j = 0; j < 50; j++) {
nxt::CommandBufferBuilder builder = device.CreateCommandBufferBuilder()
.SetPipeline(pipeline)
.Clone();
for (int k = 0; k < 200; k++) {
shaderData[i].time = f / 60.0f;
builder.SetPushConstants(nxt::ShaderStageBit::Vertex, 0, 6, reinterpret_cast<uint32_t*>(&shaderData[i]))
.DrawArrays(3, 1, 0, 0);
i++;
}
commands[j] = builder.GetResult();
}
queue.Submit(50, commands.data());
SwapBuffers();
fprintf(stderr, "frame %i\n", f);
}
int main(int argc, const char* argv[]) {
if (!InitUtils(argc, argv)) {
return 1;
}
init();
while (!ShouldQuit()) {
frame();
usleep(16000);
}
// TODO release stuff
}

examples/BackendBinding.h (new file)
@@ -0,0 +1,34 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef UTILS_BACKENDBINDING_H_
#define UTILS_BACKENDBINDING_H_
#include <nxt/nxt.h>
struct GLFWwindow;
class BackendBinding {
public:
virtual void SetupGLFWWindowHints() = 0;
virtual void GetProcAndDevice(nxtProcTable* procs, nxtDevice* device) = 0;
virtual void SwapBuffers() = 0;
void SetWindow(GLFWwindow* window) {this->window = window;}
protected:
GLFWwindow* window = nullptr;
};
#endif // UTILS_BACKENDBINDING_H_

examples/CMakeLists.txt (new file)
@@ -0,0 +1,71 @@
# Copyright 2017 The NXT Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
list(APPEND UTILS_SOURCES
Utils.h
Utils.cpp
BackendBinding.h
)
if (APPLE)
list(APPEND UTILS_SOURCES
MetalBinding.mm
)
endif()
add_library(utils SHARED ${UTILS_SOURCES})
target_link_libraries(utils nxt_backend nxt_wire shaderc nxtcpp nxt)
SetCXX14(utils)
add_executable(CHelloTriangle HelloTriangle.c)
target_link_libraries(CHelloTriangle utils)
add_executable(CppHelloTriangle HelloTriangle.cpp)
target_link_libraries(CppHelloTriangle utils)
SetCXX14(CppHelloTriangle)
add_executable(ComputeBoids ComputeBoids.cpp)
target_link_libraries(ComputeBoids utils)
target_include_directories(ComputeBoids PUBLIC ../ ${GLM_INCLUDE_DIR})
SetCXX14(ComputeBoids)
add_executable(HelloVertices HelloVertices.cpp)
target_link_libraries(HelloVertices utils)
SetCXX14(HelloVertices)
add_executable(HelloInstancing HelloInstancing.cpp)
target_link_libraries(HelloInstancing utils)
SetCXX14(HelloInstancing)
add_executable(HelloIndices HelloIndices.cpp)
target_link_libraries(HelloIndices utils)
SetCXX14(HelloIndices)
add_executable(HelloUBO HelloUBO.cpp)
target_link_libraries(HelloUBO utils)
SetCXX14(HelloUBO)
add_executable(HelloCompute HelloCompute.cpp)
target_link_libraries(HelloCompute utils)
SetCXX14(HelloCompute)
add_executable(Animometer Animometer.cpp)
target_link_libraries(Animometer utils)
SetCXX14(Animometer)
add_executable(SpirvTest SpirvTest.cpp)
target_link_libraries(SpirvTest shaderc spirv-cross nxtcpp)
SetCXX14(SpirvTest)
add_subdirectory(glTFViewer)

examples/ComputeBoids.cpp (new file)
@@ -0,0 +1,324 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "Utils.h"
#include <array>
#include <cstring>
#include <random>
#include <unistd.h>
#include <glm/glm.hpp>
nxt::Device device;
nxt::Queue queue;
nxt::Buffer modelBuffer;
std::array<nxt::Buffer, 2> particleBuffers;
nxt::Pipeline renderPipeline;
nxt::Buffer updateParams;
nxt::Pipeline updatePipeline;
std::array<nxt::BindGroup, 2> updateBGs;
std::array<nxt::CommandBuffer, 2> commandBuffers;
size_t pingpong = 0;
static const uint32_t kNumParticles = 1000;
struct Particle {
glm::vec2 pos;
glm::vec2 vel;
};
struct SimParams {
float deltaT;
float rule1Distance;
float rule2Distance;
float rule3Distance;
float rule1Scale;
float rule2Scale;
float rule3Scale;
int particleCount;
};
void initBuffers() {
glm::vec2 model[3] = {
{-0.01, -0.02},
{0.01, -0.02},
{0.00, 0.02},
};
modelBuffer = device.CreateBufferBuilder()
.SetAllowedUsage(nxt::BufferUsageBit::Mapped | nxt::BufferUsageBit::Vertex)
.SetInitialUsage(nxt::BufferUsageBit::Mapped)
.SetSize(sizeof(model))
.GetResult();
modelBuffer.SetSubData(0, sizeof(model) / sizeof(uint32_t),
reinterpret_cast<uint32_t*>(model));
modelBuffer.FreezeUsage(nxt::BufferUsageBit::Vertex);
SimParams params = { 0.04, 0.1, 0.025, 0.025, 0.02, 0.05, 0.005, kNumParticles };
updateParams = device.CreateBufferBuilder()
.SetAllowedUsage(nxt::BufferUsageBit::Mapped | nxt::BufferUsageBit::Uniform)
.SetInitialUsage(nxt::BufferUsageBit::Mapped)
.SetSize(sizeof(SimParams))
.GetResult();
updateParams.SetSubData(0, sizeof(SimParams) / sizeof(uint32_t),
reinterpret_cast<uint32_t*>(&params));
updateParams.FreezeUsage(nxt::BufferUsageBit::Uniform);
std::vector<Particle> initialParticles(kNumParticles);
{
std::mt19937 generator;
std::uniform_real_distribution<float> dist(-1.0f, 1.0f);
for (auto& p : initialParticles)
{
p.pos = glm::vec2(dist(generator), dist(generator));
p.vel = glm::vec2(dist(generator), dist(generator)) * 0.1f;
}
}
for (int i = 0; i < 2; i++) {
particleBuffers[i] = device.CreateBufferBuilder()
.SetAllowedUsage(nxt::BufferUsageBit::Mapped | nxt::BufferUsageBit::Vertex | nxt::BufferUsageBit::Storage)
.SetInitialUsage(nxt::BufferUsageBit::Mapped)
.SetSize(sizeof(Particle) * kNumParticles)
.GetResult();
particleBuffers[i].SetSubData(0,
sizeof(Particle) * kNumParticles / sizeof(uint32_t),
reinterpret_cast<uint32_t*>(initialParticles.data()));
}
}
void initRender() {
nxt::ShaderModule vsModule = CreateShaderModule(device, nxt::ShaderStage::Vertex, R"(
#version 450
layout(location = 0) in vec2 a_particlePos;
layout(location = 1) in vec2 a_particleVel;
layout(location = 2) in vec2 a_pos;
void main() {
float angle = -atan(a_particleVel.x, a_particleVel.y);
vec2 pos = vec2(a_pos.x * cos(angle) - a_pos.y * sin(angle),
a_pos.x * sin(angle) + a_pos.y * cos(angle));
gl_Position = vec4(pos + a_particlePos, 0, 1);
}
)");
nxt::ShaderModule fsModule = CreateShaderModule(device, nxt::ShaderStage::Fragment, R"(
#version 450
out vec4 fragColor;
void main() {
fragColor = vec4(1.0);
}
)");
nxt::InputState inputState = device.CreateInputStateBuilder()
.SetAttribute(0, 0, nxt::VertexFormat::FloatR32G32, offsetof(Particle, pos))
.SetAttribute(1, 0, nxt::VertexFormat::FloatR32G32, offsetof(Particle, vel))
.SetInput(0, sizeof(Particle), nxt::InputStepMode::Instance)
.SetAttribute(2, 1, nxt::VertexFormat::FloatR32G32, 0)
.SetInput(1, sizeof(glm::vec2), nxt::InputStepMode::Vertex)
.GetResult();
renderPipeline = device.CreatePipelineBuilder()
.SetStage(nxt::ShaderStage::Vertex, vsModule, "main")
.SetStage(nxt::ShaderStage::Fragment, fsModule, "main")
.SetInputState(inputState)
.GetResult();
}
void initSim() {
nxt::ShaderModule module = CreateShaderModule(device, nxt::ShaderStage::Compute, R"(
#version 450
struct Particle {
vec2 pos;
vec2 vel;
};
layout(std140, set = 0, binding = 0) uniform SimParams {
float deltaT;
float rule1Distance;
float rule2Distance;
float rule3Distance;
float rule1Scale;
float rule2Scale;
float rule3Scale;
int particleCount;
} params;
layout(std140, set = 0, binding = 1) buffer ParticlesA {
Particle particlesA[1000];
};
layout(std140, set = 0, binding = 2) buffer ParticlesB {
Particle particlesB[1000];
};
void main() {
// https://github.com/austinEng/Project6-Vulkan-Flocking/blob/master/data/shaders/computeparticles/particle.comp
uint index = gl_GlobalInvocationID.x;
if (index >= params.particleCount) { return; }
vec2 vPos = particlesA[index].pos;
vec2 vVel = particlesA[index].vel;
vec2 cMass = vec2(0.0, 0.0);
vec2 cVel = vec2(0.0, 0.0);
vec2 colVel = vec2(0.0, 0.0);
int cMassCount = 0;
int cVelCount = 0;
vec2 pos;
vec2 vel;
for (int i = 0; i < params.particleCount; ++i) {
if (i == index) { continue; }
pos = particlesA[i].pos.xy;
vel = particlesA[i].vel.xy;
if (distance(pos, vPos) < params.rule1Distance) {
cMass += pos;
cMassCount++;
}
if (distance(pos, vPos) < params.rule2Distance) {
colVel -= (pos - vPos);
}
if (distance(pos, vPos) < params.rule3Distance) {
cVel += vel;
cVelCount++;
}
}
if (cMassCount > 0) {
cMass = cMass / cMassCount - vPos;
}
if (cVelCount > 0) {
cVel = cVel / cVelCount;
}
vVel += cMass * params.rule1Scale + colVel * params.rule2Scale + cVel * params.rule3Scale;
// clamp velocity for a more pleasing simulation.
vVel = normalize(vVel) * clamp(length(vVel), 0.0, 0.1);
// kinematic update
vPos += vVel * params.deltaT;
// Wrap around boundary
if (vPos.x < -1.0) vPos.x = 1.0;
if (vPos.x > 1.0) vPos.x = -1.0;
if (vPos.y < -1.0) vPos.y = 1.0;
if (vPos.y > 1.0) vPos.y = -1.0;
particlesB[index].pos = vPos;
// Write back
particlesB[index].vel = vVel;
}
)");
nxt::BindGroupLayout bgl = device.CreateBindGroupLayoutBuilder()
.SetBindingsType(nxt::ShaderStageBit::Compute, nxt::BindingType::UniformBuffer, 0, 1)
.SetBindingsType(nxt::ShaderStageBit::Compute, nxt::BindingType::StorageBuffer, 1, 2)
.GetResult();
nxt::PipelineLayout pl = device.CreatePipelineLayoutBuilder()
.SetBindGroupLayout(0, bgl)
.GetResult();
updatePipeline = device.CreatePipelineBuilder()
.SetLayout(pl)
.SetStage(nxt::ShaderStage::Compute, module, "main")
.GetResult();
nxt::BufferView updateParamsView = updateParams.CreateBufferViewBuilder()
.SetExtent(0, sizeof(SimParams))
.GetResult();
std::array<nxt::BufferView, 2> views;
for (uint32_t i = 0; i < 2; ++i) {
views[i] = particleBuffers[i].CreateBufferViewBuilder()
.SetExtent(0, kNumParticles * sizeof(Particle))
.GetResult();
}
for (uint32_t i = 0; i < 2; ++i) {
updateBGs[i] = device.CreateBindGroupBuilder()
.SetLayout(bgl)
.SetUsage(nxt::BindGroupUsage::Frozen)
.SetBufferViews(0, 1, &updateParamsView)
.SetBufferViews(1, 1, &views[i])
.SetBufferViews(2, 1, &views[(i + 1) % 2])
.GetResult();
}
}
void initCommandBuffers() {
static const uint32_t zeroOffsets[1] = {0};
for (int i = 0; i < 2; ++i) {
auto& bufferSrc = particleBuffers[i];
auto& bufferDst = particleBuffers[(i + 1) % 2];
commandBuffers[i] = device.CreateCommandBufferBuilder()
.SetPipeline(updatePipeline)
.TransitionBufferUsage(bufferSrc, nxt::BufferUsageBit::Storage)
.TransitionBufferUsage(bufferDst, nxt::BufferUsageBit::Storage)
.SetBindGroup(0, updateBGs[i])
.Dispatch(kNumParticles, 1, 1)
.SetPipeline(renderPipeline)
.TransitionBufferUsage(bufferDst, nxt::BufferUsageBit::Vertex)
.SetVertexBuffers(0, 1, &bufferDst, zeroOffsets)
.SetVertexBuffers(1, 1, &modelBuffer, zeroOffsets)
.DrawArrays(3, kNumParticles, 0, 0)
.GetResult();
}
}
void init() {
nxtProcTable procs;
GetProcTableAndDevice(&procs, &device);
nxtSetProcs(&procs);
queue = device.CreateQueueBuilder().GetResult();
initBuffers();
initRender();
initSim();
initCommandBuffers();
}
void frame() {
queue.Submit(1, &commandBuffers[pingpong]);
SwapBuffers();
pingpong = (pingpong + 1) % 2;
}
int main(int argc, const char* argv[]) {
if (!InitUtils(argc, argv)) {
return 1;
}
init();
while (!ShouldQuit()) {
frame();
usleep(16000);
}
// TODO release stuff
}

examples/HelloCompute.cpp (new file)
@@ -0,0 +1,154 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "Utils.h"
#include <string.h>
#include <unistd.h>
nxt::Device device;
nxt::Queue queue;
nxt::Buffer buffer;
nxt::Pipeline renderPipeline;
nxt::BindGroup renderBindGroup;
nxt::Pipeline computePipeline;
nxt::BindGroup computeBindGroup;
void init() {
nxtProcTable procs;
GetProcTableAndDevice(&procs, &device);
nxtSetProcs(&procs);
queue = device.CreateQueueBuilder().GetResult();
struct {uint32_t a; float b;} s;
memset(&s, 0, sizeof(s));
buffer = device.CreateBufferBuilder()
.SetAllowedUsage(nxt::BufferUsageBit::Mapped | nxt::BufferUsageBit::Uniform | nxt::BufferUsageBit::Storage)
.SetInitialUsage(nxt::BufferUsageBit::Mapped)
.SetSize(sizeof(s))
.GetResult();
buffer.SetSubData(0, sizeof(s) / sizeof(uint32_t), reinterpret_cast<uint32_t*>(&s));
nxt::BufferView view = buffer.CreateBufferViewBuilder()
.SetExtent(0, sizeof(s))
.GetResult();
{
nxt::ShaderModule module = CreateShaderModule(device, nxt::ShaderStage::Compute, R"(
#version 450
layout(set = 0, binding = 0) buffer myBlock {
int a;
float b;
} myStorage;
void main() {
myStorage.a = (myStorage.a + 1) % 256;
myStorage.b = mod((myStorage.b + 0.02), 1.0);
})"
);
nxt::BindGroupLayout bgl = device.CreateBindGroupLayoutBuilder()
.SetBindingsType(nxt::ShaderStageBit::Compute, nxt::BindingType::StorageBuffer, 0, 1)
.GetResult();
nxt::PipelineLayout pl = device.CreatePipelineLayoutBuilder()
.SetBindGroupLayout(0, bgl)
.GetResult();
computePipeline = device.CreatePipelineBuilder()
.SetLayout(pl)
.SetStage(nxt::ShaderStage::Compute, module, "main")
.GetResult();
computeBindGroup = device.CreateBindGroupBuilder()
.SetLayout(bgl)
.SetUsage(nxt::BindGroupUsage::Frozen)
.SetBufferViews(0, 1, &view)
.GetResult();
}
{
nxt::ShaderModule vsModule = CreateShaderModule(device, nxt::ShaderStage::Vertex, R"(
#version 450
const vec2 pos[3] = vec2[3](vec2(0.0f, 0.5f), vec2(-0.5f, -0.5f), vec2(0.5f, -0.5f));
void main() {
gl_Position = vec4(pos[gl_VertexIndex], 0.5, 1.0);
})"
);
nxt::ShaderModule fsModule = CreateShaderModule(device, nxt::ShaderStage::Fragment, R"(
#version 450
layout(set = 0, binding = 0) uniform myBlock {
int a;
float b;
} myUbo;
out vec4 fragColor;
void main() {
fragColor = vec4(1.0, myUbo.a / 255.0, myUbo.b, 1.0);
})"
);
nxt::BindGroupLayout bgl = device.CreateBindGroupLayoutBuilder()
.SetBindingsType(nxt::ShaderStageBit::Fragment, nxt::BindingType::UniformBuffer, 0, 1)
.GetResult();
nxt::PipelineLayout pl = device.CreatePipelineLayoutBuilder()
.SetBindGroupLayout(0, bgl)
.GetResult();
renderPipeline = device.CreatePipelineBuilder()
.SetLayout(pl)
.SetStage(nxt::ShaderStage::Vertex, vsModule, "main")
.SetStage(nxt::ShaderStage::Fragment, fsModule, "main")
.GetResult();
renderBindGroup = device.CreateBindGroupBuilder()
.SetLayout(bgl)
.SetUsage(nxt::BindGroupUsage::Frozen)
.SetBufferViews(0, 1, &view)
.GetResult();
}
}
void frame() {
nxt::CommandBuffer commands = device.CreateCommandBufferBuilder()
.SetPipeline(computePipeline)
.TransitionBufferUsage(buffer, nxt::BufferUsageBit::Storage)
.SetBindGroup(0, computeBindGroup)
.Dispatch(1, 1, 1)
.SetPipeline(renderPipeline)
.TransitionBufferUsage(buffer, nxt::BufferUsageBit::Uniform)
.SetBindGroup(0, renderBindGroup)
.DrawArrays(3, 1, 0, 0)
.GetResult();
queue.Submit(1, &commands);
SwapBuffers();
}
int main(int argc, const char* argv[]) {
if (!InitUtils(argc, argv)) {
return 1;
}
init();
while (!ShouldQuit()) {
frame();
usleep(16000);
}
// TODO release stuff
}

examples/HelloIndices.cpp (new file)
@@ -0,0 +1,117 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "Utils.h"
#include <unistd.h>
#include <vector>
nxt::Device device;
nxt::Buffer indexBuffer;
nxt::Buffer vertexBuffer;
nxt::Queue queue;
nxt::Pipeline pipeline;
void initBuffers() {
static const uint32_t indexData[3] = {
0, 1, 2,
};
indexBuffer = device.CreateBufferBuilder()
.SetAllowedUsage(nxt::BufferUsageBit::Mapped | nxt::BufferUsageBit::Index)
.SetInitialUsage(nxt::BufferUsageBit::Mapped)
.SetSize(sizeof(indexData))
.GetResult();
indexBuffer.SetSubData(0, sizeof(indexData) / sizeof(uint32_t), indexData);
indexBuffer.FreezeUsage(nxt::BufferUsageBit::Index);
static const float vertexData[12] = {
0.0f, 0.5f, 0.0f, 1.0f,
-0.5f, -0.5f, 0.0f, 1.0f,
0.5f, -0.5f, 0.0f, 1.0f,
};
vertexBuffer = device.CreateBufferBuilder()
.SetAllowedUsage(nxt::BufferUsageBit::Mapped | nxt::BufferUsageBit::Vertex)
.SetInitialUsage(nxt::BufferUsageBit::Mapped)
.SetSize(sizeof(vertexData))
.GetResult();
vertexBuffer.SetSubData(0, sizeof(vertexData) / sizeof(uint32_t),
reinterpret_cast<const uint32_t*>(vertexData));
vertexBuffer.FreezeUsage(nxt::BufferUsageBit::Vertex);
}
void init() {
nxtProcTable procs;
GetProcTableAndDevice(&procs, &device);
nxtSetProcs(&procs);
queue = device.CreateQueueBuilder().GetResult();
initBuffers();
nxt::ShaderModule vsModule = CreateShaderModule(device, nxt::ShaderStage::Vertex, R"(
#version 450
layout(location = 0) in vec4 pos;
void main() {
gl_Position = pos;
})"
);
nxt::ShaderModule fsModule = CreateShaderModule(device, nxt::ShaderStage::Fragment, R"(
#version 450
out vec4 fragColor;
void main() {
fragColor = vec4(1.0, 0.0, 0.0, 1.0);
})"
);
auto inputState = device.CreateInputStateBuilder()
.SetAttribute(0, 0, nxt::VertexFormat::FloatR32G32B32A32, 0)
.SetInput(0, 4 * sizeof(float), nxt::InputStepMode::Vertex)
.GetResult();
pipeline = device.CreatePipelineBuilder()
.SetStage(nxt::ShaderStage::Vertex, vsModule, "main")
.SetStage(nxt::ShaderStage::Fragment, fsModule, "main")
.SetInputState(inputState)
.GetResult();
}
void frame() {
static const uint32_t vertexBufferOffsets[1] = {0};
nxt::CommandBuffer commands = device.CreateCommandBufferBuilder()
.SetPipeline(pipeline)
.SetVertexBuffers(0, 1, &vertexBuffer, vertexBufferOffsets)
.SetIndexBuffer(indexBuffer, 0, nxt::IndexFormat::Uint32)
.DrawElements(3, 1, 0, 0)
.GetResult();
queue.Submit(1, &commands);
SwapBuffers();
}
int main(int argc, const char* argv[]) {
if (!InitUtils(argc, argv)) {
return 1;
}
init();
while (!ShouldQuit()) {
frame();
usleep(16000);
}
// TODO release stuff
}

examples/HelloInstancing.cpp (new file)
@@ -0,0 +1,124 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "Utils.h"
#include <unistd.h>
#include <vector>
nxt::Device device;
nxt::Buffer vertexBuffer;
nxt::Buffer instanceBuffer;
nxt::Queue queue;
nxt::Pipeline pipeline;
void initBuffers() {
static const float vertexData[12] = {
0.0f, 0.1f, 0.0f, 1.0f,
-0.1f, -0.1f, 0.0f, 1.0f,
0.1f, -0.1f, 0.0f, 1.0f,
};
vertexBuffer = device.CreateBufferBuilder()
.SetAllowedUsage(nxt::BufferUsageBit::Mapped | nxt::BufferUsageBit::Vertex)
.SetInitialUsage(nxt::BufferUsageBit::Mapped)
.SetSize(sizeof(vertexData))
.GetResult();
vertexBuffer.SetSubData(0, sizeof(vertexData) / sizeof(uint32_t),
reinterpret_cast<const uint32_t*>(vertexData));
vertexBuffer.FreezeUsage(nxt::BufferUsageBit::Vertex);
static const float instanceData[8] = {
-0.5f, -0.5f,
-0.5f, 0.5f,
0.5f, -0.5f,
0.5f, 0.5f,
};
instanceBuffer = device.CreateBufferBuilder()
.SetAllowedUsage(nxt::BufferUsageBit::Mapped | nxt::BufferUsageBit::Vertex)
.SetInitialUsage(nxt::BufferUsageBit::Mapped)
.SetSize(sizeof(instanceData))
.GetResult();
instanceBuffer.SetSubData(0, sizeof(instanceData) / sizeof(uint32_t),
reinterpret_cast<const uint32_t*>(instanceData));
instanceBuffer.FreezeUsage(nxt::BufferUsageBit::Vertex);
}
void init() {
nxtProcTable procs;
GetProcTableAndDevice(&procs, &device);
nxtSetProcs(&procs);
queue = device.CreateQueueBuilder().GetResult();
initBuffers();
nxt::ShaderModule vsModule = CreateShaderModule(device, nxt::ShaderStage::Vertex, R"(
#version 450
layout(location = 0) in vec4 pos;
layout(location = 1) in vec2 instance;
void main() {
gl_Position = vec4(pos.xy + instance, pos.zw);
})"
);
nxt::ShaderModule fsModule = CreateShaderModule(device, nxt::ShaderStage::Fragment, R"(
#version 450
out vec4 fragColor;
void main() {
fragColor = vec4(1.0, 0.0, 0.0, 1.0);
})"
);
auto inputState = device.CreateInputStateBuilder()
.SetAttribute(0, 0, nxt::VertexFormat::FloatR32G32B32A32, 0)
.SetInput(0, 4 * sizeof(float), nxt::InputStepMode::Vertex)
.SetAttribute(1, 1, nxt::VertexFormat::FloatR32G32, 0)
.SetInput(1, 2 * sizeof(float), nxt::InputStepMode::Instance)
.GetResult();
pipeline = device.CreatePipelineBuilder()
.SetStage(nxt::ShaderStage::Vertex, vsModule, "main")
.SetStage(nxt::ShaderStage::Fragment, fsModule, "main")
.SetInputState(inputState)
.GetResult();
}
void frame() {
static const uint32_t vertexBufferOffsets[1] = {0};
nxt::CommandBuffer commands = device.CreateCommandBufferBuilder()
.SetPipeline(pipeline)
.SetVertexBuffers(0, 1, &vertexBuffer, vertexBufferOffsets)
.SetVertexBuffers(1, 1, &instanceBuffer, vertexBufferOffsets)
.DrawArrays(3, 4, 0, 0)
.GetResult();
queue.Submit(1, &commands);
SwapBuffers();
}
int main(int argc, const char* argv[]) {
if (!InitUtils(argc, argv)) {
return 1;
}
init();
while (!ShouldQuit()) {
frame();
usleep(16000);
}
// TODO release stuff
}

examples/HelloTriangle.c (new file)
@@ -0,0 +1,90 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "Utils.h"
#include <unistd.h>
nxtDevice device;
nxtQueue queue;
nxtPipeline pipeline;
void init() {
nxtProcTable procs;
GetProcTableAndDevice(&procs, &device);
nxtSetProcs(&procs);
{
nxtQueueBuilder builder = nxtDeviceCreateQueueBuilder(device);
queue = nxtQueueBuilderGetResult(builder);
nxtQueueBuilderRelease(builder);
}
const char* vs =
"#version 450\n"
"const vec2 pos[3] = vec2[3](vec2(0.0f, 0.5f), vec2(-0.5f, -0.5f), vec2(0.5f, -0.5f));\n"
"void main() {\n"
" gl_Position = vec4(pos[gl_VertexIndex], 0.0, 1.0);\n"
"}\n";
nxtShaderModule vsModule = CreateShaderModule(device, NXT_SHADER_STAGE_VERTEX, vs);
const char* fs =
"#version 450\n"
"out vec4 fragColor;"
"void main() {\n"
" fragColor = vec4(1.0, 0.0, 0.0, 1.0);\n"
"}\n";
nxtShaderModule fsModule = CreateShaderModule(device, NXT_SHADER_STAGE_FRAGMENT, fs);
{
nxtPipelineBuilder builder = nxtDeviceCreatePipelineBuilder(device);
nxtPipelineBuilderSetStage(builder, NXT_SHADER_STAGE_VERTEX, vsModule, "main");
nxtPipelineBuilderSetStage(builder, NXT_SHADER_STAGE_FRAGMENT, fsModule, "main");
pipeline = nxtPipelineBuilderGetResult(builder);
nxtPipelineBuilderRelease(builder);
}
nxtShaderModuleRelease(vsModule);
nxtShaderModuleRelease(fsModule);
}
void frame() {
nxtCommandBuffer commands;
{
nxtCommandBufferBuilder builder = nxtDeviceCreateCommandBufferBuilder(device);
nxtCommandBufferBuilderSetPipeline(builder, pipeline);
nxtCommandBufferBuilderDrawArrays(builder, 3, 1, 0, 0);
commands = nxtCommandBufferBuilderGetResult(builder);
nxtCommandBufferBuilderRelease(builder);
}
nxtQueueSubmit(queue, 1, &commands);
nxtCommandBufferRelease(commands);
SwapBuffers();
}
int main(int argc, const char* argv[]) {
if (!InitUtils(argc, argv)) {
return 1;
}
init();
while (!ShouldQuit()) {
frame();
usleep(16000);
}
// TODO release stuff
}

examples/HelloTriangle.cpp (new file)
@@ -0,0 +1,185 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "Utils.h"
#include <unistd.h>
#include <vector>
nxt::Device device;
nxt::Buffer indexBuffer;
nxt::Buffer vertexBuffer;
nxt::Texture texture;
nxt::Sampler sampler;
nxt::Queue queue;
nxt::Pipeline pipeline;
nxt::BindGroup bindGroup;
void initBuffers() {
static const uint32_t indexData[3] = {
0, 1, 2,
};
indexBuffer = device.CreateBufferBuilder()
.SetAllowedUsage(nxt::BufferUsageBit::Mapped | nxt::BufferUsageBit::Index)
.SetInitialUsage(nxt::BufferUsageBit::Mapped)
.SetSize(sizeof(indexData))
.GetResult();
indexBuffer.SetSubData(0, sizeof(indexData) / sizeof(uint32_t), indexData);
indexBuffer.FreezeUsage(nxt::BufferUsageBit::Index);
static const float vertexData[12] = {
0.0f, 0.5f, 0.0f, 1.0f,
-0.5f, -0.5f, 0.0f, 1.0f,
0.5f, -0.5f, 0.0f, 1.0f,
};
vertexBuffer = device.CreateBufferBuilder()
.SetAllowedUsage(nxt::BufferUsageBit::Mapped | nxt::BufferUsageBit::Vertex)
.SetInitialUsage(nxt::BufferUsageBit::Mapped)
.SetSize(sizeof(vertexData))
.GetResult();
vertexBuffer.SetSubData(0, sizeof(vertexData) / sizeof(uint32_t),
reinterpret_cast<const uint32_t*>(vertexData));
vertexBuffer.FreezeUsage(nxt::BufferUsageBit::Vertex);
}
void initTextures() {
texture = device.CreateTextureBuilder()
.SetDimension(nxt::TextureDimension::e2D)
.SetExtent(1024, 1024, 1)
.SetFormat(nxt::TextureFormat::R8G8B8A8Unorm)
.SetMipLevels(1)
.SetAllowedUsage(nxt::TextureUsageBit::TransferDst | nxt::TextureUsageBit::Sampled)
.GetResult();
sampler = device.CreateSamplerBuilder()
.SetFilterMode(nxt::FilterMode::Linear, nxt::FilterMode::Linear, nxt::FilterMode::Linear)
.GetResult();
// Initialize the texture with arbitrary data until we can load images
std::vector<uint8_t> data(4 * 1024 * 1024, 0);
for (size_t i = 0; i < data.size(); ++i) {
data[i] = i % 253;
}
nxt::Buffer stagingBuffer = device.CreateBufferBuilder()
.SetAllowedUsage(nxt::BufferUsageBit::Mapped | nxt::BufferUsageBit::TransferSrc)
.SetInitialUsage(nxt::BufferUsageBit::Mapped)
.SetSize(data.size())
.GetResult();
stagingBuffer.SetSubData(0, data.size() / sizeof(uint32_t), reinterpret_cast<uint32_t*>(data.data()));
stagingBuffer.FreezeUsage(nxt::BufferUsageBit::TransferSrc);
nxt::CommandBuffer copy = device.CreateCommandBufferBuilder()
.TransitionTextureUsage(texture, nxt::TextureUsageBit::TransferDst)
.CopyBufferToTexture(stagingBuffer, texture, 0, 0, 0, 1024, 1024, 1, 0)
.GetResult();
queue.Submit(1, &copy);
texture.FreezeUsage(nxt::TextureUsageBit::Sampled);
}
void init() {
nxtProcTable procs;
GetProcTableAndDevice(&procs, &device);
nxtSetProcs(&procs);
queue = device.CreateQueueBuilder().GetResult();
initBuffers();
initTextures();
nxt::ShaderModule vsModule = CreateShaderModule(device, nxt::ShaderStage::Vertex, R"(
#version 450
layout(location = 0) in vec4 pos;
void main() {
gl_Position = pos;
})"
);
nxt::ShaderModule fsModule = CreateShaderModule(device, nxt::ShaderStage::Fragment, R"(
#version 450
layout(set = 0, binding = 0) uniform sampler mySampler;
layout(set = 0, binding = 1) uniform texture2D myTexture;
out vec4 fragColor;
void main() {
fragColor = texture(sampler2D(myTexture, mySampler), gl_FragCoord.xy / vec2(640.0, 480.0));
})"
);
auto inputState = device.CreateInputStateBuilder()
.SetAttribute(0, 0, nxt::VertexFormat::FloatR32G32B32A32, 0)
.SetInput(0, 4 * sizeof(float), nxt::InputStepMode::Vertex)
.GetResult();
nxt::BindGroupLayout bgl = device.CreateBindGroupLayoutBuilder()
.SetBindingsType(nxt::ShaderStageBit::Fragment, nxt::BindingType::Sampler, 0, 1)
.SetBindingsType(nxt::ShaderStageBit::Fragment, nxt::BindingType::SampledTexture, 1, 1)
.GetResult();
nxt::PipelineLayout pl = device.CreatePipelineLayoutBuilder()
.SetBindGroupLayout(0, bgl)
.GetResult();
pipeline = device.CreatePipelineBuilder()
.SetLayout(pl)
.SetStage(nxt::ShaderStage::Vertex, vsModule, "main")
.SetStage(nxt::ShaderStage::Fragment, fsModule, "main")
.SetInputState(inputState)
.GetResult();
nxt::TextureView view = texture.CreateTextureViewBuilder().GetResult();
bindGroup = device.CreateBindGroupBuilder()
.SetLayout(bgl)
.SetUsage(nxt::BindGroupUsage::Frozen)
.SetSamplers(0, 1, &sampler)
.SetTextureViews(1, 1, &view)
.GetResult();
}
struct {uint32_t a; float b;} s;
void frame() {
s.a = (s.a + 1) % 256;
s.b += 0.02;
if (s.b >= 1.0f) {s.b = 0.0f;}
static const uint32_t vertexBufferOffsets[1] = {0};
nxt::CommandBuffer commands = device.CreateCommandBufferBuilder()
.SetPipeline(pipeline)
.SetBindGroup(0, bindGroup)
.SetVertexBuffers(0, 1, &vertexBuffer, vertexBufferOffsets)
.SetIndexBuffer(indexBuffer, 0, nxt::IndexFormat::Uint32)
.DrawElements(3, 1, 0, 0)
.GetResult();
queue.Submit(1, &commands);
SwapBuffers();
}
int main(int argc, const char* argv[]) {
if (!InitUtils(argc, argv)) {
return 1;
}
init();
while (!ShouldQuit()) {
frame();
usleep(16000);
}
// TODO release stuff
}

examples/HelloUBO.cpp (new file)
@@ -0,0 +1,116 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "Utils.h"
#include <unistd.h>
nxt::Device device;
nxt::Queue queue;
nxt::Pipeline pipeline;
nxt::Buffer buffer;
nxt::BindGroup bindGroup;
struct {uint32_t a; float b;} s;
void init() {
nxtProcTable procs;
GetProcTableAndDevice(&procs, &device);
nxtSetProcs(&procs);
queue = device.CreateQueueBuilder().GetResult();
nxt::ShaderModule vsModule = CreateShaderModule(device, nxt::ShaderStage::Vertex, R"(
#version 450
const vec2 pos[3] = vec2[3](vec2(0.0f, 0.5f), vec2(-0.5f, -0.5f), vec2(0.5f, -0.5f));
void main() {
gl_Position = vec4(pos[gl_VertexIndex], 0.5, 1.0);
})"
);
nxt::ShaderModule fsModule = CreateShaderModule(device, nxt::ShaderStage::Fragment, R"(
#version 450
layout(set = 0, binding = 0) uniform myBlock {
int a;
float b;
} myUbo;
out vec4 fragColor;
void main() {
fragColor = vec4(1.0, myUbo.a / 255.0, myUbo.b, 1.0);
})"
);
nxt::BindGroupLayout bgl = device.CreateBindGroupLayoutBuilder()
.SetBindingsType(nxt::ShaderStageBit::Fragment, nxt::BindingType::UniformBuffer, 0, 1)
.GetResult();
nxt::PipelineLayout pl = device.CreatePipelineLayoutBuilder()
.SetBindGroupLayout(0, bgl)
.GetResult();
pipeline = device.CreatePipelineBuilder()
.SetLayout(pl)
.SetStage(nxt::ShaderStage::Vertex, vsModule, "main")
.SetStage(nxt::ShaderStage::Fragment, fsModule, "main")
.GetResult();
buffer = device.CreateBufferBuilder()
.SetAllowedUsage(nxt::BufferUsageBit::Mapped | nxt::BufferUsageBit::Uniform)
.SetInitialUsage(nxt::BufferUsageBit::Mapped)
.SetSize(sizeof(s))
.GetResult();
nxt::BufferView view = buffer.CreateBufferViewBuilder()
.SetExtent(0, sizeof(s))
.GetResult();
bindGroup = device.CreateBindGroupBuilder()
.SetLayout(bgl)
.SetUsage(nxt::BindGroupUsage::Frozen)
.SetBufferViews(0, 1, &view)
.GetResult();
}
void frame() {
s.a = (s.a + 1) % 256;
s.b += 0.02;
if (s.b >= 1.0f) {s.b = 0.0f;}
buffer.TransitionUsage(nxt::BufferUsageBit::Mapped);
buffer.SetSubData(0, sizeof(s) / sizeof(uint32_t), reinterpret_cast<uint32_t*>(&s));
nxt::CommandBuffer commands = device.CreateCommandBufferBuilder()
.SetPipeline(pipeline)
.TransitionBufferUsage(buffer, nxt::BufferUsageBit::Uniform)
.SetBindGroup(0, bindGroup)
.DrawArrays(3, 1, 0, 0)
.GetResult();
queue.Submit(1, &commands);
SwapBuffers();
}
int main(int argc, const char* argv[]) {
if (!InitUtils(argc, argv)) {
return 1;
}
init();
while (!ShouldQuit()) {
frame();
usleep(16000);
}
// TODO release stuff
}
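Note that SetSubData counts 32-bit words rather than bytes, which is why the example uploads sizeof(s) / sizeof(uint32_t) elements; any block written this way is assumed to have a size that is a multiple of 4 (true for the {uint32_t a; float b;} block here). As a sketch only, not a file in this commit, the per-frame update could be wrapped to make that assumption explicit:
// Re-map the buffer and upload one whole struct, using the same calls as frame() above.
template <typename T>
void UpdateWholeBuffer(nxt::Buffer& buffer, const T& value) {
    static_assert(sizeof(T) % sizeof(uint32_t) == 0, "SetSubData takes a count of 32-bit words");
    buffer.TransitionUsage(nxt::BufferUsageBit::Mapped);
    buffer.SetSubData(0, sizeof(T) / sizeof(uint32_t),
        reinterpret_cast<const uint32_t*>(&value));
}
// frame() would then call UpdateWholeBuffer(buffer, s) before transitioning the buffer
// back to nxt::BufferUsageBit::Uniform inside the command buffer.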

104
examples/HelloVertices.cpp Normal file
View File

@ -0,0 +1,104 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "Utils.h"
#include <unistd.h>
#include <vector>
nxt::Device device;
nxt::Buffer vertexBuffer;
nxt::Queue queue;
nxt::Pipeline pipeline;
void initBuffers() {
static const float vertexData[12] = {
0.0f, 0.5f, 0.0f, 1.0f,
-0.5f, -0.5f, 0.0f, 1.0f,
0.5f, -0.5f, 0.0f, 1.0f,
};
vertexBuffer = device.CreateBufferBuilder()
.SetAllowedUsage(nxt::BufferUsageBit::Mapped | nxt::BufferUsageBit::Vertex)
.SetInitialUsage(nxt::BufferUsageBit::Mapped)
.SetSize(sizeof(vertexData))
.GetResult();
vertexBuffer.SetSubData(0, sizeof(vertexData) / sizeof(uint32_t),
reinterpret_cast<const uint32_t*>(vertexData));
vertexBuffer.FreezeUsage(nxt::BufferUsageBit::Vertex);
}
void init() {
nxtProcTable procs;
GetProcTableAndDevice(&procs, &device);
nxtSetProcs(&procs);
queue = device.CreateQueueBuilder().GetResult();
initBuffers();
nxt::ShaderModule vsModule = CreateShaderModule(device, nxt::ShaderStage::Vertex, R"(
#version 450
layout(location = 0) in vec4 pos;
void main() {
gl_Position = pos;
})"
);
nxt::ShaderModule fsModule = CreateShaderModule(device, nxt::ShaderStage::Fragment, R"(
#version 450
out vec4 fragColor;
void main() {
fragColor = vec4(1.0, 0.0, 0.0, 1.0);
})"
);
auto inputState = device.CreateInputStateBuilder()
.SetAttribute(0, 0, nxt::VertexFormat::FloatR32G32B32A32, 0)
.SetInput(0, 4 * sizeof(float), nxt::InputStepMode::Vertex)
.GetResult();
pipeline = device.CreatePipelineBuilder()
.SetStage(nxt::ShaderStage::Vertex, vsModule, "main")
.SetStage(nxt::ShaderStage::Fragment, fsModule, "main")
.SetInputState(inputState)
.GetResult();
}
void frame() {
static const uint32_t vertexBufferOffsets[1] = {0};
nxt::CommandBuffer commands = device.CreateCommandBufferBuilder()
.SetPipeline(pipeline)
.SetVertexBuffers(0, 1, &vertexBuffer, vertexBufferOffsets)
.DrawArrays(3, 1, 0, 0)
.GetResult();
queue.Submit(1, &commands);
SwapBuffers();
}
int main(int argc, const char* argv[]) {
if (!InitUtils(argc, argv)) {
return 1;
}
init();
while (!ShouldQuit()) {
frame();
usleep(16000);
}
// TODO release stuff
}
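Judging from the multi-stream usage in the glTF viewer below, SetAttribute appears to bind a shader input location to a vertex buffer slot with a format and byte offset, and SetInput declares the stride and step mode of that slot; in every example in this commit the location and the slot happen to coincide. As a sketch only, a second, hypothetical per-vertex color stream (not part of this example) would be declared like this:
auto inputState = device.CreateInputStateBuilder()
    .SetAttribute(0, 0, nxt::VertexFormat::FloatR32G32B32A32, 0)  // position, buffer slot 0
    .SetInput(0, 4 * sizeof(float), nxt::InputStepMode::Vertex)
    .SetAttribute(1, 1, nxt::VertexFormat::FloatR32G32B32, 0)     // hypothetical color, slot 1
    .SetInput(1, 3 * sizeof(float), nxt::InputStepMode::Vertex)
    .GetResult();
// The vertex shader would add `layout(location = 1) in vec3 color;` and frame() would bind
// the extra buffer with SetVertexBuffers(1, 1, &colorBuffer, offsets).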

78
examples/MetalBinding.mm Normal file
View File

@ -0,0 +1,78 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "BackendBinding.h"
#define GLFW_EXPOSE_NATIVE_COCOA
#include "GLFW/glfw3.h"
#include "GLFW/glfw3native.h"
#import <QuartzCore/CAMetalLayer.h>
#import <Metal/Metal.h>
namespace backend {
namespace metal {
void Init(id<MTLDevice> metalDevice, nxtProcTable* procs, nxtDevice* device);
void SetNextDrawable(nxtDevice device, id<CAMetalDrawable> drawable);
void Present(nxtDevice device);
}
}
class MetalBinding : public BackendBinding {
public:
void SetupGLFWWindowHints() override {
glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
}
void GetProcAndDevice(nxtProcTable* procs, nxtDevice* device) override {
metalDevice = MTLCreateSystemDefaultDevice();
id nsWindow = glfwGetCocoaWindow(window);
NSView* contentView = [nsWindow contentView];
[contentView setWantsLayer: YES];
layer = [CAMetalLayer layer];
[layer setDevice: metalDevice];
[layer setPixelFormat: MTLPixelFormatBGRA8Unorm];
[layer setFramebufferOnly: YES];
[layer setDrawableSize: [contentView bounds].size];
[contentView setLayer: layer];
backend::metal::Init(metalDevice, procs, device);
backendDevice = *device;
backend::metal::SetNextDrawable(backendDevice, GetNextDrawable());
}
void SwapBuffers() override {
backend::metal::Present(backendDevice);
backend::metal::SetNextDrawable(backendDevice, GetNextDrawable());
}
private:
id<CAMetalDrawable> GetNextDrawable() {
lastDrawable = [layer nextDrawable];
return lastDrawable;
}
id<MTLDevice> metalDevice = nil;
CAMetalLayer* layer = nullptr;
id<CAMetalDrawable> lastDrawable = nil;
nxtDevice backendDevice = nullptr;
};
BackendBinding* CreateMetalBinding() {
return new MetalBinding;
}

89
examples/SpirvTest.cpp Normal file
View File

@ -0,0 +1,89 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <iostream>
#include <nxt/nxtcpp.h>
#include <shaderc/shaderc.hpp>
#include <spirv-cross/spirv_glsl.hpp>
bool CompileToSpv(std::string source, nxt::ShaderStage stage, std::vector<uint32_t>* spv) {
shaderc::Compiler compiler;
shaderc::CompileOptions options;
shaderc_shader_kind kind;
switch (stage) {
case nxt::ShaderStage::Vertex:
kind = shaderc_glsl_vertex_shader;
break;
case nxt::ShaderStage::Fragment:
kind = shaderc_glsl_fragment_shader;
break;
default:
return false;
}
{
auto result = compiler.CompileGlslToSpvAssembly(source.c_str(), source.size(), kind, "toto", options);
if (result.GetCompilationStatus() != shaderc_compilation_status_success) {
std::cerr << result.GetErrorMessage();
return false;
}
std::cout << "*** Begin spirv assembly" << std::endl;
std::cout << std::string(result.cbegin(), result.cend()) << std::endl;
std::cout << "*** End spirv assembly" << std::endl;
}
auto result = compiler.CompileGlslToSpv(source.c_str(), source.size(), kind, "toto", options);
if (result.GetCompilationStatus() != shaderc_compilation_status_success) {
std::cerr << result.GetErrorMessage();
return false;
}
spv->assign(result.cbegin(), result.cend());
return true;
}
void TestSpv(std::string source, nxt::ShaderStage stage) {
std::vector<uint32_t> spv;
if (!CompileToSpv(source, stage, &spv)) {
return;
}
spirv_cross::CompilerGLSL glsl(std::move(spv));
spirv_cross::CompilerGLSL::Options options;
options.version = 450;
options.es = false;
glsl.set_options(options);
std::cout << "*** Begin glsl cross" << std::endl;
std::cout << glsl.compile() << std::endl;
std::cout << "*** End glsl cross" << std::endl;
}
int main(int argc, char const* const* argv) {
const char* vs =
"#version 450\n"
"layout(push_constant) uniform toto {int foo;} tata;\n"
"const vec2 pos[3] = vec2[3](vec2(0.0f, 0.5f), vec2(-0.5f, -0.5f), vec2(0.5f, -0.5f));\n"
"void main() {\n"
" gl_Position = vec4(pos[gl_VertexIndex], 0.0, 1.0);\n"
"}\n";
TestSpv(vs, nxt::ShaderStage::Vertex);
return 0;
}

267
examples/Utils.cpp Normal file
View File

@ -0,0 +1,267 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nxt/nxt.h>
#include <nxt/nxtcpp.h>
#include <shaderc/shaderc.hpp>
#include "GLFW/glfw3.h"
#include "BackendBinding.h"
#include "../src/wire/TerribleCommandBuffer.h"
#include <cstring>
#include <iostream>
#include <sstream>
#include <iomanip>
BackendBinding* CreateMetalBinding();
namespace backend {
void RegisterSynchronousErrorCallback(nxtDevice device, void(*)(const char*, void*), void* userData);
namespace opengl {
void Init(void* (*getProc)(const char*), nxtProcTable* procs, nxtDevice* device);
void HACKCLEAR();
}
}
class OpenGLBinding : public BackendBinding {
public:
void SetupGLFWWindowHints() override {
#ifdef __APPLE__
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 1);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GLFW_TRUE);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
#else
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 5);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GLFW_TRUE);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
#endif
}
void GetProcAndDevice(nxtProcTable* procs, nxtDevice* device) override {
glfwMakeContextCurrent(window);
backend::opengl::Init(reinterpret_cast<void*(*)(const char*)>(glfwGetProcAddress), procs, device);
}
void SwapBuffers() override {
glfwSwapBuffers(window);
backend::opengl::HACKCLEAR();
}
};
enum class BackendType {
OpenGL,
Metal,
};
enum class CmdBufType {
None,
Terrible,
};
static BackendType backendType = BackendType::OpenGL;
static CmdBufType cmdBufType = CmdBufType::Terrible;
static BackendBinding* binding = nullptr;
static GLFWwindow* window = nullptr;
static nxt::wire::CommandHandler* wireServer = nullptr;
static nxt::wire::TerribleCommandBuffer* cmdBuf = nullptr;
void HandleSynchronousError(const char* errorMessage, void* userData) {
std::cerr << errorMessage << std::endl;
if (userData != nullptr) {
auto wireServer = reinterpret_cast<nxt::wire::CommandHandler*>(userData);
wireServer->OnSynchronousError();
}
}
void GetProcTableAndDevice(nxtProcTable* procs, nxt::Device* device) {
switch (backendType) {
case BackendType::OpenGL:
binding = new OpenGLBinding;
break;
case BackendType::Metal:
#if defined(__APPLE__)
binding = CreateMetalBinding();
#else
fprintf(stderr, "Metal backend no present on this platform\n");
#endif
break;
}
if (!glfwInit()) {
return;
}
binding->SetupGLFWWindowHints();
window = glfwCreateWindow(640, 480, "NXT window", nullptr, nullptr);
if (!window) {
return;
}
binding->SetWindow(window);
nxtDevice backendDevice;
nxtProcTable backendProcs;
binding->GetProcAndDevice(&backendProcs, &backendDevice);
switch (cmdBufType) {
case CmdBufType::None:
*procs = backendProcs;
*device = nxt::Device::Acquire(backendDevice);
break;
case CmdBufType::Terrible:
{
wireServer = nxt::wire::CreateCommandHandler(backendDevice, backendProcs);
cmdBuf = new nxt::wire::TerribleCommandBuffer(wireServer);
nxtDevice clientDevice;
nxtProcTable clientProcs;
nxt::wire::NewClientDevice(&clientProcs, &clientDevice, cmdBuf);
*procs = clientProcs;
*device = nxt::Device::Acquire(clientDevice);
}
break;
}
backend::RegisterSynchronousErrorCallback(backendDevice, HandleSynchronousError, wireServer);
}
nxt::ShaderModule CreateShaderModule(const nxt::Device& device, nxt::ShaderStage stage, const char* source) {
shaderc::Compiler compiler;
shaderc::CompileOptions options;
shaderc_shader_kind kind;
switch (stage) {
case nxt::ShaderStage::Vertex:
kind = shaderc_glsl_vertex_shader;
break;
case nxt::ShaderStage::Fragment:
kind = shaderc_glsl_fragment_shader;
break;
case nxt::ShaderStage::Compute:
kind = shaderc_glsl_compute_shader;
break;
}
auto result = compiler.CompileGlslToSpv(source, strlen(source), kind, "myshader?", options);
if (result.GetCompilationStatus() != shaderc_compilation_status_success) {
std::cerr << result.GetErrorMessage();
return {};
}
size_t size = (result.cend() - result.cbegin());
#ifdef DUMP_SPIRV_ASSEMBLY
{
auto resultAsm = compiler.CompileGlslToSpvAssembly(source, strlen(source), kind, "myshader?", options);
size_t sizeAsm = (resultAsm.cend() - resultAsm.cbegin());
char* buffer = reinterpret_cast<char*>(malloc(sizeAsm + 1));
memcpy(buffer, resultAsm.cbegin(), sizeAsm);
buffer[sizeAsm] = '\0';
printf("SPIRV ASSEMBLY DUMP START\n%s\nSPIRV ASSEMBLY DUMP END\n", buffer);
free(buffer);
}
#endif
#ifdef DUMP_SPIRV_JS_ARRAY
printf("SPIRV JS ARRAY DUMP START\n");
for (size_t i = 0; i < size; i++) {
printf("%#010x", result.cbegin()[i]);
if ((i + 1) % 4 == 0) {
printf(",\n");
} else {
printf(", ");
}
}
printf("\n");
printf("SPIRV JS ARRAY DUMP END\n");
#endif
return device.CreateShaderModuleBuilder()
.SetSource(size, result.cbegin())
.GetResult();
}
extern "C" {
bool InitUtils(int argc, const char** argv) {
for (int i = 0; i < argc; i++) {
if (std::string("-b") == argv[i] || std::string("--backend") == argv[i]) {
i++;
if (i < argc && std::string("opengl") == argv[i]) {
backendType = BackendType::OpenGL;
continue;
}
if (i < argc && std::string("metal") == argv[i]) {
backendType = BackendType::Metal;
continue;
}
fprintf(stderr, "--backend expects a backend name (opengl, metal)\n");
return false;
}
if (std::string("-c") == argv[i] || std::string("--comand-buffer") == argv[i]) {
i++;
if (i < argc && std::string("none") == argv[i]) {
cmdBufType = CmdBufType::None;
continue;
}
if (i < argc && std::string("terrible") == argv[i]) {
cmdBufType = CmdBufType::Terrible;
continue;
}
fprintf(stderr, "--command-buffer expects a command buffer name (none, terrible)\n");
return false;
}
if (std::string("-h") == argv[i] || std::string("--help") == argv[i]) {
printf("Usage: %s [-b BACKEND] [-c COMMAND_BUFFER]\n", argv[0]);
printf(" BACKEND is one of: opengl, metal\n");
printf(" COMMAND_BUFFER is one of: none, terrible\n");
return false;
}
}
return true;
}
void GetProcTableAndDevice(nxtProcTable* procs, nxtDevice* device) {
nxt::Device cppDevice;
GetProcTableAndDevice(procs, &cppDevice);
*device = cppDevice.Release();
}
nxtShaderModule CreateShaderModule(nxtDevice device, nxtShaderStage stage, const char* source) {
return CreateShaderModule(device, static_cast<nxt::ShaderStage>(stage), source).Release();
}
void SwapBuffers() {
if (cmdBuf) {
cmdBuf->Flush();
}
glfwPollEvents();
binding->SwapBuffers();
}
bool ShouldQuit() {
return glfwWindowShouldClose(window);
}
GLFWwindow* GetWindow() {
return window;
}
}

38
examples/Utils.h Normal file
View File

@ -0,0 +1,38 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nxt/nxt.h>
#if defined(__cplusplus)
extern "C" {
#endif
bool InitUtils(int argc, const char** argv);
void SwapBuffers();
bool ShouldQuit();
struct GLFWwindow;
struct GLFWwindow* GetWindow();
#if defined(__cplusplus)
}
#endif
// Yuck
#if defined(__cplusplus)
#include <nxt/nxtcpp.h>
void GetProcTableAndDevice(nxtProcTable* procs, nxt::Device* device);
nxt::ShaderModule CreateShaderModule(const nxt::Device& device, nxt::ShaderStage stage, const char* source);
#else
void GetProcTableAndDevice(nxtProcTable* procs, nxtDevice* device);
nxtShaderModule CreateShaderModule(nxtDevice device, nxtShaderStage stage, const char* source);
#endif

18
examples/glTFViewer/CMakeLists.txt Normal file
View File

@ -0,0 +1,18 @@
# Copyright 2017 The NXT Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
add_executable(glTFViewer glTFViewer.cpp)
target_link_libraries(glTFViewer utils)
target_include_directories(glTFViewer PUBLIC ../ ${GLM_INCLUDE_DIR})
SetCXX14(glTFViewer)

67
examples/glTFViewer/Camera.inl Normal file
View File

@ -0,0 +1,67 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
class Camera {
public:
Camera()
: _azimuth(glm::radians(45.f)),
_altitude(glm::radians(30.f)),
_radius(10.f),
_center(0, 0, 0),
_dirty(true) {
recalculate();
}
void rotate(float dAzimuth, float dAltitude) {
_dirty = true;
_azimuth = glm::mod(_azimuth + dAzimuth, glm::radians(360.f));
_altitude = glm::clamp(_altitude + dAltitude, glm::radians(-89.f), glm::radians(89.f));
}
void pan(float dX, float dY) {
recalculate();
glm::vec3 vX = glm::normalize(glm::cross(-_eyeDir, glm::vec3(0, 1, 0)));
glm::vec3 vY = glm::normalize(glm::cross(_eyeDir, vX));
_center += vX * dX * _radius + vY * dY * _radius;
}
void zoom(float factor) {
_dirty = true;
_radius = _radius * glm::exp(-factor);
}
glm::mat4 view() {
if (_dirty) {
recalculate();
}
return _view;
}
private:
void recalculate() {
glm::vec4 eye4 = glm::vec4(1, 0, 0, 1);
eye4 = glm::rotate(glm::mat4(), _altitude, glm::vec3(0, 0, 1)) * eye4;
eye4 = glm::rotate(glm::mat4(), _azimuth, glm::vec3(0, 1, 0)) * eye4;
_eyeDir = glm::vec3(eye4);
_view = glm::lookAt(_center + _eyeDir * _radius, _center, glm::vec3(0, 1, 0));
_dirty = false;
}
float _azimuth;
float _altitude;
float _radius;
glm::vec3 _center;
glm::vec3 _eyeDir;
bool _dirty;
glm::mat4 _view;
};
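recalculate() builds the eye direction by rotating the +X axis by _altitude about Z and then by _azimuth about Y, and places the eye at _center + _eyeDir * _radius looking back at _center. For reference, a sketch (not a file in this commit) of the interaction mapping the viewer's mouse callbacks apply to this camera:
void ApplyMouseDrag(Camera* camera, float dX, float dY, bool left, bool right, bool middle) {
    if (middle || (left && right)) {
        camera->pan(-dX * 0.002f, dY * 0.002f);   // slide the orbit center in the view plane
    } else if (left) {
        camera->rotate(-dX * 0.01f, dY * 0.01f);  // orbit around the center
    } else if (right) {
        camera->zoom(-dY * 0.005f);               // dolly toward or away from the center
    }
    // Each frame the viewer combines projection * camera->view() * model for every node drawn.
}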

30
examples/glTFViewer/README.md Normal file
View File

@ -0,0 +1,30 @@
# NXT glTF Viewer
This is a barebones glTF model viewer using the NXT API. It is intended as a
proof of concept for the API and is not a robust model viewer. It can load
basic mesh/texture data from a few
[glTF sample models](https://github.com/KhronosGroup/glTF-Sample-Models/tree/master/1.0),
such as:
* 2CylinderEngine
* BoxWithoutIndices
* Cesium Man
* Duck
* Monster
* VC (Virtual City)
## Usage
`build/examples/glTFViewer/glTFViewer path/to/Duck.gltf`
`build/examples/glTFViewer/glTFViewer path/to/Duck.gltf --backend metal`
## Screenshots
Duck:
![Duck](img/nxt-gltf-duck.jpg)
VC (Virtual City):
![Virtual City](img/nxt-gltf-vc.jpg)

654
examples/glTFViewer/glTFViewer.cpp Normal file
View File

@ -0,0 +1,654 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "Utils.h"
#include <bitset>
#include <unistd.h>
#define GLM_FORCE_DEPTH_ZERO_TO_ONE
#include <glm/mat4x4.hpp>
#include <glm/gtc/matrix_inverse.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>
#include "GLFW/glfw3.h"
#define TINYGLTF_LOADER_IMPLEMENTATION
#define STB_IMAGE_IMPLEMENTATION
#include <tinygltfloader/tiny_gltf_loader.h>
#include "Camera.inl"
namespace gl {
enum {
Triangles = 0x0004,
UnsignedShort = 0x1403,
UnsignedInt = 0x1405,
Float = 0x1406,
RGBA = 0x1908,
Nearest = 0x2600,
Linear = 0x2601,
NearestMipmapNearest = 0x2700,
LinearMipmapNearest = 0x2701,
NearestMipmapLinear = 0x2702,
LinearMipmapLinear = 0x2703,
ArrayBuffer = 0x8892,
ElementArrayBuffer = 0x8893,
FragmentShader = 0x8B30,
VertexShader = 0x8B31,
FloatVec2 = 0x8B50,
FloatVec3 = 0x8B51,
FloatVec4 = 0x8B52,
};
}
struct MaterialInfo {
nxt::Buffer uniformBuffer;
nxt::Pipeline pipeline;
nxt::BindGroup bindGroup0;
std::map<uint32_t, std::string> slotSemantics;
};
struct u_transform_block {
glm::mat4 modelViewProj;
glm::mat4 modelInvTr;
};
nxt::Device device;
nxt::Queue queue;
nxt::Buffer defaultBuffer;
std::map<std::string, nxt::Buffer> buffers;
std::map<std::string, nxt::CommandBuffer> commandBuffers;
std::map<uint32_t, std::string> slotSemantics = {{0, "POSITION"}, {1, "NORMAL"}, {2, "TEXCOORD_0"}};
nxt::Sampler defaultSampler;
std::map<std::string, nxt::Sampler> samplers;
nxt::TextureView defaultTexture;
std::map<std::string, nxt::TextureView> textures;
tinygltf::Scene scene;
glm::mat4 projection = glm::perspective(glm::radians(60.f), 640.f/480, 0.1f, 2000.f);
Camera camera;
// Helpers
namespace {
std::string getFilePathExtension(const std::string &FileName) {
if (FileName.find_last_of(".") != std::string::npos) {
return FileName.substr(FileName.find_last_of(".") + 1);
}
return "";
}
bool techniqueParameterTypeToVertexFormat(int type, nxt::VertexFormat *format) {
switch (type) {
case gl::FloatVec2:
*format = nxt::VertexFormat::FloatR32G32;
return true;
case gl::FloatVec3:
*format = nxt::VertexFormat::FloatR32G32B32;
return true;
case gl::FloatVec4:
*format = nxt::VertexFormat::FloatR32G32B32A32;
return true;
default:
return false;
}
}
}
// Initialization
namespace {
void initBuffers() {
defaultBuffer = device.CreateBufferBuilder()
.SetAllowedUsage(nxt::BufferUsageBit::Vertex | nxt::BufferUsageBit::Index)
.SetSize(256)
.GetResult();
defaultBuffer.FreezeUsage(nxt::BufferUsageBit::Vertex | nxt::BufferUsageBit::Index);
for (const auto& bv : scene.bufferViews) {
const auto& iBufferViewID = bv.first;
const auto& iBufferView = bv.second;
nxt::BufferUsageBit usage = nxt::BufferUsageBit::None;
switch (iBufferView.target) {
case gl::ArrayBuffer:
usage |= nxt::BufferUsageBit::Vertex;
break;
case gl::ElementArrayBuffer:
usage |= nxt::BufferUsageBit::Index;
break;
case 0:
fprintf(stderr, "TODO: buffer view has no target; skipping\n");
continue;
default:
fprintf(stderr, "unsupported buffer view target %d\n", iBufferView.target);
continue;
}
const auto& iBuffer = scene.buffers.at(iBufferView.buffer);
uint32_t iBufferViewSize =
iBufferView.byteLength ? iBufferView.byteLength :
(iBuffer.data.size() - iBufferView.byteOffset);
auto oBuffer = device.CreateBufferBuilder()
.SetAllowedUsage(nxt::BufferUsageBit::Mapped | usage)
.SetInitialUsage(nxt::BufferUsageBit::Mapped)
.SetSize(iBufferViewSize)
.GetResult();
oBuffer.SetSubData(0, iBufferViewSize / sizeof(uint32_t),
reinterpret_cast<const uint32_t*>(&iBuffer.data.at(iBufferView.byteOffset)));
oBuffer.FreezeUsage(usage);
buffers[iBufferViewID] = std::move(oBuffer);
}
}
const MaterialInfo& getMaterial(const std::string& iMaterialID, uint32_t stridePos, uint32_t strideNor, uint32_t strideTxc) {
static std::map<std::tuple<std::string, uint32_t, uint32_t, uint32_t>, MaterialInfo> materials;
auto key = make_tuple(iMaterialID, stridePos, strideNor, strideTxc);
auto it = materials.find(key);
if (it != materials.end()) {
return it->second;
}
const auto& iMaterial = scene.materials.at(iMaterialID);
const auto& iTechnique = scene.techniques.at(iMaterial.technique);
const auto& iProgram = scene.programs.at(iTechnique.program);
auto oVSModule = CreateShaderModule(device, nxt::ShaderStage::Vertex, R"(
#version 450
layout(set = 0, binding = 0) uniform u_transform_block {
mat4 modelViewProj;
mat4 modelInvTr;
} u_transform;
layout(location = 0) in vec4 a_position;
layout(location = 1) in vec3 a_normal;
layout(location = 2) in vec2 a_texcoord;
layout(location = 0) out vec3 v_normal;
layout(location = 1) out vec2 v_texcoord;
void main() {
v_normal = (u_transform.modelInvTr * vec4(normalize(a_normal), 0)).rgb;
v_texcoord = a_texcoord;
gl_Position = u_transform.modelViewProj * a_position;
})");
auto oFSModule = CreateShaderModule(device, nxt::ShaderStage::Fragment, R"(
#version 450
layout(set = 0, binding = 1) uniform sampler u_samp;
layout(set = 0, binding = 2) uniform texture2D u_tex;
layout(location = 0) in vec3 v_normal;
layout(location = 1) in vec2 v_texcoord;
out vec4 fragcolor;
void main() {
const vec3 lightdir = normalize(vec3(-1, -2, 3));
vec3 normal = normalize(v_normal);
float diffuse = abs(dot(lightdir, normal));
float diffamb = diffuse * 0.85 + 0.15;
vec3 albedo = texture(sampler2D(u_tex, u_samp), v_texcoord).rgb;
fragcolor = vec4(diffamb * albedo, 1);
})");
nxt::InputStateBuilder builder = device.CreateInputStateBuilder();
std::bitset<3> slotsSet;
for (const auto& a : iTechnique.attributes) {
const auto iAttributeName = a.first;
const auto iParameter = iTechnique.parameters.at(a.second);
nxt::VertexFormat format;
if (!techniqueParameterTypeToVertexFormat(iParameter.type, &format)) {
fprintf(stderr, "unsupported technique parameter type %d\n", iParameter.type);
continue;
}
if (iParameter.semantic == "POSITION") {
builder.SetAttribute(0, 0, format, 0);
builder.SetInput(0, stridePos, nxt::InputStepMode::Vertex);
slotsSet.set(0);
} else if (iParameter.semantic == "NORMAL") {
builder.SetAttribute(1, 1, format, 0);
builder.SetInput(1, strideNor, nxt::InputStepMode::Vertex);
slotsSet.set(1);
} else if (iParameter.semantic == "TEXCOORD_0") {
builder.SetAttribute(2, 2, format, 0);
builder.SetInput(2, strideTxc, nxt::InputStepMode::Vertex);
slotsSet.set(2);
} else {
fprintf(stderr, "unsupported technique attribute semantic %s\n", iParameter.semantic.c_str());
}
// TODO: use iAttributeParameter.node?
}
for (size_t i = 0; i < slotsSet.size(); i++) {
if (slotsSet[i]) {
continue;
}
builder.SetAttribute(i, i, nxt::VertexFormat::FloatR32G32B32A32, 0);
builder.SetInput(i, 0, nxt::InputStepMode::Vertex);
}
auto inputState = builder.GetResult();
auto bindGroupLayout = device.CreateBindGroupLayoutBuilder()
.SetBindingsType(nxt::ShaderStageBit::Vertex, nxt::BindingType::UniformBuffer, 0, 1)
.SetBindingsType(nxt::ShaderStageBit::Fragment, nxt::BindingType::Sampler, 1, 1)
.SetBindingsType(nxt::ShaderStageBit::Fragment, nxt::BindingType::SampledTexture, 2, 1)
.GetResult();
auto pipelineLayout = device.CreatePipelineLayoutBuilder()
.SetBindGroupLayout(0, bindGroupLayout)
.GetResult();
auto pipeline = device.CreatePipelineBuilder()
.SetLayout(pipelineLayout)
.SetStage(nxt::ShaderStage::Vertex, oVSModule, "main")
.SetStage(nxt::ShaderStage::Fragment, oFSModule, "main")
.SetInputState(inputState)
.GetResult();
auto uniformBuffer = device.CreateBufferBuilder()
.SetAllowedUsage(nxt::BufferUsageBit::Mapped | nxt::BufferUsageBit::Uniform)
.SetInitialUsage(nxt::BufferUsageBit::Mapped)
.SetSize(sizeof(u_transform_block))
.GetResult();
auto uniformView = uniformBuffer.CreateBufferViewBuilder()
.SetExtent(0, sizeof(u_transform_block))
.GetResult();
auto bindGroupBuilder = device.CreateBindGroupBuilder();
bindGroupBuilder.SetLayout(bindGroupLayout)
.SetUsage(nxt::BindGroupUsage::Frozen)
.SetBufferViews(0, 1, &uniformView);
{
auto it = iMaterial.values.find("diffuse");
if (it != iMaterial.values.end() && !it->second.string_value.empty()) {
const auto& iTextureID = it->second.string_value;
const auto& textureView = textures[iTextureID];
const auto& iSamplerID = scene.textures[iTextureID].sampler;
bindGroupBuilder.SetSamplers(1, 1, &samplers[iSamplerID]);
bindGroupBuilder.SetTextureViews(2, 1, &textureView);
} else {
bindGroupBuilder.SetSamplers(1, 1, &defaultSampler);
bindGroupBuilder.SetTextureViews(2, 1, &defaultTexture);
}
}
MaterialInfo material = {
uniformBuffer.Get(),
pipeline.Get(),
bindGroupBuilder.GetResult(),
std::map<uint32_t, std::string>(),
};
materials[key] = std::move(material);
return materials.at(key);
}
void initSamplers() {
defaultSampler = device.CreateSamplerBuilder()
.SetFilterMode(nxt::FilterMode::Nearest, nxt::FilterMode::Nearest, nxt::FilterMode::Nearest)
// TODO: wrap modes
.GetResult();
for (const auto& s : scene.samplers) {
const auto& iSamplerID = s.first;
const auto& iSampler = s.second;
auto magFilter = nxt::FilterMode::Nearest;
auto minFilter = nxt::FilterMode::Nearest;
auto mipmapFilter = nxt::FilterMode::Nearest;
switch (iSampler.magFilter) {
case gl::Nearest:
magFilter = nxt::FilterMode::Nearest;
break;
case gl::Linear:
magFilter = nxt::FilterMode::Linear;
break;
default:
fprintf(stderr, "unsupported magFilter %d\n", iSampler.magFilter);
break;
}
switch (iSampler.minFilter) {
case gl::Nearest:
case gl::NearestMipmapNearest:
case gl::NearestMipmapLinear:
minFilter = nxt::FilterMode::Nearest;
break;
case gl::Linear:
case gl::LinearMipmapNearest:
case gl::LinearMipmapLinear:
minFilter = nxt::FilterMode::Linear;
break;
default:
fprintf(stderr, "unsupported minFilter %d\n", iSampler.magFilter);
break;
}
switch (iSampler.minFilter) {
case gl::NearestMipmapNearest:
case gl::LinearMipmapNearest:
mipmapFilter = nxt::FilterMode::Nearest;
break;
case gl::NearestMipmapLinear:
case gl::LinearMipmapLinear:
mipmapFilter = nxt::FilterMode::Linear;
break;
}
auto oSampler = device.CreateSamplerBuilder()
.SetFilterMode(magFilter, minFilter, mipmapFilter)
// TODO: wrap modes
.GetResult();
samplers[iSamplerID] = std::move(oSampler);
}
}
void initTextures() {
{
auto oTexture = device.CreateTextureBuilder()
.SetDimension(nxt::TextureDimension::e2D)
.SetExtent(1, 1, 1)
.SetFormat(nxt::TextureFormat::R8G8B8A8Unorm)
.SetMipLevels(1)
.SetAllowedUsage(nxt::TextureUsageBit::TransferDst | nxt::TextureUsageBit::Sampled)
.GetResult();
// TODO: release this texture
nxt::Buffer staging = device.CreateBufferBuilder()
.SetAllowedUsage(nxt::BufferUsageBit::Mapped | nxt::BufferUsageBit::TransferSrc)
.SetInitialUsage(nxt::BufferUsageBit::Mapped)
.SetSize(4)
.GetResult();
// TODO: release this buffer
uint32_t white = 0xffffffff;
staging.SetSubData(0, 1, &white);
staging.FreezeUsage(nxt::BufferUsageBit::TransferSrc);
auto cmdbuf = device.CreateCommandBufferBuilder()
.TransitionTextureUsage(oTexture, nxt::TextureUsageBit::TransferDst)
.CopyBufferToTexture(staging, oTexture, 0, 0, 0, 1, 1, 1, 0)
.GetResult();
queue.Submit(1, &cmdbuf);
oTexture.FreezeUsage(nxt::TextureUsageBit::Sampled);
defaultTexture = oTexture.CreateTextureViewBuilder().GetResult();
}
for (const auto& t : scene.textures) {
const auto& iTextureID = t.first;
const auto& iTexture = t.second;
const auto& iImage = scene.images[iTexture.source];
nxt::TextureFormat format = nxt::TextureFormat::R8G8B8A8Unorm;
switch (iTexture.format) {
case gl::RGBA:
format = nxt::TextureFormat::R8G8B8A8Unorm;
break;
default:
fprintf(stderr, "unsupported texture format %d\n", iTexture.format);
continue;
}
auto oTexture = device.CreateTextureBuilder()
.SetDimension(nxt::TextureDimension::e2D)
.SetExtent(iImage.width, iImage.height, 1)
.SetFormat(format)
.SetMipLevels(1)
.SetAllowedUsage(nxt::TextureUsageBit::TransferDst | nxt::TextureUsageBit::Sampled)
.GetResult();
// TODO: release this texture
uint32_t numPixels = iImage.width * iImage.height;
const uint8_t* origData = iImage.image.data();
const uint8_t* data = nullptr;
std::vector<uint8_t> newData;
if (iImage.component == 4) {
data = origData;
} else if (iImage.component == 3) {
newData.resize(numPixels * 4);
for (size_t i = 0; i < numPixels; ++i) {
newData[4 * i + 0] = origData[3 * i + 0];
newData[4 * i + 1] = origData[3 * i + 1];
newData[4 * i + 2] = origData[3 * i + 2];
newData[4 * i + 3] = 255;
}
data = newData.data();
} else {
fprintf(stderr, "unsupported image.component %d\n", iImage.component);
}
nxt::Buffer staging = device.CreateBufferBuilder()
.SetAllowedUsage(nxt::BufferUsageBit::Mapped | nxt::BufferUsageBit::TransferSrc)
.SetInitialUsage(nxt::BufferUsageBit::Mapped)
.SetSize(numPixels * 4)
.GetResult();
// TODO: release this buffer
staging.SetSubData(0, numPixels,
reinterpret_cast<const uint32_t*>(data));
staging.FreezeUsage(nxt::BufferUsageBit::TransferSrc);
auto cmdbuf = device.CreateCommandBufferBuilder()
.TransitionTextureUsage(oTexture, nxt::TextureUsageBit::TransferDst)
.CopyBufferToTexture(staging, oTexture, 0, 0, 0, iImage.width, iImage.height, 1, 0)
.GetResult();
queue.Submit(1, &cmdbuf);
oTexture.FreezeUsage(nxt::TextureUsageBit::Sampled);
textures[iTextureID] = oTexture.CreateTextureViewBuilder().GetResult();
}
}
void init() {
nxtProcTable procs;
GetProcTableAndDevice(&procs, &device);
nxtSetProcs(&procs);
queue = device.CreateQueueBuilder().GetResult();
initBuffers();
initSamplers();
initTextures();
}
}
// Drawing
namespace {
void drawMesh(const tinygltf::Mesh& iMesh, const glm::mat4& model) {
nxt::CommandBufferBuilder cmd = device.CreateCommandBufferBuilder();
for (const auto& iPrim : iMesh.primitives) {
if (iPrim.mode != gl::Triangles) {
fprintf(stderr, "unsupported primitive mode %d\n", iPrim.mode);
continue;
}
u_transform_block transforms = {
(projection * camera.view() * model),
glm::inverseTranspose(model),
};
uint32_t strides[3] = {0};
for (const auto& s : slotSemantics) {
if (s.first < 3) {
auto it = iPrim.attributes.find(s.second);
if (it == iPrim.attributes.end()) {
continue;
}
const auto& iAccessorName = it->second;
strides[s.first] = scene.accessors.at(iAccessorName).byteStride;
}
}
const MaterialInfo& material = getMaterial(iPrim.material, strides[0], strides[1], strides[2]);
material.uniformBuffer.TransitionUsage(nxt::BufferUsageBit::Mapped);
material.uniformBuffer.SetSubData(0,
sizeof(u_transform_block) / sizeof(uint32_t),
reinterpret_cast<const uint32_t*>(&transforms));
cmd.SetPipeline(material.pipeline);
cmd.TransitionBufferUsage(material.uniformBuffer, nxt::BufferUsageBit::Uniform);
cmd.SetBindGroup(0, material.bindGroup0);
uint32_t vertexCount = 0;
for (const auto& s : slotSemantics) {
uint32_t slot = s.first;
const auto& iSemantic = s.second;
auto it = iPrim.attributes.find(s.second);
if (it == iPrim.attributes.end()) {
uint32_t zero = 0;
cmd.SetVertexBuffers(slot, 1, &defaultBuffer, &zero);
continue;
}
const auto& iAccessor = scene.accessors.at(it->second);
if (iAccessor.componentType != gl::Float ||
(iAccessor.type != TINYGLTF_TYPE_VEC4 && iAccessor.type != TINYGLTF_TYPE_VEC3 && iAccessor.type != TINYGLTF_TYPE_VEC2)) {
fprintf(stderr, "unsupported vertex accessor component type %d and type %d\n", iAccessor.componentType, iAccessor.type);
continue;
}
if (!vertexCount) {
vertexCount = iAccessor.count;
}
const auto& oBuffer = buffers.at(iAccessor.bufferView);
uint32_t iBufferOffset = iAccessor.byteOffset;
cmd.SetVertexBuffers(slot, 1, &oBuffer, &iBufferOffset);
}
if (!iPrim.indices.empty()) {
const auto& iIndices = scene.accessors.at(iPrim.indices);
// DrawElements
if (iIndices.componentType != gl::UnsignedShort || iIndices.type != TINYGLTF_TYPE_SCALAR) {
fprintf(stderr, "unsupported index accessor component type %d and type %d\n", iIndices.componentType, iIndices.type);
continue;
}
const auto& oIndicesBuffer = buffers.at(iIndices.bufferView);
cmd.SetIndexBuffer(oIndicesBuffer, iIndices.byteOffset, nxt::IndexFormat::Uint16);
cmd.DrawElements(iIndices.count, 1, 0, 0);
} else {
// DrawArrays
cmd.DrawArrays(vertexCount, 1, 0, 0);
}
}
auto commands = cmd.GetResult();
queue.Submit(1, &commands);
}
void drawNode(const tinygltf::Node& node, const glm::mat4& parent = glm::mat4()) {
glm::mat4 model;
if (node.matrix.size() == 16) {
model = glm::make_mat4(node.matrix.data());
} else {
if (node.scale.size() == 3) {
glm::vec3 scale = glm::make_vec3(node.scale.data());
model = glm::scale(model, scale);
}
if (node.rotation.size() == 4) {
glm::quat rotation = glm::make_quat(node.rotation.data());
model = glm::mat4_cast(rotation) * model;
}
if (node.translation.size() == 3) {
glm::vec3 translation = glm::make_vec3(node.translation.data());
model = glm::translate(model, translation);
}
}
model = parent * model;
for (const auto& meshID : node.meshes) {
drawMesh(scene.meshes[meshID], model);
}
for (const auto& child : node.children) {
drawNode(scene.nodes.at(child), model);
}
}
void frame() {
const auto& defaultSceneNodes = scene.scenes.at(scene.defaultScene);
for (const auto& n : defaultSceneNodes) {
const auto& node = scene.nodes.at(n);
drawNode(node);
}
SwapBuffers();
}
}
// Mouse camera control
namespace {
bool buttons[GLFW_MOUSE_BUTTON_LAST + 1] = {0};
void mouseButtonCallback(GLFWwindow *window, int button, int action, int mods) {
buttons[button] = (action == GLFW_PRESS);
}
void cursorPosCallback(GLFWwindow *window, double mouseX, double mouseY) {
static float oldX, oldY;
float dX = mouseX - oldX;
float dY = mouseY - oldY;
oldX = mouseX;
oldY = mouseY;
if (buttons[2] || (buttons[0] && buttons[1])) {
camera.pan(-dX * 0.002, dY * 0.002);
} else if (buttons[0]) {
camera.rotate(dX * -0.01, dY * 0.01);
} else if (buttons[1]) {
camera.zoom(dY * -0.005);
}
}
void scrollCallback(GLFWwindow *window, double xoffset, double yoffset) {
camera.zoom(yoffset * 0.04);
}
}
int main(int argc, const char* argv[]) {
if (!InitUtils(argc, argv)) {
return 1;
}
if (argc < 2) {
fprintf(stderr, "Usage: %s model.gltf [... NXT Options]\n", argv[0]);
return 1;
}
tinygltf::TinyGLTFLoader loader;
std::string err;
std::string input_filename(argv[1]);
std::string ext = getFilePathExtension(input_filename);
bool ret = false;
if (ext.compare("glb") == 0) {
// assume binary glTF.
ret = loader.LoadBinaryFromFile(&scene, &err, input_filename.c_str());
} else {
// assume ascii glTF.
ret = loader.LoadASCIIFromFile(&scene, &err, input_filename.c_str());
}
if (!err.empty()) {
fprintf(stderr, "ERR: %s\n", err.c_str());
}
if (!ret) {
fprintf(stderr, "Failed to load .glTF : %s\n", argv[1]);
exit(-1);
}
init();
GLFWwindow* window = GetWindow();
glfwSetMouseButtonCallback(window, mouseButtonCallback);
glfwSetCursorPosCallback(window, cursorPosCallback);
glfwSetScrollCallback(window, scrollCallback);
while (!ShouldQuit()) {
frame();
usleep(16000);
}
// TODO release stuff
}

examples/glTFViewer/img/nxt-gltf-duck.jpg: Binary file not shown (16 KiB).

examples/glTFViewer/img/nxt-gltf-vc.jpg: Binary file not shown (93 KiB).

74
generator/CMakeLists.txt Normal file
View File

@ -0,0 +1,74 @@
# Copyright 2017 The NXT Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
find_package(PythonInterp REQUIRED)
include(CMakeParseArguments)
# Check for Jinja2
message(STATUS "${PYTHON_EXECUTABLE}")
execute_process(
COMMAND ${PYTHON_EXECUTABLE} -c "import jinja2"
RESULT_VARIABLE RET
)
if (NOT RET EQUAL 0)
message(FATAL_ERROR "Missing dependencies for VkCPP generation, please ensure you have python-jinja2 installed.")
endif()
function(Generate)
set(oneValueArgs LIB_NAME LIB_TYPE PRINT_NAME EXECUTABLE)
set(multiValueArgs COMMAND_LINE_ARGS EXTRA_DEPS SOURCE)
cmake_parse_arguments(G "" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
execute_process(
COMMAND ${G_COMMAND_LINE_ARGS} --print-dependencies
OUTPUT_VARIABLE DEPENDENCIES
RESULT_VARIABLE RET
)
if (NOT RET EQUAL 0)
message(STATUS ${RET})
message(FATAL_ERROR "Failed to get the dependencies for ${G_PRINT_NAME}.")
endif()
execute_process(
COMMAND ${G_COMMAND_LINE_ARGS} --print-outputs
OUTPUT_VARIABLE OUTPUTS
RESULT_VARIABLE RET
)
if (NOT RET EQUAL 0)
message(FATAL_ERROR "Failed to get the outputs for ${G_PRINT_NAME}.")
endif()
add_custom_command(
COMMAND ${G_COMMAND_LINE_ARGS}
DEPENDS ${DEPENDENCIES} ${G_EXTRA_DEPS}
OUTPUT ${OUTPUTS}
COMMENT "Generating files for ${G_PRINT_NAME}."
)
add_library(${G_LIB_NAME} ${G_LIB_TYPE}
${G_SOURCE}
${OUTPUTS}
)
endfunction()
set(GENERATED_DIR ${CMAKE_CURRENT_BINARY_DIR} PARENT_SCOPE)
set(GENERATOR_COMMON_ARGS
${PYTHON_EXECUTABLE}
${CMAKE_CURRENT_SOURCE_DIR}/main.py
${CMAKE_SOURCE_DIR}/next.json
-t ${CMAKE_CURRENT_SOURCE_DIR}/templates
-o ${CMAKE_CURRENT_BINARY_DIR}
PARENT_SCOPE
)

427
generator/main.py Normal file
View File

@ -0,0 +1,427 @@
# Copyright 2017 The NXT Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############################################################
# COMMON
############################################################
from collections import namedtuple
class Name:
def __init__(self, name, native=False):
self.native = native
if native:
self.chunks = [name]
else:
self.chunks = name.split(' ')
def CamelChunk(self, chunk):
return chunk[0].upper() + chunk[1:]
def canonical_case(self):
return (' '.join(self.chunks)).lower()
def concatcase(self):
return ''.join(self.chunks)
def camelCase(self):
return self.chunks[0] + ''.join([self.CamelChunk(chunk) for chunk in self.chunks[1:]])
def CamelCase(self):
return ''.join([self.CamelChunk(chunk) for chunk in self.chunks])
def SNAKE_CASE(self):
return '_'.join([chunk.upper() for chunk in self.chunks])
def snake_case(self):
return '_'.join(self.chunks)
class Type:
def __init__(self, name, record, native=False):
self.record = record
self.dict_name = name
self.name = Name(name, native=native)
self.category = record['category']
self.is_builder = self.name.canonical_case().endswith(" builder")
EnumValue = namedtuple('EnumValue', ['name', 'value'])
class EnumType(Type):
def __init__(self, name, record):
Type.__init__(self, name, record)
self.values = [EnumValue(Name(m['name']), m['value']) for m in self.record['values']]
BitmaskValue = namedtuple('BitmaskValue', ['name', 'value'])
class BitmaskType(Type):
def __init__(self, name, record):
Type.__init__(self, name, record)
self.values = [BitmaskValue(Name(m['name']), m['value']) for m in self.record['values']]
self.full_mask = 0
for value in self.values:
self.full_mask = self.full_mask | value.value
class NativeType(Type):
def __init__(self, name, record):
Type.__init__(self, name, record, native=True)
class MethodArgument:
def __init__(self, name, typ, annotation):
self.name = name
self.type = typ
self.annotation = annotation
self.length = None
Method = namedtuple('Method', ['name', 'return_type', 'arguments'])
class ObjectType(Type):
def __init__(self, name, record):
Type.__init__(self, name, record)
self.methods = []
############################################################
# PARSE
############################################################
import json
def link_object(obj, types):
def make_method(record):
arguments = []
arguments_by_name = {}
for a in record.get('args', []):
arg = MethodArgument(Name(a['name']), types[a['type']], a.get('annotation', 'value'))
arguments.append(arg)
arguments_by_name[arg.name.canonical_case()] = arg
for (arg, a) in zip(arguments, record.get('args', [])):
assert(arg.annotation == 'value' or 'length' in a)
if arg.annotation != 'value':
if a['length'] == 'strlen':
arg.length = 'strlen'
else:
arg.length = arguments_by_name[a['length']]
return Method(Name(record['name']), types[record.get('returns', 'void')], arguments)
obj.methods = [make_method(m) for m in obj.record.get('methods', [])]
def parse_json(json):
category_to_parser = {
'bitmask': BitmaskType,
'enum': EnumType,
'native': NativeType,
'object': ObjectType,
}
types = {}
by_category = {}
for name in category_to_parser.keys():
by_category[name] = []
for (name, record) in json.items():
if name[0] == '_':
continue
category = record['category']
parsed = category_to_parser[category](name, record)
by_category[category].append(parsed)
types[name] = parsed
for obj in by_category['object']:
link_object(obj, types)
for category in by_category.keys():
by_category[category] = sorted(by_category[category], key=lambda typ: typ.name.canonical_case())
return {
'types': types,
'by_category': by_category
}
#############################################################
# OUTPUT
#############################################################
import re, os, sys
from collections import OrderedDict
try:
import jinja2
except ImportError:
# Try using Chromium's Jinja2
dir, _ = os.path.split(os.path.realpath(__file__))
third_party_dir = os.path.normpath(dir + (os.path.sep + os.path.pardir) * 2)
sys.path.insert(1, third_party_dir)
import jinja2
# A custom Jinja2 template loader that removes the extra indentation
# of the template blocks so that the output is correctly indented
class PreprocessingLoader(jinja2.BaseLoader):
def __init__(self, path):
self.path = path
def get_source(self, environment, template):
path = os.path.join(self.path, template)
if not os.path.exists(path):
raise jinja2.TemplateNotFound(template)
mtime = os.path.getmtime(path)
with open(path) as f:
source = self.preprocess(f.read())
return source, path, lambda: mtime == os.path.getmtime(path)
blockstart = re.compile('{%-?\s*(if|for|block)[^}]*%}')
blockend = re.compile('{%-?\s*end(if|for|block)[^}]*%}')
def preprocess(self, source):
lines = source.split('\n')
# Compute the current indentation level of the template blocks and remove their indentation
result = []
indentation_level = 0
for line in lines:
# The capture in the regex adds one element per block start or end, so we divide by two;
# there is also an extra line chunk corresponding to the line end, so we subtract it.
numends = (len(self.blockend.split(line)) - 1) / 2
indentation_level -= numends
result.append(self.remove_indentation(line, indentation_level))
numstarts = (len(self.blockstart.split(line)) - 1) / 2
indentation_level += numstarts
return '\n'.join(result)
def remove_indentation(self, line, n):
for _ in range(n):
if line.startswith(' '):
line = line[4:]
elif line.startswith('\t'):
line = line[1:]
else:
assert(line.strip() == '')
return line
FileRender = namedtuple('FileRender', ['template', 'output', 'params_dicts'])
def do_renders(renders, template_dir, output_dir):
env = jinja2.Environment(loader=PreprocessingLoader(template_dir), trim_blocks=True, lstrip_blocks=True, line_comment_prefix='//*')
for render in renders:
params = {}
for param_dict in render.params_dicts:
params.update(param_dict)
output = env.get_template(render.template).render(**params)
output_file = output_dir + os.path.sep + render.output
directory = os.path.dirname(output_file)
if not os.path.exists(directory):
os.makedirs(directory)
content = ""
try:
with open(output_file, 'r') as outfile:
content = outfile.read()
except:
pass
if output != content:
with open(output_file, 'w') as outfile:
outfile.write(output)
#############################################################
# MAIN SOMETHING WHATEVER
#############################################################
import argparse, sys
def as_varName(*names):
return names[0].camelCase() + ''.join([name.CamelCase() for name in names[1:]])
def as_cType(name):
if name.native:
return name.concatcase()
else:
return 'nxt' + name.CamelCase()
def as_cppType(name):
if name.native:
return name.concatcase()
else:
return name.CamelCase()
def decorate(name, typ, arg):
if arg.annotation == 'value':
return typ + ' ' + name
elif arg.annotation == '*':
return typ + '* ' + name
elif arg.annotation == 'const*':
return typ + ' const * ' + name
else:
assert(False)
def annotated(typ, arg):
name = as_varName(arg.name)
return decorate(name, typ, arg)
def as_cEnum(type_name, value_name):
assert(not type_name.native and not value_name.native)
return 'NXT' + '_' + type_name.SNAKE_CASE() + '_' + value_name.SNAKE_CASE()
def as_cppEnum(value_name):
assert(not value_name.native)
if value_name.concatcase()[0].isdigit():
return "e" + value_name.CamelCase()
return value_name.CamelCase()
def as_cMethod(type_name, method_name):
assert(not type_name.native and not method_name.native)
return 'nxt' + type_name.CamelCase() + method_name.CamelCase()
def as_MethodSuffix(type_name, method_name):
assert(not type_name.native and not method_name.native)
return type_name.CamelCase() + method_name.CamelCase()
def as_cProc(type_name, method_name):
assert(not type_name.native and not method_name.native)
return 'nxt' + 'Proc' + type_name.CamelCase() + method_name.CamelCase()
def as_backendType(typ):
if typ.category == 'object':
return typ.name.CamelCase() + '*'
else:
return as_cType(typ.name)
def native_methods(types, typ):
return [
Method(Name('reference'), types['void'], []),
Method(Name('release'), types['void'], []),
] + typ.methods
def debug(text):
print(text)
def main():
targets = ['nxt', 'nxtcpp', 'mock_nxt', 'opengl', 'metal', 'wire', 'blink']
parser = argparse.ArgumentParser(
description = 'Generates code for various targets for NXT.',
formatter_class = argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('json', metavar='NXT_JSON', nargs=1, type=str, help ='The NXT JSON definition to use.')
parser.add_argument('-t', '--template-dir', default='templates', type=str, help='Directory with template files.')
parser.add_argument('-o', '--output-dir', default=None, type=str, help='Output directory for the generated source files.')
parser.add_argument('-T', '--targets', default=None, type=str, help='Comma-separated subset of targets to output. Available targets: ' + ', '.join(targets))
parser.add_argument('--print-dependencies', action='store_true', help='Prints a semicolon-separated list of file dependencies, used for CMake integration')
parser.add_argument('--print-outputs', action='store_true', help='Prints a semicolon-separated list of file outputs, used for CMake integration')
parser.add_argument('--gn', action='store_true', help='Make the printing of dependencies by GN friendly')
args = parser.parse_args()
if args.targets != None:
targets = args.targets.split(',')
with open(args.json[0]) as f:
loaded_json = json.loads(f.read())
api_params = parse_json(loaded_json)
base_params = {
'enumerate': enumerate,
'format': format,
'len': len,
'debug': debug,
'Name': lambda name: Name(name),
'as_annotated_cType': lambda arg: annotated(as_cType(arg.type.name), arg),
'as_annotated_cppType': lambda arg: annotated(as_cppType(arg.type.name), arg),
'as_cEnum': as_cEnum,
'as_cppEnum': as_cppEnum,
'as_cMethod': as_cMethod,
'as_MethodSuffix': as_MethodSuffix,
'as_cProc': as_cProc,
'as_cType': as_cType,
'as_cppType': as_cppType,
'as_varName': as_varName,
'decorate': decorate,
'native_methods': lambda typ: native_methods(api_params['types'], typ)
}
renders = []
if 'nxt' in targets:
renders.append(FileRender('api.h', 'nxt/nxt.h', [base_params, api_params]))
renders.append(FileRender('api.c', 'nxt/nxt.c', [base_params, api_params]))
if 'nxtcpp' in targets:
renders.append(FileRender('apicpp.h', 'nxt/nxtcpp.h', [base_params, api_params]))
renders.append(FileRender('apicpp.cpp', 'nxt/nxtcpp.cpp', [base_params, api_params]))
if 'mock_nxt' in targets:
renders.append(FileRender('mock_api.h', 'mock/mock_nxt.h', [base_params, api_params]))
renders.append(FileRender('mock_api.cpp', 'mock/mock_nxt.cpp', [base_params, api_params]))
base_backend_params = [
base_params,
api_params,
{
'as_backendType': lambda typ: as_backendType(typ), # TODO as_backendType and friends take a Type and not a Name :(
'as_annotated_backendType': lambda arg: annotated(as_backendType(arg.type), arg)
}
]
if 'opengl' in targets:
opengl_params = {
'namespace': 'opengl',
}
renders.append(FileRender('BackendProcTable.cpp', 'opengl/ProcTable.cpp', base_backend_params + [opengl_params]))
if 'metal' in targets:
metal_params = {
'namespace': 'metal',
}
renders.append(FileRender('BackendProcTable.cpp', 'metal/ProcTable.mm', base_backend_params + [metal_params]))
if 'wire' in targets:
renders.append(FileRender('wire/WireCmd.h', 'wire/WireCmd_autogen.h', base_backend_params))
renders.append(FileRender('wire/WireCmd.cpp', 'wire/WireCmd.cpp', base_backend_params))
renders.append(FileRender('wire/WireClient.cpp', 'wire/WireClient.cpp', base_backend_params))
renders.append(FileRender('wire/WireServer.cpp', 'wire/WireServer.cpp', base_backend_params))
if 'blink' in targets:
renders.append(FileRender('blink/autogen.gni', 'autogen.gni', [base_params, api_params]))
renders.append(FileRender('blink/Objects.cpp', 'NXT.cpp', [base_params, api_params]))
renders.append(FileRender('blink/Forward.h', 'Forward.h', [base_params, api_params]))
for typ in api_params['by_category']['object']:
file_prefix = 'NXT' + typ.name.CamelCase()
params = [base_params, api_params, {'type': typ}]
renders.append(FileRender('blink/Object.h', file_prefix + '.h', params))
renders.append(FileRender('blink/Object.idl', file_prefix + '.idl', params))
output_separator = '\n' if args.gn else ';'
if args.print_dependencies:
dependencies = set(
[os.path.abspath(args.template_dir + os.path.sep + render.template) for render in renders] +
[os.path.abspath(args.json[0])] +
[os.path.realpath(__file__)]
)
sys.stdout.write(output_separator.join(dependencies))
return 0
if args.print_outputs:
outputs = set(
[os.path.abspath(args.output_dir + os.path.sep + render.output) for render in renders]
)
sys.stdout.write(output_separator.join(outputs))
return 0
do_renders(renders, args.template_dir, args.output_dir)
if __name__ == '__main__':
sys.exit(main())
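To make the naming helpers concrete: for a hypothetical "bind group layout" object with a "get result" method in next.json, the case conversions and as_* helpers above would produce the identifiers listed below (illustrative only, not generated output from this commit):
// canonical_case() -> "bind group layout"    concatcase() -> "bindgrouplayout"
// camelCase()      -> bindGroupLayout        (as_varName)
// CamelCase()      -> BindGroupLayout        (as_cppType, i.e. nxt::BindGroupLayout)
// SNAKE_CASE()     -> BIND_GROUP_LAYOUT      snake_case() -> bind_group_layout
// as_cType   -> nxtBindGroupLayout
// as_cMethod -> nxtBindGroupLayoutGetResult
// as_cProc   -> nxtProcBindGroupLayoutGetResult
// as_cEnum(Name("texture usage bit"), Name("transfer dst")) -> NXT_TEXTURE_USAGE_BIT_TRANSFER_DST
// as_cppEnum(Name("2 d")) -> e2D  (a leading digit gets the "e" prefix, cf. nxt::TextureDimension::e2D)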

185
generator/templates/BackendProcTable.cpp Normal file
View File

@ -0,0 +1,185 @@
//* Copyright 2017 The NXT Authors
//*
//* Licensed under the Apache License, Version 2.0 (the "License");
//* you may not use this file except in compliance with the License.
//* You may obtain a copy of the License at
//*
//* http://www.apache.org/licenses/LICENSE-2.0
//*
//* Unless required by applicable law or agreed to in writing, software
//* distributed under the License is distributed on an "AS IS" BASIS,
//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//* See the License for the specific language governing permissions and
//* limitations under the License.
#include "nxt/nxt.h"
#include "nxt/nxtcpp.h"
#include "{{namespace}}/GeneratedCodeIncludes.h"
#include <iostream>
namespace backend {
namespace {{namespace}} {
namespace {
//* Helper functions to check the value of enums
{% for type in by_category["enum"] %}
{% set cType = as_cType(type.name) %}
bool CheckEnum{{cType}}({{cType}} value) {
switch (value) {
{% for value in type.values %}
case {{as_cEnum(type.name, value.name)}}:
return true;
{% endfor %}
default:
return false;
}
}
{% endfor %}
{% for type in by_category["bitmask"] %}
{% set cType = as_cType(type.name) %}
bool CheckBitmask{{cType}}({{cType}} value) {
return (value & ~{{type.full_mask}}) == 0;
}
{% endfor %}
{% set methodsWithExtraValidation = (
"CommandBufferBuilderGetResult",
"QueueSubmit",
) %}
{% for type in by_category["object"] %}
{% for method in native_methods(type) %}
{% set suffix = as_MethodSuffix(type.name, method.name) %}
//* Entry point without validation, forwards the arguments to the method directly
{{as_backendType(method.return_type)}} NonValidating{{suffix}}(
{{-as_backendType(type)}} self
{%- for arg in method.arguments -%}
, {{as_annotated_backendType(arg)}}
{%- endfor -%}
) {
{% if method.return_type.name.canonical_case() != "void" %}
auto result =
{%- endif %}
self->{{method.name.CamelCase()}}(
{%- for arg in method.arguments -%}
{%- if not loop.first %}, {% endif -%}
{%- if arg.type.category in ["enum", "bitmask"] -%}
static_cast<nxt::{{as_cppType(arg.type.name)}}>({{as_varName(arg.name)}})
{%- else -%}
{{as_varName(arg.name)}}
{%- endif -%}
{%- endfor -%}
);
{% if method.return_type.name.canonical_case() != "void" %}
return reinterpret_cast<{{as_backendType(method.return_type)}}>(result);
{% endif %}
}
//* Autogenerated part of the entry point validation
//* - Check that enums and bitmasks are in the correct range
//* - Check that builders have not been consumed already
//* - Others TODO
bool ValidateBase{{suffix}}(
{{-as_backendType(type)}} self
{%- for arg in method.arguments -%}
, {{as_annotated_backendType(arg)}}
{%- endfor -%}
) {
{% if type.is_builder and method.name.canonical_case() not in ("release", "reference") %}
if (self->WasConsumed()) return false;
{% else %}
(void) self;
{% endif %}
{% for arg in method.arguments %}
{% if arg.type.category == "enum" %}
if (!CheckEnum{{as_cType(arg.type.name)}}({{as_varName(arg.name)}})) return false;
{% elif arg.type.category == "bitmask" %}
if (!CheckBitmask{{as_cType(arg.type.name)}}({{as_varName(arg.name)}})) return false;
{% else %}
(void) {{as_varName(arg.name)}};
{% endif %}
{% endfor %}
return true;
}
//* Entry point with validation
{{as_backendType(method.return_type)}} Validating{{suffix}}(
{{-as_backendType(type)}} self
{%- for arg in method.arguments -%}
, {{as_annotated_backendType(arg)}}
{%- endfor -%}
) {
//* Do the autogenerated checks
bool valid = ValidateBase{{suffix}}(self
{%- for arg in method.arguments -%}
, {{as_varName(arg.name)}}
{%- endfor -%}
);
{% if suffix in methodsWithExtraValidation %}
if (valid) {
valid = self->Validate{{method.name.CamelCase()}}(
{%- for arg in method.arguments -%}
{% if not loop.first %}, {% endif %}{{as_varName(arg.name)}}
{%- endfor -%}
);
}
{% endif %}
//* TODO Do the hand-written checks if necessary
//* On success, forward the arguments to the method, else error out without calling it
if (!valid) {
// TODO get the device and give it the error?
std::cout << "Error in {{suffix}}" << std::endl;
}
{% if method.return_type.name.canonical_case() == "void" %}
if (!valid) return;
{% else %}
if (!valid) {
return {};
}
auto result =
{%- endif %}
self->{{method.name.CamelCase()}}(
{%- for arg in method.arguments -%}
{%- if not loop.first %}, {% endif -%}
{%- if arg.type.category in ["enum", "bitmask"] -%}
static_cast<nxt::{{as_cppType(arg.type.name)}}>({{as_varName(arg.name)}})
{%- else -%}
{{as_varName(arg.name)}}
{%- endif -%}
{%- endfor -%}
);
{% if method.return_type.name.canonical_case() != "void" %}
return reinterpret_cast<{{as_backendType(method.return_type)}}>(result);
{% endif %}
}
{% endfor %}
{% endfor %}
}
nxtProcTable GetNonValidatingProcs() {
nxtProcTable table;
{% for type in by_category["object"] %}
{% for method in native_methods(type) %}
table.{{as_varName(type.name, method.name)}} = reinterpret_cast<{{as_cProc(type.name, method.name)}}>(NonValidating{{as_MethodSuffix(type.name, method.name)}});
{% endfor %}
{% endfor %}
return table;
}
nxtProcTable GetValidatingProcs() {
nxtProcTable table;
{% for type in by_category["object"] %}
{% for method in native_methods(type) %}
table.{{as_varName(type.name, method.name)}} = reinterpret_cast<{{as_cProc(type.name, method.name)}}>(Validating{{as_MethodSuffix(type.name, method.name)}});
{% endfor %}
{% endfor %}
return table;
}
}
}
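
For orientation, here is a hedged sketch of what this template might expand to for a single entry point, using the "buffer builder" / "set size" pair declared in next.json below. The mangled names and the backend type are assumptions about helpers defined earlier in the generator, not verified output.

    // Hypothetical expansion for "buffer builder"::"set size"; only the shape follows the template.
    void NonValidatingBufferBuilderSetSize(BufferBuilder* self, uint32_t size) {
        self->SetSize(size);
    }
    bool ValidateBaseBufferBuilderSetSize(BufferBuilder* self, uint32_t size) {
        if (self->WasConsumed()) return false; // builders may only produce one result
        (void) size; // a plain uint32_t argument needs no range check
        return true;
    }
    void ValidatingBufferBuilderSetSize(BufferBuilder* self, uint32_t size) {
        if (!ValidateBaseBufferBuilderSetSize(self, size)) {
            std::cout << "Error in BufferBuilderSetSize" << std::endl;
            return;
        }
        self->SetSize(size);
    }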

46
generator/templates/api.c Normal file
View File

@ -0,0 +1,46 @@
//* Copyright 2017 The NXT Authors
//*
//* Licensed under the Apache License, Version 2.0 (the "License");
//* you may not use this file except in compliance with the License.
//* You may obtain a copy of the License at
//*
//* http://www.apache.org/licenses/LICENSE-2.0
//*
//* Unless required by applicable law or agreed to in writing, software
//* distributed under the License is distributed on an "AS IS" BASIS,
//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//* See the License for the specific language governing permissions and
//* limitations under the License.
#include "nxt/nxt.h"
static nxtProcTable procs;
static nxtProcTable nullProcs;
void nxtSetProcs(const nxtProcTable* procs_) {
if (procs_) {
procs = *procs_;
} else {
procs = nullProcs;
}
}
{% for type in by_category["object"] %}
{% for method in native_methods(type) %}
{{as_cType(method.return_type.name)}} {{as_cMethod(type.name, method.name)}}(
{{-as_cType(type.name)}} {{as_varName(type.name)}}
{%- for arg in method.arguments -%}
, {{as_annotated_cType(arg)}}
{%- endfor -%}
) {
{% if method.return_type.name.canonical_case() != "void" %}return {% endif %}
procs.{{as_varName(type.name, method.name)}}({{as_varName(type.name)}}
{%- for arg in method.arguments -%}
, {{as_varName(arg.name)}}
{%- endfor -%}
);
}
{% endfor %}
{% endfor %}
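
A hedged sketch of the C shim this template emits for one method; the exact identifiers produced by as_cMethod and as_varName are assumed.

    // Hypothetical expansion for "buffer builder"::"set size":
    // the exported C function simply forwards to the currently installed proc table.
    void nxtBufferBuilderSetSize(nxtBufferBuilder bufferBuilder, uint32_t size) {
        procs.bufferBuilderSetSize(bufferBuilder, size);
    }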

85
generator/templates/api.h Normal file
View File

@ -0,0 +1,85 @@
//* Copyright 2017 The NXT Authors
//*
//* Licensed under the Apache License, Version 2.0 (the "License");
//* you may not use this file except in compliance with the License.
//* You may obtain a copy of the License at
//*
//* http://www.apache.org/licenses/LICENSE-2.0
//*
//* Unless required by applicable law or agreed to in writing, software
//* distributed under the License is distributed on an "AS IS" BASIS,
//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//* See the License for the specific language governing permissions and
//* limitations under the License.
#ifndef NXT_H
#define NXT_H
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
{% for type in by_category["object"] %}
typedef struct {{as_cType(type.name)}}Impl* {{as_cType(type.name)}};
{% endfor %}
{% for type in by_category["enum"] + by_category["bitmask"] %}
typedef enum {
{% for value in type.values %}
{{as_cEnum(type.name, value.name)}} = 0x{{format(value.value, "08X")}},
{% endfor %}
{{as_cEnum(type.name, Name("force32"))}} = 0x7FFFFFFF
} {{as_cType(type.name)}};
{% endfor %}
#ifdef __cplusplus
extern "C" {
#endif
{% for type in by_category["object"] %}
// Procs of {{type.name.CamelCase()}}
{% for method in native_methods(type) %}
typedef {{as_cType(method.return_type.name)}} (*{{as_cProc(type.name, method.name)}})(
{{-as_cType(type.name)}} {{as_varName(type.name)}}
{%- for arg in method.arguments -%}
, {{as_annotated_cType(arg)}}
{%- endfor -%}
);
{% endfor %}
{% endfor %}
struct nxtProcTable_s {
{% for type in by_category["object"] %}
{% for method in native_methods(type) %}
{{as_cProc(type.name, method.name)}} {{as_varName(type.name, method.name)}};
{% endfor %}
{% endfor %}
};
typedef struct nxtProcTable_s nxtProcTable;
// Stuff below is for convenience and will forward calls to a static nxtProcTable.
// Set which nxtProcTable will be used
void nxtSetProcs(const nxtProcTable* procs);
{% for type in by_category["object"] %}
// Methods of {{type.name.CamelCase()}}
{% for method in native_methods(type) %}
{{as_cType(method.return_type.name)}} {{as_cMethod(type.name, method.name)}}(
{{-as_cType(type.name)}} {{as_varName(type.name)}}
{%- for arg in method.arguments -%}
, {{as_annotated_cType(arg)}}
{%- endfor -%}
);
{% endfor %}
{% endfor %}
#ifdef __cplusplus
} // extern "C"
#endif
#endif // NXT_H
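
A hedged excerpt of what the generated nxt/nxt.h might contain for the "buffer builder" object and the "buffer usage bit" bitmask declared in next.json; the identifier spellings are assumptions about as_cType, as_cEnum and as_cProc.

    // Hypothetical generated declarations.
    typedef struct nxtBufferBuilderImpl* nxtBufferBuilder;

    typedef enum {
        NXT_BUFFER_USAGE_BIT_NONE = 0x00000000,
        NXT_BUFFER_USAGE_BIT_VERTEX = 0x00000010,
        NXT_BUFFER_USAGE_BIT_FORCE32 = 0x7FFFFFFF
    } nxtBufferUsageBit;

    // Proc pointer stored in nxtProcTable, and the convenience entry point that wraps it.
    typedef void (*nxtBufferBuilderSetSizeProc)(nxtBufferBuilder bufferBuilder, uint32_t size);
    void nxtBufferBuilderSetSize(nxtBufferBuilder bufferBuilder, uint32_t size);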

View File

@ -0,0 +1,109 @@
//* Copyright 2017 The NXT Authors
//*
//* Licensed under the Apache License, Version 2.0 (the "License");
//* you may not use this file except in compliance with the License.
//* You may obtain a copy of the License at
//*
//* http://www.apache.org/licenses/LICENSE-2.0
//*
//* Unless required by applicable law or agreed to in writing, software
//* distributed under the License is distributed on an "AS IS" BASIS,
//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//* See the License for the specific language governing permissions and
//* limitations under the License.
#include "nxtcpp.h"
namespace nxt {
{% for type in by_category["enum"] + by_category["bitmask"] %}
{% set CppType = as_cppType(type.name) %}
{% set CType = as_cType(type.name) %}
static_assert(sizeof({{CppType}}) == sizeof({{CType}}), "sizeof mismatch for {{CppType}}");
static_assert(alignof({{CppType}}) == alignof({{CType}}), "alignof mismatch for {{CppType}}");
{% for value in type.values %}
static_assert(static_cast<uint32_t>({{CppType}}::{{as_cppEnum(value.name)}}) == {{as_cEnum(type.name, value.name)}}, "value mismatch for {{CppType}}::{{as_cppEnum(value.name)}}");
{% endfor %}
{% endfor %}
{% for type in by_category["object"] %}
{% set CppType = as_cppType(type.name) %}
{% set CType = as_cType(type.name) %}
static_assert(sizeof({{CppType}}) == sizeof({{CType}}), "sizeof mismatch for {{CppType}}");
static_assert(alignof({{CppType}}) == alignof({{CType}}), "alignof mismatch for {{CppType}}");
{% macro render_cpp_method_declaration(type, method) %}
{% set CppType = as_cppType(type.name) %}
{% if method.return_type.name.concatcase() == "void" -%}
{{CppType}} const&
{%- else -%}
{{as_cppType(method.return_type.name)}}
{%- endif -%}
{{" "}}{{CppType}}::{{method.name.CamelCase()}}(
{%- for arg in method.arguments -%}
{%- if not loop.first %}, {% endif -%}
{%- if arg.type.category == "object" and arg.annotation == "value" -%}
{{as_cppType(arg.type.name)}} const& {{as_varName(arg.name)}}
{%- else -%}
{{as_annotated_cppType(arg)}}
{%- endif -%}
{%- endfor -%}
) const
{%- endmacro %}
{% macro render_cpp_to_c_method_call(type, method) -%}
{{as_cMethod(type.name, method.name)}}(Get()
{%- for arg in method.arguments -%},{{" "}}
{%- if arg.annotation == "value" -%}
{%- if arg.type.category == "object" -%}
{{as_varName(arg.name)}}.Get()
{%- elif arg.type.category == "enum" or arg.type.category == "bitmask" -%}
static_cast<{{as_cType(arg.type.name)}}>({{as_varName(arg.name)}})
{%- elif arg.type.category == "native" -%}
{{as_varName(arg.name)}}
{%- else -%}
UNHANDLED
{%- endif -%}
{%- else -%}
reinterpret_cast<{{decorate("", as_cType(arg.type.name), arg)}}>({{as_varName(arg.name)}})
{%- endif -%}
{%- endfor -%}
)
{%- endmacro %}
{% for method in type.methods %}
{{render_cpp_method_declaration(type, method)}} {
{% if method.return_type.name.concatcase() == "void" %}
{{render_cpp_to_c_method_call(type, method)}};
return *this;
{% else %}
auto result = {{render_cpp_to_c_method_call(type, method)}};
{% if method.return_type.category == "native" %}
return result;
{% elif method.return_type.category == "object" %}
return {{as_cppType(method.return_type.name)}}::Acquire(result);
{% else %}
return static_cast<{{as_cppType(method.return_type.name)}}>(result);
{% endif%}
{% endif %}
}
{% endfor %}
void {{CppType}}::NxtReference({{CType}} handle) {
if (handle != nullptr) {
{{as_cMethod(type.name, Name("reference"))}}(handle);
}
}
void {{CppType}}::NxtRelease({{CType}} handle) {
if (handle != nullptr) {
{{as_cMethod(type.name, Name("release"))}}(handle);
}
}
{% endfor %}
}
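
Two hedged examples of the C++ methods this template produces: a void-returning setter becomes a const method that returns *this so calls can chain, and an object-returning method wraps the C handle through Acquire. The names are assumptions built from next.json.

    // Hypothetical expansion for "buffer builder"::"set size" and ::"get result".
    BufferBuilder const& BufferBuilder::SetSize(uint32_t size) const {
        nxtBufferBuilderSetSize(Get(), size);
        return *this;
    }
    Buffer BufferBuilder::GetResult() const {
        auto result = nxtBufferBuilderGetResult(Get());
        return Buffer::Acquire(result);
    }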

View File

@ -0,0 +1,149 @@
//* Copyright 2017 The NXT Authors
//*
//* Licensed under the Apache License, Version 2.0 (the "License");
//* you may not use this file except in compliance with the License.
//* You may obtain a copy of the License at
//*
//* http://www.apache.org/licenses/LICENSE-2.0
//*
//* Unless required by applicable law or agreed to in writing, software
//* distributed under the License is distributed on an "AS IS" BASIS,
//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//* See the License for the specific language governing permissions and
//* limitations under the License.
#ifndef NXTCPP_H
#define NXTCPP_H
#include "nxt.h"
#include "nxt/EnumClassBitmasks.h"
namespace nxt {
{% for type in by_category["enum"] %}
enum class {{as_cppType(type.name)}} : uint32_t {
{% for value in type.values %}
{{as_cppEnum(value.name)}} = 0x{{format(value.value, "08X")}},
{% endfor %}
};
{% endfor %}
{% for type in by_category["bitmask"] %}
enum class {{as_cppType(type.name)}} : uint32_t {
{% for value in type.values %}
{{as_cppEnum(value.name)}} = 0x{{format(value.value, "08X")}},
{% endfor %}
};
{% endfor %}
{% for type in by_category["bitmask"] %}
template<>
struct IsNXTBitmask<{{as_cppType(type.name)}}> {
static constexpr bool enable = true;
};
{% endfor %}
{% for type in by_category["object"] %}
class {{as_cppType(type.name)}};
{% endfor %}
template<typename Derived, typename CType>
class ObjectBase {
public:
ObjectBase(): handle(nullptr) {
}
ObjectBase(CType handle): handle(handle) {
if (handle) Derived::NxtReference(handle);
}
~ObjectBase() {
if (handle) Derived::NxtRelease(handle);
}
ObjectBase(ObjectBase const& other) = delete;
Derived& operator=(ObjectBase const& other) = delete;
ObjectBase(ObjectBase&& other): handle(other.handle) {
other.handle = 0;
}
Derived& operator=(ObjectBase&& other) {
if (&other == this) return static_cast<Derived&>(*this);
Derived::NxtRelease(handle);
handle = other.handle;
other.handle = 0;
return static_cast<Derived&>(*this);
}
explicit operator bool() const {
return handle != nullptr;
}
CType Get() const {
return handle;
}
CType Release() {
CType result = handle;
handle = 0;
return result;
}
Derived Clone() const {
return Derived(handle);
}
static Derived Acquire(CType handle) {
Derived result;
result.handle = handle;
return result;
}
protected:
CType handle;
};
{% macro render_cpp_method_declaration(type, method) %}
{% set CppType = as_cppType(type.name) %}
{% if method.return_type.name.concatcase() == "void" -%}
{{CppType}} const&
{%- else -%}
{{as_cppType(method.return_type.name)}}
{%- endif -%}
{{" "}}{{method.name.CamelCase()}}(
{%- for arg in method.arguments -%}
{%- if not loop.first %}, {% endif -%}
{%- if arg.type.category == "object" and arg.annotation == "value" -%}
{{as_cppType(arg.type.name)}} const& {{as_varName(arg.name)}}
{%- else -%}
{{as_annotated_cppType(arg)}}
{%- endif -%}
{%- endfor -%}
) const
{%- endmacro %}
{% for type in by_category["object"] %}
{% set CppType = as_cppType(type.name) %}
{% set CType = as_cType(type.name) %}
class {{CppType}} : public ObjectBase<{{CppType}}, {{CType}}> {
public:
using ObjectBase::ObjectBase;
using ObjectBase::operator=;
{% for method in type.methods %}
{{render_cpp_method_declaration(type, method)}};
{% endfor %}
private:
friend ObjectBase<{{CppType}}, {{CType}}>;
static void NxtReference({{CType}} handle);
static void NxtRelease({{CType}} handle);
};
{% endfor %}
} // namespace nxt
#endif // NXTCPP_H
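
To show how the wrapper is meant to be used, a hedged usage sketch: the builder methods come from next.json, while the chaining style and the exact enum spelling are assumptions.

    // Hypothetical client code using the generated C++ API.
    #include "nxt/nxtcpp.h"

    nxt::Buffer MakeVertexBuffer(nxt::Device& device) {
        // Each setter returns the builder itself, so the calls chain;
        // GetResult() returns an RAII wrapper that releases the handle when it goes out of scope.
        return device.CreateBufferBuilder()
            .SetAllowedUsage(nxt::BufferUsageBit::Vertex)
            .SetInitialUsage(nxt::BufferUsageBit::Vertex)
            .SetSize(256)
            .GetResult();
    }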

View File

@ -0,0 +1,33 @@
//* Copyright 2017 The NXT Authors
//*
//* Licensed under the Apache License, Version 2.0 (the "License");
//* you may not use this file except in compliance with the License.
//* You may obtain a copy of the License at
//*
//* http://www.apache.org/licenses/LICENSE-2.0
//*
//* Unless required by applicable law or agreed to in writing, software
//* distributed under the License is distributed on an "AS IS" BASIS,
//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//* See the License for the specific language governing permissions and
//* limitations under the License.
#ifndef NXTForward_H
#define NXTForward_H
namespace blink {
{% for other_type in by_category["object"] %}
class NXT{{other_type.name.CamelCase()}};
{% endfor %}
}
struct nxtProcTable_s;
typedef struct nxtProcTable_s nxtProcTable;
{% for type in by_category["object"] %}
typedef struct {{as_cType(type.name)}}Impl* {{as_cType(type.name)}};
{% endfor %}
#endif //NXTForward_H

View File

@ -0,0 +1,99 @@
//* Copyright 2017 The NXT Authors
//*
//* Licensed under the Apache License, Version 2.0 (the "License");
//* you may not use this file except in compliance with the License.
//* You may obtain a copy of the License at
//*
//* http://www.apache.org/licenses/LICENSE-2.0
//*
//* Unless required by applicable law or agreed to in writing, software
//* distributed under the License is distributed on an "AS IS" BASIS,
//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//* See the License for the specific language governing permissions and
//* limitations under the License.
{% macro blinkType(type) -%}
{%- if type.category == "object" -%}
NXT{{type.name.CamelCase()}}*
{%- elif type.category == "enum" or type.category == "bitmask" -%}
uint32_t
{%- else -%}
{{as_cType(type.name)}}
{%- endif -%}
{%- endmacro %}
{% set Class = "NXT" + type.name.CamelCase() %}
#ifndef {{Class}}_H
#define {{Class}}_H
#include "bindings/core/v8/ScriptWrappable.h"
#include "platform/heap/GarbageCollected.h"
#include "wtf/text/WTFString.h"
#include "../NXTState.h"
#include "Forward.h"
namespace blink {
class {{Class}} final :
public GarbageCollectedFinalized<{{Class}}>,
public ScriptWrappable {
WTF_MAKE_NONCOPYABLE({{Class}});
USING_PRE_FINALIZER({{Class}}, dispose);
public:
DEFINE_INLINE_TRACE() {
visitor->trace(m_state);
};
public:
DEFINE_WRAPPERTYPEINFO();
public:
{{Class}}({{as_cType(type.name)}} self, Member<NXTState> state);
void dispose();
{% for method in type.methods %}
{% if method.return_type.name.concatcase() == "void" %}
{{Class}}*
{%- else %}
{{blinkType(method.return_type)}}
{%- endif -%}
{{" "}}{{method.name.camelCase()}}(
{%- for arg in method.arguments -%}
{%- if not loop.first %}, {% endif -%}
{%- if arg.annotation == "value" -%}
{{blinkType(arg.type)}} {{as_varName(arg.name)}}
{%- elif arg.annotation == "const*" and arg.length == "strlen" -%}
String {{as_varName(arg.name)}}
{%- else -%}
{%- if arg.type.category == "object" -%}
const HeapVector<Member<NXT{{(arg.type.name.CamelCase())}}>>& {{as_varName(arg.name)}}
{%- else -%}
const Vector<{{blinkType(arg.type)}}>& {{as_varName(arg.name)}}
{%- endif -%}
{%- endif -%}
{%- endfor -%}
);
{% endfor %}
{{as_cType(type.name)}} GetNXT();
{% if type.name.canonical_case() == "device" %}
{% for type in by_category["enum"] + by_category["bitmask"] %}
{% for value in type.values %}
static constexpr uint32_t k{{type.name.CamelCase()}}{{value.name.CamelCase()}} = 0x{{format(value.value, "08X")}};
{% endfor %}
{% endfor %}
{% endif %}
private:
{{as_cType(type.name)}} m_self = nullptr;
Member<NXTState> m_state;
};
}
#endif // {{Class}}_H

View File

@ -0,0 +1,55 @@
//* Copyright 2017 The NXT Authors
//*
//* Licensed under the Apache License, Version 2.0 (the "License");
//* you may not use this file except in compliance with the License.
//* You may obtain a copy of the License at
//*
//* http://www.apache.org/licenses/LICENSE-2.0
//*
//* Unless required by applicable law or agreed to in writing, software
//* distributed under the License is distributed on an "AS IS" BASIS,
//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//* See the License for the specific language governing permissions and
//* limitations under the License.
typedef unsigned long uint32_t;
{% macro idlType(type) -%}
{%- if type.category == "object" -%}
NXT{{type.name.CamelCase()}}
{%- elif type.category == "enum" or type.category == "bitmask" -%}
uint32_t
{%- else -%}
{{as_cType(type.name)}}
{%- endif -%}
{%- endmacro %}
interface {{idlType(type)}} {
{% if type.name.canonical_case() == "device" %}
{% for type in by_category["enum"] + by_category["bitmask"] %}
{% for value in type.values %}
const uint32_t {{type.name.SNAKE_CASE()}}_{{value.name.SNAKE_CASE()}} = 0x{{format(value.value, "08X")}};
{% endfor %}
{% endfor %}
{% endif %}
{% for method in type.methods %}
{% if method.return_type.name.concatcase() == "void" %}
{{idlType(type)}}
{%- else %}
{{idlType(method.return_type)}}
{%- endif -%}
{{" "}}{{method.name.camelCase()}}(
{%- for arg in method.arguments -%}
{%- if not loop.first %}, {% endif -%}
{%- if arg.annotation == "value" -%}
{{idlType(arg.type)}} {{as_varName(arg.name)}}
{%- elif arg.annotation == "const*" and arg.length == "strlen" -%}
DOMString {{as_varName(arg.name)}}
{%- else -%}
{{idlType(arg.type)}}[] {{as_varName(arg.name)}}
{%- endif -%}
{%- endfor -%}
);
{% endfor %}
};

View File

@ -0,0 +1,117 @@
//* Copyright 2017 The NXT Authors
//*
//* Licensed under the Apache License, Version 2.0 (the "License");
//* you may not use this file except in compliance with the License.
//* You may obtain a copy of the License at
//*
//* http://www.apache.org/licenses/LICENSE-2.0
//*
//* Unless required by applicable law or agreed to in writing, software
//* distributed under the License is distributed on an "AS IS" BASIS,
//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//* See the License for the specific language governing permissions and
//* limitations under the License.
{% macro blinkType(type) -%}
{%- if type.category == "object" -%}
NXT{{type.name.CamelCase()}}*
{%- elif type.category == "enum" or type.category == "bitmask" -%}
uint32_t
{%- else -%}
{{as_cType(type.name)}}
{%- endif -%}
{%- endmacro %}
{% for other_type in by_category["object"] %}
#include "NXT{{other_type.name.CamelCase()}}.h"
{% endfor %}
#include "nxt/nxt.h"
#include "wtf/text/StringUTF8Adaptor.h"
namespace blink {
{% for type in by_category["object"] %}
{% set Class = "NXT" + type.name.CamelCase() %}
{{Class}}::{{Class}}({{as_cType(type.name)}} self, Member<NXTState> state)
: m_self(self), m_state(state) {
}
void {{Class}}::dispose() {
{% if type.name.canonical_case() != "device" %}
m_state->getProcTable()->{{as_varName(type.name, Name("release"))}}(m_self);
{% endif %}
}
{% for method in type.methods %}
{% if method.return_type.name.concatcase() == "void" %}
{{Class}}*
{%- else %}
{{blinkType(method.return_type)}}
{%- endif -%}
{{" "}}{{Class}}::{{method.name.camelCase()}}(
{%- for arg in method.arguments -%}
{%- if not loop.first %}, {% endif -%}
{%- if arg.annotation == "value" -%}
{{blinkType(arg.type)}} {{as_varName(arg.name)}}_
{%- elif arg.annotation == "const*" and arg.length == "strlen" -%}
String {{as_varName(arg.name)}}_
{%- else -%}
{%- if arg.type.category == "object" -%}
const HeapVector<Member<NXT{{(arg.type.name.CamelCase())}}>>& {{as_varName(arg.name)}}_
{%- else -%}
const Vector<{{blinkType(arg.type)}}>& {{as_varName(arg.name)}}_
{%- endif -%}
{%- endif -%}
{%- endfor -%}
) {
{% for arg in method.arguments %}
{% set argName = as_varName(arg.name) %}
{% set cType = as_cType(arg.type.name) %}
{% if arg.annotation == "value" %}
{% if arg.type.category == "object" %}
{{cType}} {{argName}} = {{argName}}_->GetNXT();
{% else %}
{{cType}} {{argName}} = static_cast<{{cType}}>({{argName}}_);
{% endif %}
{% elif arg.annotation == "const*" %}
{% if arg.length == "strlen" %}
WTF::StringUTF8Adaptor {{argName}}Adaptor({{argName}}_);
std::string {{argName}}String({{argName}}Adaptor.data(), {{argName}}Adaptor.length());
const char* {{argName}} = {{argName}}String.c_str();
{% elif arg.type.category == "object" %}
//* TODO error on bad length
auto {{argName}}Array = std::unique_ptr<{{cType}}[]>(new {{cType}}[{{argName}}_.size()]);
for (size_t i = 0; i < {{argName}}_.size(); i++) {
{{argName}}Array[i] = {{argName}}_[i]->GetNXT();
}
const {{cType}}* {{argName}} = &{{argName}}Array[0];
{% else %}
//* TODO error on bad length
const {{cType}}* {{argName}} = {{argName}}_.data();
{% endif %}
{% endif %}
{% endfor %}
{% if method.return_type.name.concatcase() != "void" %}
auto result =
{%- endif %}
m_state->getProcTable()->{{as_varName(type.name, method.name)}}(m_self
{%- for arg in method.arguments -%}
, {{as_varName(arg.name)}}
{%- endfor -%}
);
{% if method.return_type.name.concatcase() == "void" %}
return this;
{% else %}
// TODO actually return the object given by the call to the procs
return new NXT{{method.return_type.name.CamelCase()}}(result, m_state);
{% endif %}
}
{% endfor %}
{{as_cType(type.name)}} {{Class}}::GetNXT() {
return m_self;
}
{% endfor %}
}
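
A hedged sketch of one generated Blink wrapper method, again for "buffer builder"::"set size"; the proc-table member name and the NXTState API are assumptions based on the template above.

    // Hypothetical expansion: value arguments are converted to the C types,
    // the call goes through the proc table held by NXTState, and the wrapper
    // returns itself so the bindings can chain calls.
    NXTBufferBuilder* NXTBufferBuilder::setSize(uint32_t size_) {
        uint32_t size = static_cast<uint32_t>(size_);
        m_state->getProcTable()->bufferBuilderSetSize(m_self, size);
        return this;
    }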

View File

@ -0,0 +1,53 @@
//* Copyright 2017 The NXT Authors
//*
//* Licensed under the Apache License, Version 2.0 (the "License");
//* you may not use this file except in compliance with the License.
//* You may obtain a copy of the License at
//*
//* http://www.apache.org/licenses/LICENSE-2.0
//*
//* Unless required by applicable law or agreed to in writing, software
//* distributed under the License is distributed on an "AS IS" BASIS,
//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//* See the License for the specific language governing permissions and
//* limitations under the License.
typedef unsigned long uint32_t;
{% macro idlType(type) -%}
{%- if type.category == "object" -%}
NXT{{type.name.CamelCase()}}
{%- else -%}
{{as_cType(type.name)}}
{%- endif -%}
{%- endmacro %}
interface {{idlType(type)}} {
{% if type.name.canonical_case() == "device" %}
{% for type in by_category["enum"] + by_category["bitmask"] %}
{% for value in type.values %}
const uint32_t {{type.name.SNAKE_CASE()}}_{{value.name.SNAKE_CASE()}} = 0x{{format(value.value, "08X")}};
{% endfor %}
{% endfor %}
{% endif %}
{% for method in type.methods %}
{% if method.return_type.name.concatcase() == "void" %}
{{idlType(type)}}
{%- else %}
{{idlType(method.return_type)}}
{%- endif -%}
{{" "}}{{method.name.camelCase()}}(
{%- for arg in method.arguments -%}
{%- if not loop.first %}, {% endif -%}
{%- if arg.annotation == "value" -%}
{{idlType(arg.type)}} {{as_varName(arg.name)}}
{%- elif arg.annotation == "const*" and arg.length == "strlen" -%}
DOMString {{as_varName(arg.name)}}
{%- else -%}
{{idlType(arg.type)}}[] {{as_varName(arg.name)}}
{%- endif -%}
{%- endfor -%}
);
{% endfor %}
};

View File

@ -0,0 +1,27 @@
//* Copyright 2017 The NXT Authors
//*
//* Licensed under the Apache License, Version 2.0 (the "License");
//* you may not use this file except in compliance with the License.
//* You may obtain a copy of the License at
//*
//* http://www.apache.org/licenses/LICENSE-2.0
//*
//* Unless required by applicable law or agreed to in writing, software
//* distributed under the License is distributed on an "AS IS" BASIS,
//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//* See the License for the specific language governing permissions and
//* limitations under the License.
autogen_nxt_sources = [
"gen/NXT.cpp",
"gen/Forward.h",
{% for type in by_category["object"] %}
"gen/NXT{{type.name.CamelCase()}}.h",
{% endfor %}
]
autogen_nxt_idl = [
{% for type in by_category["object"] %}
"nxt/gen/NXT{{type.name.CamelCase()}}.idl",
{% endfor %}
]

View File

@ -0,0 +1,60 @@
//* Copyright 2017 The NXT Authors
//*
//* Licensed under the Apache License, Version 2.0 (the "License");
//* you may not use this file except in compliance with the License.
//* You may obtain a copy of the License at
//*
//* http://www.apache.org/licenses/LICENSE-2.0
//*
//* Unless required by applicable law or agreed to in writing, software
//* distributed under the License is distributed on an "AS IS" BASIS,
//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//* See the License for the specific language governing permissions and
//* limitations under the License.
#include "mock_nxt.h"
namespace {
{% for type in by_category["object"] %}
{% for method in native_methods(type) %}
{{as_cType(method.return_type.name)}} Forward{{as_MethodSuffix(type.name, method.name)}}(
{{-as_cType(type.name)}} self
{%- for arg in method.arguments -%}
, {{as_annotated_cType(arg)}}
{%- endfor -%}
) {
auto tablePtr = reinterpret_cast<ProcTableAsClass**>(self);
return (*tablePtr)->{{as_MethodSuffix(type.name, method.name)}}(self
{%- for arg in method.arguments -%}
, {{as_varName(arg.name)}}
{%- endfor -%}
);
}
{% endfor %}
{% endfor %}
}
ProcTableAsClass::~ProcTableAsClass() {
for (auto ptr : selfPtrs) {
delete ptr;
}
}
void ProcTableAsClass::GetProcTableAndDevice(nxtProcTable* table, nxtDevice* device) {
*device = GetNewDevice();
{% for type in by_category["object"] %}
{% for method in native_methods(type) %}
table->{{as_varName(type.name, method.name)}} = reinterpret_cast<{{as_cProc(type.name, method.name)}}>(Forward{{as_MethodSuffix(type.name, method.name)}});
{% endfor %}
{% endfor %}
}
{% for type in by_category["object"] %}
{{as_cType(type.name)}} ProcTableAsClass::GetNew{{type.name.CamelCase()}}() {
auto self = new ProcTableAsClass*(this);
selfPtrs.push_back(self);
return reinterpret_cast<{{as_cType(type.name)}}>(self);
}
{% endfor %}

View File

@ -0,0 +1,64 @@
//* Copyright 2017 The NXT Authors
//*
//* Licensed under the Apache License, Version 2.0 (the "License");
//* you may not use this file except in compliance with the License.
//* You may obtain a copy of the License at
//*
//* http://www.apache.org/licenses/LICENSE-2.0
//*
//* Unless required by applicable law or agreed to in writing, software
//* distributed under the License is distributed on an "AS IS" BASIS,
//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//* See the License for the specific language governing permissions and
//* limitations under the License.
#ifndef MOCK_NXT_H
#define MOCK_NXT_H
#include <gmock/gmock.h>
#include <nxt/nxt.h>
class ProcTableAsClass {
public:
virtual ~ProcTableAsClass();
void GetProcTableAndDevice(nxtProcTable* table, nxtDevice* device);
{% for type in by_category["object"] %}
{{as_cType(type.name)}} GetNew{{type.name.CamelCase()}}();
{% endfor %}
{% for type in by_category["object"] %}
{% for method in native_methods(type) %}
virtual {{as_cType(method.return_type.name)}} {{as_MethodSuffix(type.name, method.name)}}(
{{-as_cType(type.name)}} {{as_varName(type.name)}}
{%- for arg in method.arguments -%}
, {{as_annotated_cType(arg)}}
{%- endfor -%}
) = 0;
{% endfor %}
{% endfor %}
private:
std::vector<ProcTableAsClass**> selfPtrs;
};
class MockProcTable : public ProcTableAsClass {
public:
{% for type in by_category["object"] %}
{% for method in native_methods(type) %}
MOCK_METHOD{{len(method.arguments) + 1}}(
{{-as_MethodSuffix(type.name, method.name)}},
{{as_cType(method.return_type.name)}}(
{{-as_cType(type.name)}} {{as_varName(type.name)}}
{%- for arg in method.arguments -%}
, {{as_annotated_cType(arg)}}
{%- endfor -%}
));
{% endfor %}
{% endfor %}
};
#endif // MOCK_NXT_H
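
A hedged sketch of how a test could drive the generated mock: handles returned by ProcTableAsClass are really pointers back to the class, so a call made through the C API ends up on the gmock method. The test below is illustrative only and not part of this commit.

    // Hypothetical gtest/gmock usage.
    #include "mock/mock_nxt.h"

    TEST(MockNXT, ForwardsBufferBuilderSetSize) {
        MockProcTable mock;
        nxtProcTable procs;
        nxtDevice device;
        mock.GetProcTableAndDevice(&procs, &device); // device unused in this sketch
        nxtSetProcs(&procs); // route the static C entry points to this table

        nxtBufferBuilder builder = mock.GetNewBufferBuilder();
        EXPECT_CALL(mock, BufferBuilderSetSize(builder, 256)).Times(1);
        nxtBufferBuilderSetSize(builder, 256);
    }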

View File

@ -0,0 +1,221 @@
//* Copyright 2017 The NXT Authors
//*
//* Licensed under the Apache License, Version 2.0 (the "License");
//* you may not use this file except in compliance with the License.
//* You may obtain a copy of the License at
//*
//* http://www.apache.org/licenses/LICENSE-2.0
//*
//* Unless required by applicable law or agreed to in writing, software
//* distributed under the License is distributed on an "AS IS" BASIS,
//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//* See the License for the specific language governing permissions and
//* limitations under the License.
#include "Wire.h"
#include "WireCmd.h"
#include <cstring>
#include <vector>
namespace nxt {
namespace wire {
//* Client-side implementation of the API. It serializes everything to memory to be sent to the server side.
namespace client {
class Device;
//* All non-Device objects of the client side have:
//* - A pointer to the device to get where to serialize commands
//* - The external reference count
//* - An ID that is used to refer to this object when talking with the server side
struct ObjectBase {
ObjectBase(Device* device, uint32_t refcount, uint32_t id)
:device(device), refcount(refcount), id(id) {
}
Device* device;
uint32_t refcount;
uint32_t id;
};
{% for type in by_category["object"] if not type.name.canonical_case() == "device" %}
struct {{type.name.CamelCase()}} : ObjectBase {
using ObjectBase::ObjectBase;
};
{% endfor %}
//* TODO: Remember objects so they can all be destroyed at device destruction.
template<typename T>
class ObjectAllocator {
public:
ObjectAllocator(Device* device) : device(device) {
}
T* New() {
return new T(device, 1, GetNewId());
}
void Free(T* obj) {
FreeId(obj->id);
delete obj;
}
private:
uint32_t GetNewId() {
if (freeIds.empty()) {
return currentId ++;
}
uint32_t id = freeIds.back();
freeIds.pop_back();
return id;
}
void FreeId(uint32_t id) {
freeIds.push_back(id);
};
// 0 is an ID reserved to represent nullptr
uint32_t currentId = 1;
std::vector<uint32_t> freeIds;
Device* device;
};
//* The client wire uses the global NXT device to store its global data such as the serializer
//* and the object id allocators.
class Device : public ObjectBase {
public:
Device(CommandSerializer* serializer)
: ObjectBase(this, 1, 1),
{% for type in by_category["object"] if not type.name.canonical_case() == "device" %}
{{type.name.camelCase()}}(this),
{% endfor %}
serializer(serializer) {
}
void* GetCmdSpace(size_t size) {
return serializer->GetCmdSpace(size);
}
{% for type in by_category["object"] if not type.name.canonical_case() == "device" %}
ObjectAllocator<{{type.name.CamelCase()}}> {{type.name.camelCase()}};
{% endfor %}
private:
CommandSerializer* serializer = nullptr;
};
//* Implementation of the client API functions.
{% for type in by_category["object"] %}
{% set Type = type.name.CamelCase() %}
{% for method in type.methods %}
{% set Suffix = as_MethodSuffix(type.name, method.name) %}
{{as_backendType(method.return_type)}} Client{{Suffix}}(
{{-as_backendType(type)}} self
{%- for arg in method.arguments -%}
, {{as_annotated_backendType(arg)}}
{%- endfor -%}
) {
Device* device = self->device;
wire::{{Suffix}}Cmd cmd;
//* Create the structure that will go over the wire on the stack, and fill in the value
//* arguments so it can compute its size.
{
//* Value objects are stored as IDs
{% for arg in method.arguments if arg.annotation == "value" %}
{% if arg.type.category == "object" %}
cmd.{{as_varName(arg.name)}} = {{as_varName(arg.name)}}->id;
{% else %}
cmd.{{as_varName(arg.name)}} = {{as_varName(arg.name)}};
{% endif %}
{% endfor %}
cmd.self = self->id;
//* The length of const char* is considered a value argument.
{% for arg in method.arguments if arg.length == "strlen" %}
cmd.{{as_varName(arg.name)}}Strlen = strlen({{as_varName(arg.name)}});
{% endfor %}
}
//* Allocate space to send the command and copy the value args over.
size_t requiredSize = cmd.GetRequiredSize();
auto allocCmd = reinterpret_cast<decltype(cmd)*>(device->GetCmdSpace(requiredSize));
*allocCmd = cmd;
//* In the allocated space, write the non-value arguments.
{% for arg in method.arguments if arg.annotation != "value" %}
{% set argName = as_varName(arg.name) %}
{% if arg.length == "strlen" %}
memcpy(allocCmd->GetPtr_{{argName}}(), {{argName}}, allocCmd->{{argName}}Strlen + 1);
{% elif arg.type.category == "object" %}
auto {{argName}}Storage = reinterpret_cast<uint32_t*>(allocCmd->GetPtr_{{argName}}());
for (size_t i = 0; i < {{as_varName(arg.length.name)}}; i++) {
{{argName}}Storage[i] = {{argName}}[i]->id;
}
{% else %}
memcpy(allocCmd->GetPtr_{{argName}}(), {{argName}}, {{as_varName(arg.length.name)}} * sizeof(*{{argName}}));
{% endif %}
{% endfor %}
//* For object creation, store the object ID the client will use for the result.
{% if method.return_type.category == "object" %}
auto result = self->device->{{method.return_type.name.camelCase()}}.New();
allocCmd->resultId = result->id;
return result;
{% endif %}
}
{% endfor %}
{% if not type.name.canonical_case() == "device" %}
//* When an object's refcount reaches 0, notify the server side of it and delete it.
void Client{{as_MethodSuffix(type.name, Name("release"))}}({{Type}}* obj) {
obj->refcount --;
if (obj->refcount > 0) {
return;
}
wire::{{as_MethodSuffix(type.name, Name("destroy"))}}Cmd cmd;
cmd.objectId = obj->id;
size_t requiredSize = cmd.GetRequiredSize();
auto allocCmd = reinterpret_cast<decltype(cmd)*>(obj->device->GetCmdSpace(requiredSize));
*allocCmd = cmd;
obj->device->{{type.name.camelCase()}}.Free(obj);
}
void Client{{as_MethodSuffix(type.name, Name("reference"))}}({{Type}}* obj) {
obj->refcount ++;
}
{% endif %}
{% endfor %}
void ClientDeviceReference(Device* self) {
}
void ClientDeviceRelease(Device* self) {
}
nxtProcTable GetProcs() {
nxtProcTable table;
{% for type in by_category["object"] %}
{% for method in native_methods(type) %}
table.{{as_varName(type.name, method.name)}} = reinterpret_cast<{{as_cProc(type.name, method.name)}}>(Client{{as_MethodSuffix(type.name, method.name)}});
{% endfor %}
{% endfor %}
return table;
}
}
void NewClientDevice(nxtProcTable* procs, nxtDevice* device, CommandSerializer* serializer) {
*device = reinterpret_cast<nxtDeviceImpl*>(new client::Device(serializer));
*procs = client::GetProcs();
}
}
}
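
A hedged expansion of the client-side proc for "buffer"::"set sub data" from next.json: the value arguments and the object ID go into the command structure, and the variable-length data is copied right after it in the serializer's buffer. Identifiers are assumed.

    // Hypothetical generated client proc.
    void ClientBufferSetSubData(Buffer* self, uint32_t start, uint32_t count, const uint32_t* data) {
        Device* device = self->device;
        wire::BufferSetSubDataCmd cmd;
        cmd.start = start;
        cmd.count = count;
        cmd.self = self->id;

        size_t requiredSize = cmd.GetRequiredSize();
        auto allocCmd = reinterpret_cast<decltype(cmd)*>(device->GetCmdSpace(requiredSize));
        *allocCmd = cmd;
        memcpy(allocCmd->GetPtr_data(), data, count * sizeof(*data));
    }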

View File

@ -0,0 +1,75 @@
//* Copyright 2017 The NXT Authors
//*
//* Licensed under the Apache License, Version 2.0 (the "License");
//* you may not use this file except in compliance with the License.
//* You may obtain a copy of the License at
//*
//* http://www.apache.org/licenses/LICENSE-2.0
//*
//* Unless required by applicable law or agreed to in writing, software
//* distributed under the License is distributed on an "AS IS" BASIS,
//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//* See the License for the specific language governing permissions and
//* limitations under the License.
#include "wire/WireCmd_autogen.h"
namespace nxt {
namespace wire {
{% for type in by_category["object"] %}
{% for method in type.methods %}
{% set Suffix = as_MethodSuffix(type.name, method.name) %}
size_t {{Suffix}}Cmd::GetRequiredSize() const {
size_t result = sizeof(*this);
{% for arg in method.arguments if arg.annotation != "value" %}
{% if arg.length == "strlen" %}
result += {{as_varName(arg.name)}}Strlen + 1;
{% elif arg.type.category == "object" %}
result += {{as_varName(arg.length.name)}} * sizeof(uint32_t);
{% else %}
result += {{as_varName(arg.length.name)}} * sizeof({{as_cType(arg.type.name)}});
{% endif %}
{% endfor %}
return result;
}
{% for const in ["", "const"] %}
{% for get_arg in method.arguments if get_arg.annotation != "value" %}
{{const}} uint8_t* {{Suffix}}Cmd::GetPtr_{{as_varName(get_arg.name)}}() {{const}} {
//* Start counting after the current structure
{{const}} uint8_t* ptr = reinterpret_cast<{{const}} uint8_t*>(this + 1);
//* Increment the pointer until we find 'arg', then return early.
//* This means some of the generated code is unreachable, but there is no
//* "break" in Jinja2.
{% for arg in method.arguments if arg.annotation != "value" %}
{% if get_arg == arg %}
return ptr;
{% endif %}
{% if arg.length == "strlen" %}
ptr += {{as_varName(arg.name)}}Strlen + 1;
{% elif arg.type.category == "object" %}
ptr += {{as_varName(arg.length.name)}} * sizeof(uint32_t);
{% else %}
ptr += {{as_varName(arg.length.name)}} * sizeof({{as_cType(arg.type.name)}});
{% endif %}
{% endfor %}
}
{% endfor %}
{% endfor %}
{% endfor %}
{% set Suffix = as_MethodSuffix(type.name, Name("destroy")) %}
size_t {{Suffix}}Cmd::GetRequiredSize() const {
return sizeof(*this);
}
{% endfor %}
}
}
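
For the same hypothetical "buffer"::"set sub data" command, the generated size and pointer helpers would look roughly like this (names assumed).

    size_t BufferSetSubDataCmd::GetRequiredSize() const {
        size_t result = sizeof(*this);
        result += count * sizeof(uint32_t); // the data array follows the struct
        return result;
    }
    uint8_t* BufferSetSubDataCmd::GetPtr_data() {
        // 'data' is the first (and only) non-value argument, so it starts right after the struct.
        return reinterpret_cast<uint8_t*>(this + 1);
    }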

View File

@ -0,0 +1,96 @@
//* Copyright 2017 The NXT Authors
//*
//* Licensed under the Apache License, Version 2.0 (the "License");
//* you may not use this file except in compliance with the License.
//* You may obtain a copy of the License at
//*
//* http://www.apache.org/licenses/LICENSE-2.0
//*
//* Unless required by applicable law or agreed to in writing, software
//* distributed under the License is distributed on an "AS IS" BASIS,
//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//* See the License for the specific language governing permissions and
//* limitations under the License.
#ifndef WIRE_WIRECMD_AUTOGEN_H_
#define WIRE_WIRECMD_AUTOGEN_H_
#include <nxt/nxt.h>
namespace nxt {
namespace wire {
//* Enum used as a prefix to each command on the wire format.
enum class WireCmd : uint32_t {
{% for type in by_category["object"] %}
{% for method in type.methods %}
{{as_MethodSuffix(type.name, method.name)}},
{% endfor %}
{{as_MethodSuffix(type.name, Name("destroy"))}},
{% endfor %}
};
{% for type in by_category["object"] %}
{% for method in type.methods %}
{% set Suffix = as_MethodSuffix(type.name, method.name) %}
//* Structure for the wire format of each of the commands. Parameters passed by value
//* are embedded directly in the structure. Other parameters are assumed to be in the
//* memory directly following the structure in the buffer. Given the value parameters,
//* the structure can compute how much buffer space it needs and where the non-value
//* parameters start in the buffer.
struct {{Suffix}}Cmd {
//* Start the structure with the command ID, so that casting to WireCmd gives the ID.
wire::WireCmd commandId = wire::WireCmd::{{Suffix}};
uint32_t self;
//* Commands that create objects say which ID the created object will be referred to as.
{% if method.return_type.category == "object" %}
uint32_t resultId;
{% endif %}
//* Value types are directly in the command, objects being replaced with their IDs.
{% for arg in method.arguments if arg.annotation == "value" %}
{% if arg.type.category == "object" %}
uint32_t {{as_varName(arg.name)}};
{% else %}
{{as_cType(arg.type.name)}} {{as_varName(arg.name)}};
{% endif %}
{% endfor %}
//* const char* arguments have their length embedded directly in the command.
{% for arg in method.arguments if arg.length == "strlen" %}
size_t {{as_varName(arg.name)}}Strlen;
{% endfor %}
//* The following methods do computation, provided the members for the value parameters
//* have been initialized.
//* Compute how much buffer memory is required to hold the structure and all its arguments.
size_t GetRequiredSize() const;
//* Gets the pointer to the start of the buffer containing a non-value parameter.
{% for get_arg in method.arguments if get_arg.annotation != "value" %}
{% set ArgName = as_varName(get_arg.name) %}
uint8_t* GetPtr_{{ArgName}}();
const uint8_t* GetPtr_{{ArgName}}() const;
{% endfor %}
};
{% endfor %}
//* The command structure sent when an object ID is destroyed.
{% set Suffix = as_MethodSuffix(type.name, Name("destroy")) %}
struct {{Suffix}}Cmd {
wire::WireCmd commandId = wire::WireCmd::{{Suffix}};
uint32_t objectId;
size_t GetRequiredSize() const;
};
{% endfor %}
}
}
#endif // WIRE_WIRECMD_AUTOGEN_H_
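
A hedged sketch of the command struct generated for "buffer"::"set sub data": value arguments are members, while the uint32_t payload lives in the buffer immediately after the struct and is reached through GetPtr_data().

    // Hypothetical generated command.
    struct BufferSetSubDataCmd {
        wire::WireCmd commandId = wire::WireCmd::BufferSetSubData;
        uint32_t self;
        uint32_t start;
        uint32_t count;
        size_t GetRequiredSize() const;
        uint8_t* GetPtr_data();
        const uint8_t* GetPtr_data() const;
    };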

View File

@ -0,0 +1,343 @@
//* Copyright 2017 The NXT Authors
//*
//* Licensed under the Apache License, Version 2.0 (the "License");
//* you may not use this file except in compliance with the License.
//* You may obtain a copy of the License at
//*
//* http://www.apache.org/licenses/LICENSE-2.0
//*
//* Unless required by applicable law or agreed to in writing, software
//* distributed under the License is distributed on an "AS IS" BASIS,
//* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//* See the License for the specific language governing permissions and
//* limitations under the License.
#include "Wire.h"
#include "WireCmd.h"
#include <cassert>
#include <vector>
namespace nxt {
namespace wire {
namespace server {
//* Stores what the backend knows about the type.
template<typename T>
struct ObjectDataBase {
//* The backend-provided handle to this object.
T handle;
//* Used by the error-propagation mechanism to know if this object is an error.
//* TODO(cwallez@chromium.org): this is doubling the memory usage of
//* std::vector<ObjectDataBase>; consider making it a special marker value in handle instead.
bool valid;
//* Whether this object has been allocated, used by the KnownObjects queries
//* TODO(cwallez@chromium.org): make this an internal bit vector in KnownObjects.
bool allocated;
};
//* Keeps track of the mapping between client IDs and backend objects.
template<typename T>
class KnownObjects {
public:
using Data = ObjectDataBase<T>;
KnownObjects() {
//* Pre-allocate ID 0 to refer to the null handle.
Data nullObject;
nullObject.handle = nullptr;
nullObject.valid = true;
nullObject.allocated = true;
known.push_back(nullObject);
}
//* Get the backend object for a given client ID.
//* Returns nullptr if the ID hasn't previously been allocated.
Data* Get(uint32_t id) {
if (id >= known.size()) {
return nullptr;
}
Data* data = &known[id];
if (!data->allocated) {
return nullptr;
}
return data;
}
//* Allocates the data for a given ID and returns it.
//* Returns nullptr if the ID is already allocated, or too far ahead.
//* Invalidates all the Data*
Data* Allocate(uint32_t id) {
if (id > known.size()) {
return nullptr;
}
Data data;
data.allocated = true;
data.valid = false;
data.handle = nullptr;
if (id >= known.size()) {
known.push_back(data);
return &known.back();
}
if (known[id].allocated) {
return nullptr;
}
known[id] = data;
return &known[id];
}
//* Marks an ID as deallocated
void Free(uint32_t id) {
assert(id < known.size());
known[id].allocated = false;
}
private:
std::vector<Data> known;
};
class Server : public CommandHandler {
public:
Server(nxtDevice device, const nxtProcTable& procs) : procs(procs) {
//* The client-server knowledge is bootstrapped with device 1.
auto* deviceData = knownDevice.Allocate(1);
deviceData->handle = device;
deviceData->valid = true;
}
const uint8_t* HandleCommands(const uint8_t* commands, size_t size) override {
while (size > sizeof(WireCmd)) {
WireCmd cmdId = *reinterpret_cast<const WireCmd*>(commands);
bool success = false;
switch (cmdId) {
{% for type in by_category["object"] %}
{% for method in type.methods %}
{% set Suffix = as_MethodSuffix(type.name, method.name) %}
case WireCmd::{{Suffix}}:
success = Handle{{Suffix}}(&commands, &size);
break;
{% endfor %}
{% set Suffix = as_MethodSuffix(type.name, Name("destroy")) %}
case WireCmd::{{Suffix}}:
success = Handle{{Suffix}}(&commands, &size);
break;
{% endfor %}
default:
success = false;
}
if (!success) {
return nullptr;
}
}
if (size != 0) {
return nullptr;
}
return commands;
}
void OnSynchronousError() override {
gotError = true;
}
private:
nxtProcTable procs;
bool gotError = false;
//* The list of known IDs for each object type.
{% for type in by_category["object"] %}
KnownObjects<{{as_cType(type.name)}}> known{{type.name.CamelCase()}};
{% endfor %}
//* Helper function for getting the command data in command handlers.
//* Checks there is enough data left, updates the buffer / size, and returns
//* the command (or nullptr on error).
template<typename T>
const T* GetCommand(const uint8_t** commands, size_t* size) {
if (*size < sizeof(T)) {
return nullptr;
}
const T* cmd = reinterpret_cast<const T*>(*commands);
size_t cmdSize = cmd->GetRequiredSize();
if (*size < cmdSize) {
return nullptr;
}
*commands += cmdSize;
*size -= cmdSize;
return cmd;
}
//* Implementation of the command handlers
{% for type in by_category["object"] %}
{% for method in type.methods %}
{% set Suffix = as_MethodSuffix(type.name, method.name) %}
//* The generic command handlers
bool Handle{{Suffix}}(const uint8_t** commands, size_t* size) {
//* Get command ptr, and check it fits in the buffer.
const auto* cmd = GetCommand<{{Suffix}}Cmd>(commands, size);
if (cmd == nullptr) {
return false;
}
//* While unpacking arguments, if any of them is an error, valid will be set to false.
bool valid = true;
//* Unpack 'self'
{% set Type = type.name.CamelCase() %}
{{as_cType(type.name)}} self;
auto* selfData = known{{Type}}.Get(cmd->self);
{
if (selfData == nullptr) {
return false;
}
valid = valid && selfData->valid;
self = selfData->handle;
}
//* Unpack value objects from IDs.
{% for arg in method.arguments if arg.annotation == "value" and arg.type.category == "object" %}
{% set Type = arg.type.name.CamelCase() %}
{{as_cType(arg.type.name)}} arg_{{as_varName(arg.name)}};
{
auto* data = known{{Type}}.Get(cmd->{{as_varName(arg.name)}});
if (data == nullptr) {
return false;
}
valid = valid && data->valid;
arg_{{as_varName(arg.name)}} = data->handle;
}
{% endfor %}
//* Unpack pointer arguments
{% for arg in method.arguments if arg.annotation != "value" %}
{% set argName = as_varName(arg.name) %}
const {{as_cType(arg.type.name)}}* arg_{{argName}};
{% if arg.length == "strlen" %}
//* Unpack strings, checking they are null-terminated.
arg_{{argName}} = reinterpret_cast<const {{as_cType(arg.type.name)}}*>(cmd->GetPtr_{{argName}}());
if (arg_{{argName}}[cmd->{{argName}}Strlen] != 0) {
return false;
}
{% elif arg.type.category == "object" %}
//* Unpack arrays of objects.
//* TODO(cwallez@chromium.org) do not allocate when there are few objects.
std::vector<{{as_cType(arg.type.name)}}> {{argName}}Storage(cmd->{{as_varName(arg.length.name)}});
auto {{argName}}Ids = reinterpret_cast<const uint32_t*>(cmd->GetPtr_{{argName}}());
for (size_t i = 0; i < cmd->{{as_varName(arg.length.name)}}; i++) {
{% set Type = arg.type.name.CamelCase() %}
auto* data = known{{Type}}.Get({{argName}}Ids[i]);
if (data == nullptr) {
return false;
}
{{argName}}Storage[i] = data->handle;
valid = valid && data->valid;
}
arg_{{argName}} = {{argName}}Storage.data();
{% else %}
//* For anything else, just get the pointer.
arg_{{argName}} = reinterpret_cast<const {{as_cType(arg.type.name)}}*>(cmd->GetPtr_{{argName}}());
{% endif %}
{% endfor %}
//* At this point all the data has been unpacked into cmd->* or arg_*.
//* In all cases allocate the object data, as it will be referred to by the client.
{% set returns = method.return_type.name.canonical_case() != "void" %}
{% if returns %}
{% set Type = method.return_type.name.CamelCase() %}
auto* resultData = known{{Type}}.Allocate(cmd->resultId);
if (resultData == nullptr) {
return false;
}
{% endif %}
//* After the data is allocated, apply the argument error propagation mechanism
if (!valid) {
return true;
}
{% if returns -%}
auto result =
{%- endif -%}
procs.{{as_varName(type.name, method.name)}}(self
{%- for arg in method.arguments -%}
{% if arg.annotation == "value" and arg.type.category != "object" %}
, cmd->{{as_varName(arg.name)}}
{% else %}
, arg_{{as_varName(arg.name)}}
{% endif %}
{%- endfor -%}
);
{% if returns %}
resultData->handle = result;
resultData->valid = result != nullptr;
{% endif %}
if (gotError) {
{% if type.is_builder %}
//* Get the data again; it has been invalidated by the call to
//* known.Allocate
known{{type.name.CamelCase()}}.Get(cmd->self)->valid = false;
{% endif %}
gotError = false;
}
return true;
}
{% endfor %}
//* Handlers for the destruction of objects: clients track reference / release
//* themselves and only send a destroy command when the refcount reaches 0.
{% set Suffix = as_MethodSuffix(type.name, Name("destroy")) %}
bool Handle{{Suffix}}(const uint8_t** commands, size_t* size) {
const auto* cmd = GetCommand<{{Suffix}}Cmd>(commands, size);
if (cmd == nullptr) {
return false;
}
//* ID 0 is reserved for nullptr and cannot be destroyed.
if (cmd->objectId == 0) {
return false;
}
auto* data = known{{type.name.CamelCase()}}.Get(cmd->objectId);
if (data == nullptr) {
return false;
}
if (data->valid) {
procs.{{as_varName(type.name, Name("release"))}}(data->handle);
}
known{{type.name.CamelCase()}}.Free(cmd->objectId);
return true;
}
{% endfor %}
};
}
CommandHandler* CreateCommandHandler(nxtDevice device, const nxtProcTable& procs) {
return new server::Server(device, procs);
}
}
}
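
Finally, a hedged sketch of the server-side handler the template would produce for the same hypothetical command: it resolves the client ID to a backend handle, grabs the trailing data, and forwards to the real proc unless an argument was already invalid.

    // Hypothetical generated handler (member of Server).
    bool HandleBufferSetSubData(const uint8_t** commands, size_t* size) {
        const auto* cmd = GetCommand<BufferSetSubDataCmd>(commands, size);
        if (cmd == nullptr) {
            return false;
        }
        bool valid = true;

        nxtBuffer self;
        auto* selfData = knownBuffer.Get(cmd->self);
        if (selfData == nullptr) {
            return false;
        }
        valid = valid && selfData->valid;
        self = selfData->handle;

        const uint32_t* arg_data = reinterpret_cast<const uint32_t*>(cmd->GetPtr_data());

        if (!valid) {
            return true; // invalid arguments are propagated as errors, not wire failures
        }
        procs.bufferSetSubData(self, cmd->start, cmd->count, arg_data);
        if (gotError) {
            gotError = false; // "buffer" is not a builder, so nothing to invalidate
        }
        return true;
    }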

670
next.json Normal file
View File

@ -0,0 +1,670 @@
{
"_comment": [
"Copyright 2017 The NXT Authors",
"",
"Licensed under the Apache License, Version 2.0 (the \"License\");",
"you may not use this file except in compliance with the License.",
"You may obtain a copy of the License at",
"",
" http://www.apache.org/licenses/LICENSE-2.0",
"",
"Unless required by applicable law or agreed to in writing, software",
"distributed under the License is distributed on an \"AS IS\" BASIS,",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
"See the License for the specific language governing permissions and",
"limitations under the License."
],
"bind group": {
"category": "object"
},
"bind group builder": {
"category": "object",
"methods": [
{
"name": "get result",
"returns": "bind group"
},
{
"name": "set layout",
"args": [
{"name": "layout", "type": "bind group layout"}
]
},
{
"name": "set usage",
"args": [
{"name": "usage", "type": "bind group usage"}
]
},
{
"name": "set buffer views",
"args": [
{"name": "start", "type": "uint32_t"},
{"name": "count", "type": "uint32_t"},
{"name": "buffer views", "type": "buffer view", "annotation": "const*", "length": "count"}
]
},
{
"name": "set samplers",
"args": [
{"name": "start", "type": "uint32_t"},
{"name": "count", "type": "uint32_t"},
{"name": "samplers", "type": "sampler", "annotation": "const*", "length": "count"}
]
},
{
"name": "set texture views",
"args": [
{"name": "start", "type": "uint32_t"},
{"name": "count", "type": "uint32_t"},
{"name": "texture views", "type": "texture view", "annotation": "const*", "length": "count"}
]
}
],
"TODO": [
"When resource are added, add methods for setting the content of the bind group"
]
},
"bind group usage": {
"category": "enum",
"values": [
{"value": 0, "name": "frozen"},
{"value": 1, "name": "dynamic"}
]
},
"bind group layout": {
"category": "object"
},
"bind group layout builder": {
"category": "object",
"methods": [
{
"name": "get result",
"returns": "bind group layout"
},
{
"name": "set bindings type",
"args": [
{"name": "visibility", "type": "shader stage bit"},
{"name": "binding type", "type": "binding type"},
{"name": "start", "type": "uint32_t"},
{"name": "count", "type": "uint32_t"}
]
}
]
},
"binding type": {
"category": "enum",
"values": [
{"value": 0, "name": "uniform buffer"},
{"value": 1, "name": "sampler"},
{"value": 2, "name": "sampled texture"},
{"value": 3, "name": "storage buffer"}
]
},
"buffer": {
"category": "object",
"methods": [
{
"name": "create buffer view builder",
"returns": "buffer view builder"
},
{
"name": "set sub data",
"args": [
{"name": "start", "type": "uint32_t"},
{"name": "count", "type": "uint32_t"},
{"name": "data", "type": "uint32_t", "annotation": "const*", "length": "count"}
]
},
{
"name": "transition usage",
"args": [
{"name": "usage", "type": "buffer usage bit"}
]
},
{
"name": "freeze usage",
"args": [
{"name": "usage", "type": "buffer usage bit"}
]
}
]
},
"buffer builder": {
"category": "object",
"methods": [
{
"name": "get result",
"returns": "buffer"
},
{
"name": "set allowed usage",
"args": [
{"name": "usage", "type": "buffer usage bit"}
]
},
{
"name": "set initial usage",
"args": [
{"name": "usage", "type": "buffer usage bit"}
]
},
{
"name": "set size",
"args": [
{"name": "size", "type": "uint32_t"}
]
}
]
},
"buffer usage bit": {
"category": "bitmask",
"values": [
{"value": 0, "name": "none"},
{"value": 1, "name": "mapped"},
{"value": 2, "name": "transfer src"},
{"value": 4, "name": "transfer dst"},
{"value": 8, "name": "index"},
{"value": 16, "name": "vertex"},
{"value": 32, "name": "uniform"},
{"value": 64, "name": "storage"}
]
},
"buffer view": {
"category": "object"
},
"buffer view builder": {
"category": "object",
"methods": [
{
"name": "get result",
"returns": "buffer view"
},
{
"name": "set extent",
"args": [
{"name": "offset", "type": "uint32_t"},
{"name": "size", "type": "uint32_t"}
]
}
]
},
"char": {
"category": "native"
},
"command buffer": {
"category": "object"
},
"command buffer builder": {
"category": "object",
"methods": [
{
"name": "get result",
"returns": "command buffer"
},
{
"name": "copy buffer to texture",
"args": [
{"name": "buffer", "type": "buffer"},
{"name": "texture", "type": "texture"},
{"name": "x", "type": "uint32_t"},
{"name": "y", "type": "uint32_t"},
{"name": "z", "type": "uint32_t"},
{"name": "width", "type": "uint32_t"},
{"name": "height", "type": "uint32_t"},
{"name": "depth", "type": "uint32_t"},
{"name": "level", "type": "uint32_t"}
],
"TODO": [
"Make pretty with Offset and Extents structures",
"Allow choosing the aspect (depth vs. stencil)?",
"The following where removed because gmock supports only 10 arguments",
"this means the buffer is assumed to be packed and starting at 0.",
{"name": "row length", "type": "uint32_t"},
{"name": "image height", "type": "uint32_t"},
{"name": "buffer offset", "type": "uint32_t"}
]
},
{
"name": "dispatch",
"args": [
{"name": "x", "type": "uint32_t"},
{"name": "y", "type": "uint32_t"},
{"name": "z", "type": "uint32_t"}
]
},
{
"name": "draw arrays",
"args": [
{"name": "vertex count", "type": "uint32_t"},
{"name": "instance count", "type": "uint32_t"},
{"name": "first vertex", "type": "uint32_t"},
{"name": "first instance", "type": "uint32_t"}
]
},
{
"name": "draw elements",
"args": [
{"name": "index count", "type": "uint32_t"},
{"name": "instance count", "type": "uint32_t"},
{"name": "first index", "type": "uint32_t"},
{"name": "first instance", "type": "uint32_t"}
]
},
{
"name": "set bind group",
"args": [
{"name": "group index", "type": "uint32_t"},
{"name": "group", "type": "bind group"}
]
},
{
"name": "set index buffer",
"args": [
{"name": "buffer", "type": "buffer"},
{"name": "offset", "type": "uint32_t"},
{"name": "format", "type": "index format"}
]
},
{
"name": "set push constants",
"TODO": [
"data should be void*",
"TODO Vulkan has an additional stage mask"
],
"args": [
{"name": "stage", "type": "shader stage bit"},
{"name": "offset", "type": "uint32_t"},
{"name": "count", "type": "uint32_t"},
{"name": "data", "type": "uint32_t", "annotation": "const*", "length": "count"}
]
},
{
"name": "set pipeline",
"args": [
{"name": "pipeline", "type": "pipeline"}
],
"notes": [
"Not specifying graphics or compute because we know from render pass"
]
},
{
"name": "set vertex buffers",
"args": [
{"name": "start slot", "type": "uint32_t"},
{"name": "count", "type": "uint32_t"},
{"name": "buffers", "type": "buffer", "annotation": "const*", "length": "count"},
{"name": "offsets", "type": "uint32_t", "annotation": "const*", "length": "count"}
]
},
{
"name": "transition buffer usage",
"args": [
{"name": "buffer", "type": "buffer"},
{"name": "usage", "type": "buffer usage bit"}
]
},
{
"name": "transition texture usage",
"args": [
{"name": "texture", "type": "texture"},
{"name": "usage", "type": "texture usage bit"}
]
}
]
},
"device": {
"category": "object",
"methods": [
{
"name": "create bind group builder",
"returns": "bind group builder"
},
{
"name": "create bind group layout builder",
"returns": "bind group layout builder"
},
{
"name": "create buffer builder",
"returns": "buffer builder"
},
{
"name": "create command buffer builder",
"returns": "command buffer builder"
},
{
"name": "create input state builder",
"returns": "input state builder"
},
{
"name": "create pipeline builder",
"returns": "pipeline builder"
},
{
"name": "create pipeline layout builder",
"returns": "pipeline layout builder"
},
{
"name": "create queue builder",
"returns": "queue builder"
},
{
"name": "create sampler builder",
"returns": "sampler builder"
},
{
"name": "create shader module builder",
"returns": "shader module builder"
},
{
"name": "create texture builder",
"returns": "texture builder"
},
{
"name": "copy bind groups",
"args": [
{"name": "start", "type": "uint32_t"},
{"name": "count", "type": "uint32_t"},
{"name": "source", "type": "bind group"},
{"name": "target", "type": "bind group"}
]
}
]
},
"filter mode": {
"category": "enum",
"values": [
{"value": 0, "name":"nearest"},
{"value": 1, "name":"linear"}
]
},
"index format": {
"category": "enum",
"values": [
{"value": 0, "name": "uint16"},
{"value": 1, "name": "uint32"}
]
},
"input state": {
"category": "object"
},
"input state builder": {
"category": "object",
"methods": [
{
"name": "get result",
"returns": "input state"
},
{
"name": "set attribute",
"args": [
{"name": "shader location", "type": "uint32_t"},
{"name": "binding slot", "type": "uint32_t"},
{"name": "format", "type": "vertex format"},
{"name": "offset", "type": "uint32_t"}
]
},
{
"name": "set input",
"args": [
{"name": "binding slot", "type": "uint32_t"},
{"name": "stride", "type": "uint32_t"},
{"name": "step mode", "type": "input step mode"}
]
}
]
},
"input step mode": {
"category": "enum",
"values": [
{"value": 0, "name": "vertex"},
{"value": 1, "name": "instance"}
]
},
"pipeline": {
"category": "object"
},
"pipeline builder": {
"category": "object",
"methods": [
{
"name": "get result",
"returns": "pipeline"
},
{
"name": "set layout",
"args": [
{"name": "layout", "type": "pipeline layout"}
]
},
{
"name": "set stage",
"args": [
{"name": "stage", "type": "shader stage"},
{"name": "module", "type": "shader module"},
{"name": "entry point", "type": "char", "annotation": "const*", "length": "strlen"}
]
},
{
"name": "set input state",
"args": [
{"name": "input", "type": "input state"}
]
}
]
},
"pipeline layout": {
"category": "object"
},
"pipeline layout builder": {
"category": "object",
"methods": [
{
"name": "get result",
"returns": "pipeline layout"
},
{
"name": "set bind group layout",
"args": [
{"name": "group index", "type": "uint32_t"},
{"name": "layout", "type": "bind group layout"}
]
}
]
},
"queue": {
"category": "object",
"methods": [
{
"name": "submit",
"args": [
{"name": "num commands", "type": "uint32_t"},
{"name": "commands", "type": "command buffer", "annotation": "const*", "length": "num commands"}
]
}
]
},
"queue builder": {
"category": "object",
"methods": [
{
"name": "get result",
"returns": "queue"
}
]
},
"sampler": {
"category": "object"
},
"sampler builder": {
"category": "object",
"methods": [
{
"name": "get result",
"returns": "sampler"
},
{
"name": "set filter mode",
"args": [
{"name": "mag filter", "type": "filter mode"},
{"name": "min filter", "type": "filter mode"},
{"name": "mipmap filter", "type": "filter mode"}
]
}
]
},
"shader module": {
"category": "object"
},
"shader module builder": {
"category": "object",
"methods": [
{
"name": "get result",
"returns": "shader module"
},
{
"name": "set source",
"args": [
{"name": "code size", "type": "uint32_t"},
{"name": "code", "type": "uint32_t", "annotation": "const*", "length": "code size"}
]
}
]
},
"shader stage": {
"category": "enum",
"values": [
{"value": 0, "name": "vertex"},
{"value": 1, "name": "fragment"},
{"value": 2, "name": "compute"}
]
},
"shader stage bit": {
"category": "bitmask",
"values": [
{"value": 1, "name": "vertex"},
{"value": 2, "name": "fragment"},
{"value": 4, "name": "compute"}
]
},
"texture": {
"category": "object",
"methods": [
{
"name": "create texture view builder",
"returns": "texture view builder"
},
{
"name": "transition usage",
"args": [
{"name": "usage", "type": "texture usage bit"}
]
},
{
"name": "freeze usage",
"args": [
{"name": "usage", "type": "texture usage bit"}
]
}
]
},
"texture builder": {
"category": "object",
"methods": [
{
"name": "get result",
"returns": "texture"
},
{
"name": "set dimension",
"args": [
{"name": "dimension", "type": "texture dimension"}
]
},
{
"name": "set extent",
"args": [
{"name": "width", "type": "uint32_t"},
{"name": "height", "type": "uint32_t"},
{"name": "depth", "type": "uint32_t"}
]
},
{
"name": "set format",
"args": [
{"name": "format", "type": "texture format"}
]
},
{
"name": "set mip levels",
"args": [
{"name": "num mip levels", "type": "uint32_t"}
]
},
{
"name": "set allowed usage",
"args": [
{"name": "usage", "type": "texture usage bit"}
]
},
{
"name": "set initial usage",
"args": [
{"name": "usage", "type": "texture usage bit"}
]
}
]
},
"texture dimension": {
"category": "enum",
"values": [
{"value": 0, "name": "2D"}
]
},
"texture usage bit": {
"category": "bitmask",
"values": [
{"value": 0, "name": "none"},
{"value": 1, "name": "transfer src"},
{"value": 2, "name": "transfer dst"},
{"value": 4, "name": "sampled"},
{"value": 8, "name": "storage"},
{"value": 16, "name": "color attachment"},
{"value": 32, "name": "depth stencil attachment"}
]
},
"texture view": {
"category": "object"
},
"texture view builder": {
"category": "object",
"methods": [
{
"name": "get result",
"returns": "texture view"
}
]
},
"texture format": {
"category": "enum",
"values": [
{"value": 0, "name": "r8 g8 b8 a8 unorm"}
]
},
"vertex format": {
"category": "enum",
"values": [
{"value": 0, "name": "float r32 g32 b32 a32"},
{"value": 1, "name": "float r32 g32 b32"},
{"value": 2, "name": "float r32 g32"}
]
},
"void": {
"category": "native"
},
"uint32_t": {
"category": "native"
}
}
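
For orientation, here is a hedged sketch of how the generated C++ wrappers (libNXT++) might drive the buffer entries above. The CamelCase spellings (CreateBufferBuilder, SetAllowedUsage, nxt::BufferUsageBit::Vertex, ...) and the chaining style are assumed to follow the generator's usual mapping of the JSON names; the authoritative signatures live in the generated headers, not in this file.

#include "nxt/nxtcpp.h"

// Builds a small vertex buffer through the builder pattern described above.
nxt::Buffer CreateExampleVertexBuffer(nxt::Device& device, uint32_t size) {
    return device.CreateBufferBuilder()
        .SetAllowedUsage(nxt::BufferUsageBit::Vertex)   // "set allowed usage"
        .SetInitialUsage(nxt::BufferUsageBit::Vertex)   // "set initial usage"
        .SetSize(size)                                  // "set size"
        .GetResult();                                   // "get result"
}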

133
src/backend/CMakeLists.txt Normal file
View File

@ -0,0 +1,133 @@
# Copyright 2017 The NXT Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set(COMMON_DIR ${CMAKE_CURRENT_SOURCE_DIR}/common)
set(METAL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/metal)
set(OPENGL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/opengl)
set(TESTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/tests)
list(APPEND BACKEND_SOURCES
${COMMON_DIR}/BindGroup.cpp
${COMMON_DIR}/BindGroup.h
${COMMON_DIR}/BindGroupLayout.cpp
${COMMON_DIR}/BindGroupLayout.h
${COMMON_DIR}/BitSetIterator.h
${COMMON_DIR}/Buffer.cpp
${COMMON_DIR}/Buffer.h
${COMMON_DIR}/CommandAllocator.cpp
${COMMON_DIR}/CommandAllocator.h
${COMMON_DIR}/CommandBuffer.cpp
${COMMON_DIR}/CommandBuffer.h
${COMMON_DIR}/Device.cpp
${COMMON_DIR}/Device.h
${COMMON_DIR}/Forward.h
${COMMON_DIR}/InputState.cpp
${COMMON_DIR}/InputState.h
${COMMON_DIR}/Math.cpp
${COMMON_DIR}/Math.h
${COMMON_DIR}/PerStage.cpp
${COMMON_DIR}/PerStage.h
${COMMON_DIR}/Pipeline.cpp
${COMMON_DIR}/Pipeline.h
${COMMON_DIR}/PipelineLayout.cpp
${COMMON_DIR}/PipelineLayout.h
${COMMON_DIR}/Queue.cpp
${COMMON_DIR}/Queue.h
${COMMON_DIR}/RefCounted.cpp
${COMMON_DIR}/RefCounted.h
${COMMON_DIR}/Sampler.cpp
${COMMON_DIR}/Sampler.h
${COMMON_DIR}/ShaderModule.cpp
${COMMON_DIR}/ShaderModule.h
${COMMON_DIR}/Texture.cpp
${COMMON_DIR}/Texture.h
${COMMON_DIR}/ToBackend.h
)
# OpenGL Backend
Generate(
LIB_NAME opengl_autogen
LIB_TYPE STATIC
PRINT_NAME "OpenGL backend autogenerated files"
COMMAND_LINE_ARGS
${GENERATOR_COMMON_ARGS}
-T opengl
)
target_link_libraries(opengl_autogen glfw glad nxtcpp)
target_include_directories(opengl_autogen PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})
target_include_directories(opengl_autogen PUBLIC ${GENERATED_DIR})
SetCXX14(opengl_autogen)
SetPIC(opengl_autogen)
list(APPEND BACKEND_SOURCES
${OPENGL_DIR}/CommandBufferGL.cpp
${OPENGL_DIR}/CommandBufferGL.h
${OPENGL_DIR}/OpenGLBackend.cpp
${OPENGL_DIR}/OpenGLBackend.h
${OPENGL_DIR}/PipelineGL.cpp
${OPENGL_DIR}/PipelineGL.h
${OPENGL_DIR}/PipelineLayoutGL.cpp
${OPENGL_DIR}/PipelineLayoutGL.h
${OPENGL_DIR}/SamplerGL.cpp
${OPENGL_DIR}/SamplerGL.h
${OPENGL_DIR}/ShaderModuleGL.cpp
${OPENGL_DIR}/ShaderModuleGL.h
${OPENGL_DIR}/TextureGL.cpp
${OPENGL_DIR}/TextureGL.h
)
# Metal Backend
if (APPLE)
Generate(
LIB_NAME metal_autogen
LIB_TYPE STATIC
PRINT_NAME "Metal backend autogenerated files"
COMMAND_LINE_ARGS
${GENERATOR_COMMON_ARGS}
-T metal
)
target_link_libraries(metal_autogen glfw glad nxtcpp "-framework QuartzCore" "-framework Metal")
target_include_directories(metal_autogen PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})
target_include_directories(metal_autogen PUBLIC ${GENERATED_DIR})
SetCXX14(metal_autogen)
SetPIC(metal_autogen)
list(APPEND BACKEND_SOURCES
${METAL_DIR}/MetalBackend.mm
${METAL_DIR}/MetalBackend.h
)
endif()
add_library(nxt_backend SHARED ${BACKEND_SOURCES})
target_link_libraries(nxt_backend opengl_autogen glfw glad spirv-cross)
if (APPLE)
target_link_libraries(nxt_backend metal_autogen)
endif()
target_include_directories(nxt_backend PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})
SetCXX14(nxt_backend)
add_executable(backend_unittests
${TESTS_DIR}/BitSetIteratorTests.cpp
${TESTS_DIR}/CommandAllocatorTests.cpp
${TESTS_DIR}/MathTests.cpp
${TESTS_DIR}/PerStageTests.cpp
${TESTS_DIR}/RefCountedTests.cpp
${TESTS_DIR}/ToBackendTests.cpp
${TESTS_DIR}/UnittestsMain.cpp
)
target_link_libraries(backend_unittests nxt_backend gtest)
target_include_directories(backend_unittests PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})
SetCXX14(backend_unittests)

213
src/backend/common/BindGroup.cpp Normal file
View File

@ -0,0 +1,213 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "BindGroup.h"
#include "BindGroupLayout.h"
#include "Buffer.h"
#include "Device.h"
#include "Texture.h"
namespace backend {
// BindGroup
BindGroupBase::BindGroupBase(BindGroupBuilder* builder)
: layout(std::move(builder->layout)), usage(builder->usage), bindings(std::move(builder->bindings)) {
}
const BindGroupLayoutBase* BindGroupBase::GetLayout() const {
return layout.Get();
}
nxt::BindGroupUsage BindGroupBase::GetUsage() const {
return usage;
}
BufferViewBase* BindGroupBase::GetBindingAsBufferView(size_t binding) {
ASSERT(binding < kMaxBindingsPerGroup);
ASSERT(layout->GetBindingInfo().mask[binding]);
ASSERT(layout->GetBindingInfo().types[binding] == nxt::BindingType::UniformBuffer ||
layout->GetBindingInfo().types[binding] == nxt::BindingType::StorageBuffer);
return reinterpret_cast<BufferViewBase*>(bindings[binding].Get());
}
SamplerBase* BindGroupBase::GetBindingAsSampler(size_t binding) {
ASSERT(binding < kMaxBindingsPerGroup);
ASSERT(layout->GetBindingInfo().mask[binding]);
ASSERT(layout->GetBindingInfo().types[binding] == nxt::BindingType::Sampler);
return reinterpret_cast<SamplerBase*>(bindings[binding].Get());
}
TextureViewBase* BindGroupBase::GetBindingAsTextureView(size_t binding) {
ASSERT(binding < kMaxBindingsPerGroup);
ASSERT(layout->GetBindingInfo().mask[binding]);
ASSERT(layout->GetBindingInfo().types[binding] == nxt::BindingType::SampledTexture);
return reinterpret_cast<TextureViewBase*>(bindings[binding].Get());
}
// BindGroupBuilder
enum BindGroupSetProperties {
BINDGROUP_PROPERTY_USAGE = 0x1,
BINDGROUP_PROPERTY_LAYOUT = 0x2,
};
BindGroupBuilder::BindGroupBuilder(DeviceBase* device)
: device(device) {
}
bool BindGroupBuilder::WasConsumed() const {
return consumed;
}
BindGroupBase* BindGroupBuilder::GetResult() {
constexpr int allProperties = BINDGROUP_PROPERTY_USAGE | BINDGROUP_PROPERTY_LAYOUT;
if ((propertiesSet & allProperties) != allProperties) {
device->HandleError("Bindgroup missing properties");
return nullptr;
}
if (setMask != layout->GetBindingInfo().mask) {
device->HandleError("Bindgroup missing bindings");
return nullptr;
}
consumed = true;
return device->CreateBindGroup(this);
}
void BindGroupBuilder::SetLayout(BindGroupLayoutBase* layout) {
if ((propertiesSet & BINDGROUP_PROPERTY_LAYOUT) != 0) {
device->HandleError("Bindgroup layout property set multiple times");
return;
}
this->layout = layout;
propertiesSet |= BINDGROUP_PROPERTY_LAYOUT;
}
void BindGroupBuilder::SetUsage(nxt::BindGroupUsage usage) {
if ((propertiesSet & BINDGROUP_PROPERTY_USAGE) != 0) {
device->HandleError("Bindgroup usage property set multiple times");
return;
}
this->usage = usage;
propertiesSet |= BINDGROUP_PROPERTY_USAGE;
}
void BindGroupBuilder::SetBufferViews(uint32_t start, uint32_t count, BufferViewBase* const * bufferViews) {
if (!SetBindingsValidationBase(start, count)) {
return;
}
const auto& layoutInfo = layout->GetBindingInfo();
for (size_t i = start, j = 0; i < start + count; ++i, ++j) {
nxt::BufferUsageBit requiredBit;
switch (layoutInfo.types[i]) {
case nxt::BindingType::UniformBuffer:
requiredBit = nxt::BufferUsageBit::Uniform;
break;
case nxt::BindingType::StorageBuffer:
requiredBit = nxt::BufferUsageBit::Storage;
break;
case nxt::BindingType::Sampler:
case nxt::BindingType::SampledTexture:
device->HandleError("Setting buffer for a wrong binding type");
return;
}
if (!(bufferViews[j]->GetBuffer()->GetAllowedUsage() & requiredBit)) {
device->HandleError("Buffer needs to allow the correct usage bit");
return;
}
}
SetBindingsBase(start, count, reinterpret_cast<RefCounted* const *>(bufferViews));
}
void BindGroupBuilder::SetSamplers(uint32_t start, uint32_t count, SamplerBase* const * samplers) {
if (!SetBindingsValidationBase(start, count)) {
return;
}
const auto& layoutInfo = layout->GetBindingInfo();
for (size_t i = start, j = 0; i < start + count; ++i, ++j) {
if (layoutInfo.types[i] != nxt::BindingType::Sampler) {
device->HandleError("Setting binding for a wrong layout binding type");
return;
}
}
SetBindingsBase(start, count, reinterpret_cast<RefCounted* const *>(samplers));
}
void BindGroupBuilder::SetTextureViews(uint32_t start, uint32_t count, TextureViewBase* const * textureViews) {
if (!SetBindingsValidationBase(start, count)) {
return;
}
const auto& layoutInfo = layout->GetBindingInfo();
for (size_t i = start, j = 0; i < start + count; ++i, ++j) {
if (layoutInfo.types[i] != nxt::BindingType::SampledTexture) {
device->HandleError("Setting binding for a wrong layout binding type");
return;
}
if (!(textureViews[j]->GetTexture()->GetAllowedUsage() & nxt::TextureUsageBit::Sampled)) {
device->HandleError("Texture needs to allow the sampled usage bit");
return;
}
}
SetBindingsBase(start, count, reinterpret_cast<RefCounted* const *>(textureViews));
}
void BindGroupBuilder::SetBindingsBase(uint32_t start, uint32_t count, RefCounted* const * objects) {
for (size_t i = start, j = 0; i < start + count; ++i, ++j) {
setMask.set(i);
bindings[i] = objects[j];
}
}
bool BindGroupBuilder::SetBindingsValidationBase(uint32_t start, uint32_t count) {
if (start + count > kMaxBindingsPerGroup) {
device->HandleError("Setting bindings type over maximum number of bindings");
return false;
}
if ((propertiesSet & BINDGROUP_PROPERTY_LAYOUT) == 0) {
device->HandleError("Bindgroup layout must be set before views");
return false;
}
const auto& layoutInfo = layout->GetBindingInfo();
for (size_t i = start, j = 0; i < start + count; ++i, ++j) {
if (setMask[i]) {
device->HandleError("Setting already set binding");
return false;
}
if (!layoutInfo.mask[i]) {
device->HandleError("Setting binding that isn't present in the layout");
return false;
}
}
return true;
}
}

95
src/backend/common/BindGroup.h Normal file
View File

@ -0,0 +1,95 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_BINDGROUP_H_
#define BACKEND_COMMON_BINDGROUP_H_
#include "Forward.h"
#include "RefCounted.h"
#include "nxt/nxtcpp.h"
#include <array>
#include <bitset>
#include <type_traits>
namespace backend {
class BindGroupBase : public RefCounted {
public:
BindGroupBase(BindGroupBuilder* builder);
const BindGroupLayoutBase* GetLayout() const;
nxt::BindGroupUsage GetUsage() const;
BufferViewBase* GetBindingAsBufferView(size_t binding);
SamplerBase* GetBindingAsSampler(size_t binding);
TextureViewBase* GetBindingAsTextureView(size_t binding);
private:
Ref<BindGroupLayoutBase> layout;
nxt::BindGroupUsage usage;
std::array<Ref<RefCounted>, kMaxBindingsPerGroup> bindings;
};
class BindGroupBuilder : public RefCounted {
public:
BindGroupBuilder(DeviceBase* device);
bool WasConsumed() const;
// NXT API
BindGroupBase* GetResult();
void SetLayout(BindGroupLayoutBase* layout);
void SetUsage(nxt::BindGroupUsage usage);
template<typename T>
void SetBufferViews(uint32_t start, uint32_t count, T* const* bufferViews) {
static_assert(std::is_base_of<BufferViewBase, T>::value, "");
SetBufferViews(start, count, reinterpret_cast<BufferViewBase* const*>(bufferViews));
}
void SetBufferViews(uint32_t start, uint32_t count, BufferViewBase* const * bufferViews);
template<typename T>
void SetSamplers(uint32_t start, uint32_t count, T* const* samplers) {
static_assert(std::is_base_of<SamplerBase, T>::value, "");
SetSamplers(start, count, reinterpret_cast<SamplerBase* const*>(samplers));
}
void SetSamplers(uint32_t start, uint32_t count, SamplerBase* const * samplers);
template<typename T>
void SetTextureViews(uint32_t start, uint32_t count, T* const* textureViews) {
static_assert(std::is_base_of<TextureViewBase, T>::value, "");
SetTextureViews(start, count, reinterpret_cast<TextureViewBase* const*>(textureViews));
}
void SetTextureViews(uint32_t start, uint32_t count, TextureViewBase* const * textureViews);
private:
friend class BindGroupBase;
void SetBindingsBase(uint32_t start, uint32_t count, RefCounted* const * objects);
bool SetBindingsValidationBase(uint32_t start, uint32_t count);
DeviceBase* device;
std::bitset<kMaxBindingsPerGroup> setMask;
int propertiesSet = 0;
bool consumed = false;
Ref<BindGroupLayoutBase> layout;
nxt::BindGroupUsage usage;
std::array<Ref<RefCounted>, kMaxBindingsPerGroup> bindings;
};
}
#endif // BACKEND_COMMON_BINDGROUP_H_
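
A minimal sketch of driving the builder above, given a BindGroupBuilder obtained from the device. nxt::BindGroupUsage::Frozen is an assumed generated spelling of the JSON's "frozen" value; validation failures surface through DeviceBase::HandleError and a null result, as implemented in BindGroup.cpp.

#include "BindGroup.h"

backend::BindGroupBase* BuildExampleBindGroup(backend::BindGroupBuilder* builder,
                                              backend::BindGroupLayoutBase* layout,
                                              backend::BufferViewBase* view) {
    builder->SetLayout(layout);                      // required property
    builder->SetUsage(nxt::BindGroupUsage::Frozen);  // required property (assumed enum spelling)
    backend::BufferViewBase* views[] = {view};
    builder->SetBufferViews(0, 1, views);            // slots set must match the layout's mask
    return builder->GetResult();                     // nullptr after HandleError on failure
}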

144
src/backend/common/BindGroupLayout.cpp Normal file
View File

@ -0,0 +1,144 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "BindGroupLayout.h"
#include "Device.h"
#include <functional>
namespace backend {
namespace {
// Workaround for Chrome's stdlib having a broken std::hash for enums and bitsets
template<typename T>
typename std::enable_if<std::is_enum<T>::value, size_t>::type Hash(T value) {
using Integral = typename nxt::UnderlyingType<T>::type;
return std::hash<Integral>()(static_cast<Integral>(value));
}
template<size_t N>
size_t Hash(const std::bitset<N>& value) {
static_assert(N <= sizeof(unsigned long long) * 8, "");
return std::hash<unsigned long long>()(value.to_ullong());
}
// TODO(cwallez@chromium.org): see if we can use boost's hash_combine or some equivalent
// this currently assumes that size_t is 64 bits
void CombineHashes(size_t* h1, size_t h2) {
*h1 ^= (h2 << 7) + (h2 >> (64 - 7)) + 0x304975;
}
size_t HashBindingInfo(const BindGroupLayoutBase::LayoutBindingInfo& info) {
size_t hash = Hash(info.mask);
for (size_t binding = 0; binding < kMaxBindingsPerGroup; ++binding) {
if (info.mask[binding]) {
CombineHashes(&hash, Hash(info.visibilities[binding]));
CombineHashes(&hash, Hash(info.types[binding]));
}
}
return hash;
}
bool operator== (const BindGroupLayoutBase::LayoutBindingInfo& a, const BindGroupLayoutBase::LayoutBindingInfo& b) {
if (a.mask != b.mask) {
return false;
}
for (size_t binding = 0; binding < kMaxBindingsPerGroup; ++binding) {
if (a.mask[binding]) {
if (a.visibilities[binding] != b.visibilities[binding]) {
return false;
}
if (a.types[binding] != b.types[binding]) {
return false;
}
}
}
return true;
}
}
// BindGroupLayoutBase
BindGroupLayoutBase::BindGroupLayoutBase(BindGroupLayoutBuilder* builder, bool blueprint)
: device(builder->device), bindingInfo(builder->bindingInfo), blueprint(blueprint) {
}
BindGroupLayoutBase::~BindGroupLayoutBase() {
// Do not uncache the actual cached object if we are a blueprint
if (!blueprint) {
device->UncacheBindGroupLayout(this);
}
}
const BindGroupLayoutBase::LayoutBindingInfo& BindGroupLayoutBase::GetBindingInfo() const {
return bindingInfo;
}
// BindGroupLayoutBuilder
BindGroupLayoutBuilder::BindGroupLayoutBuilder(DeviceBase* device) : device(device) {
}
bool BindGroupLayoutBuilder::WasConsumed() const {
return consumed;
}
const BindGroupLayoutBase::LayoutBindingInfo& BindGroupLayoutBuilder::GetBindingInfo() const {
return bindingInfo;
}
BindGroupLayoutBase* BindGroupLayoutBuilder::GetResult() {
consumed = true;
BindGroupLayoutBase blueprint(this, true);
auto* result = device->GetOrCreateBindGroupLayout(&blueprint, this);
result->Reference();
return result;
}
void BindGroupLayoutBuilder::SetBindingsType(nxt::ShaderStageBit visibility, nxt::BindingType bindingType, uint32_t start, uint32_t count) {
if (start + count > kMaxBindingsPerGroup) {
device->HandleError("Setting bindings type over maximum number of bindings");
return;
}
for (size_t i = start; i < start + count; i++) {
if (bindingInfo.mask[i]) {
device->HandleError("Setting already set binding type");
return;
}
bindingInfo.mask.set(i);
bindingInfo.visibilities[i] = visibility;
bindingInfo.types[i] = bindingType;
}
}
// BindGroupLayoutCacheFuncs
size_t BindGroupLayoutCacheFuncs::operator() (const BindGroupLayoutBase* bgl) const {
return HashBindingInfo(bgl->GetBindingInfo());
}
bool BindGroupLayoutCacheFuncs::operator() (const BindGroupLayoutBase* a, const BindGroupLayoutBase* b) const {
return a->GetBindingInfo() == b->GetBindingInfo();
}
}
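
A small sketch of the contract between the two functors implemented above, with hypothetical layout pointers: whenever the equality predicate says two layouts match, their hashes must agree, which is what lets the device-side cache deduplicate structurally identical layouts.

#include "BindGroupLayout.h"
#include <cassert>

void CheckCacheKeyContract(backend::BindGroupLayoutBase* a, backend::BindGroupLayoutBase* b) {
    backend::BindGroupLayoutCacheFuncs funcs;
    if (funcs(a, b)) {                 // equality: same mask, visibilities and types
        assert(funcs(a) == funcs(b));  // then the hashes must match as well
    }
}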

76
src/backend/common/BindGroupLayout.h Normal file
View File

@ -0,0 +1,76 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_BINDGROUPLAYOUT_H_
#define BACKEND_COMMON_BINDGROUPLAYOUT_H_
#include "Forward.h"
#include "RefCounted.h"
#include "nxt/nxtcpp.h"
#include <array>
#include <bitset>
namespace backend {
class BindGroupLayoutBase : public RefCounted {
public:
BindGroupLayoutBase(BindGroupLayoutBuilder* builder, bool blueprint = false);
~BindGroupLayoutBase() override;
struct LayoutBindingInfo {
std::array<nxt::ShaderStageBit, kMaxBindingsPerGroup> visibilities;
std::array<nxt::BindingType, kMaxBindingsPerGroup> types;
std::bitset<kMaxBindingsPerGroup> mask;
};
const LayoutBindingInfo& GetBindingInfo() const;
private:
DeviceBase* device;
LayoutBindingInfo bindingInfo;
bool blueprint = false;
};
class BindGroupLayoutBuilder : public RefCounted {
public:
BindGroupLayoutBuilder(DeviceBase* device);
bool WasConsumed() const;
const BindGroupLayoutBase::LayoutBindingInfo& GetBindingInfo() const;
// NXT API
BindGroupLayoutBase* GetResult();
void SetBindingsType(nxt::ShaderStageBit visibility, nxt::BindingType bindingType, uint32_t start, uint32_t count);
private:
friend class BindGroupLayoutBase;
DeviceBase* device;
BindGroupLayoutBase::LayoutBindingInfo bindingInfo;
bool consumed = false;
};
// Implements the functors necessary for the unordered_set<BGL*>-based cache.
struct BindGroupLayoutCacheFuncs {
// The hash function
size_t operator() (const BindGroupLayoutBase* bgl) const;
// The equality predicate
bool operator() (const BindGroupLayoutBase* a, const BindGroupLayoutBase* b) const;
};
}
#endif // BACKEND_COMMON_BINDGROUPLAYOUT_H_
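
For illustration only, the kind of container these functors are written for; the real cache member lives on DeviceBase and is not part of this file, so the alias below is a sketch.

#include "BindGroupLayout.h"
#include <unordered_set>

// One struct supplies both the hash and the equality predicate, keyed purely on
// LayoutBindingInfo, so structurally identical layouts map to one cached object.
using BindGroupLayoutCache = std::unordered_set<backend::BindGroupLayoutBase*,
                                                backend::BindGroupLayoutCacheFuncs,
                                                backend::BindGroupLayoutCacheFuncs>;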

135
src/backend/common/BitSetIterator.h Normal file
View File

@ -0,0 +1,135 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_BITSETITERATOR_H_
#define BACKEND_COMMON_BITSETITERATOR_H_
#include "Forward.h"
#include "Math.h"
#include <bitset>
#include <limits>
// This is ANGLE's BitSetIterator class with a customizable return type
// TODO(cwallez@chromium.org): it could be optimized, in particular when N <= 64
namespace backend {
template <typename T>
T roundUp(const T value, const T alignment) {
auto temp = value + alignment - static_cast<T>(1);
return temp - temp % alignment;
}
template <size_t N, typename T>
class BitSetIterator final {
public:
BitSetIterator(const std::bitset<N>& bitset);
BitSetIterator(const BitSetIterator& other);
BitSetIterator &operator=(const BitSetIterator& other);
class Iterator final {
public:
Iterator(const std::bitset<N>& bits);
Iterator& operator++();
bool operator==(const Iterator& other) const;
bool operator!=(const Iterator& other) const;
T operator*() const { return static_cast<T>(mCurrentBit); }
private:
unsigned long getNextBit();
static const size_t BitsPerWord = sizeof(unsigned long) * 8;
std::bitset<N> mBits;
unsigned long mCurrentBit;
unsigned long mOffset;
};
Iterator begin() const { return Iterator(mBits); }
Iterator end() const { return Iterator(std::bitset<N>(0)); }
private:
const std::bitset<N> mBits;
};
template <size_t N, typename T>
BitSetIterator<N, T>::BitSetIterator(const std::bitset<N>& bitset)
: mBits(bitset) {
}
template <size_t N, typename T>
BitSetIterator<N, T>::BitSetIterator(const BitSetIterator& other)
: mBits(other.mBits) {
}
template <size_t N, typename T>
BitSetIterator<N, T>& BitSetIterator<N, T>::operator=(const BitSetIterator& other) {
mBits = other.mBits;
return *this;
}
template <size_t N, typename T>
BitSetIterator<N, T>::Iterator::Iterator(const std::bitset<N>& bits)
: mBits(bits), mCurrentBit(0), mOffset(0) {
if (bits.any()) {
mCurrentBit = getNextBit();
} else {
mOffset = static_cast<unsigned long>(roundUp(N, BitsPerWord));
}
}
template <size_t N, typename T>
typename BitSetIterator<N, T>::Iterator& BitSetIterator<N, T>::Iterator::operator++() {
ASSERT(mBits.any());
mBits.set(mCurrentBit - mOffset, 0);
mCurrentBit = getNextBit();
return *this;
}
template <size_t N, typename T>
bool BitSetIterator<N, T>::Iterator::operator==(const Iterator& other) const {
return mOffset == other.mOffset && mBits == other.mBits;
}
template <size_t N, typename T>
bool BitSetIterator<N, T>::Iterator::operator!=(const Iterator& other) const {
return !(*this == other);
}
template <size_t N, typename T>
unsigned long BitSetIterator<N, T>::Iterator::getNextBit() {
static std::bitset<N> wordMask(std::numeric_limits<unsigned long>::max());
while (mOffset < N) {
unsigned long wordBits = (mBits & wordMask).to_ulong();
if (wordBits != 0ul) {
return ScanForward(wordBits) + mOffset;
}
mBits >>= BitsPerWord;
mOffset += BitsPerWord;
}
return 0;
}
// Helper to avoid needing to specify the template parameter size
template <size_t N>
BitSetIterator<N, uint32_t> IterateBitSet(const std::bitset<N>& bitset) {
return BitSetIterator<N, uint32_t>(bitset);
}
}
#endif // BACKEND_COMMON_BITSETITERATOR_H_
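
A minimal usage sketch of the iterator above: IterateBitSet visits the indices of the set bits in increasing order and hands them back as uint32_t.

#include "BitSetIterator.h"
#include <bitset>

void VisitSetBits() {
    std::bitset<8> mask;
    mask.set(1);
    mask.set(5);
    for (uint32_t i : backend::IterateBitSet(mask)) {
        // i takes the values 1, then 5; cleared bits are skipped entirely.
        (void)i;
    }
}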

233
src/backend/common/Buffer.cpp Normal file
View File

@ -0,0 +1,233 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "Buffer.h"
#include "Device.h"
#include <utility>
#include <cstdio>
namespace backend {
// Buffer
BufferBase::BufferBase(BufferBuilder* builder)
: device(builder->device),
size(builder->size),
allowedUsage(builder->allowedUsage),
currentUsage(builder->currentUsage) {
}
BufferViewBuilder* BufferBase::CreateBufferViewBuilder() {
return new BufferViewBuilder(device, this);
}
uint32_t BufferBase::GetSize() const {
return size;
}
nxt::BufferUsageBit BufferBase::GetAllowedUsage() const {
return allowedUsage;
}
nxt::BufferUsageBit BufferBase::GetUsage() const {
return currentUsage;
}
void BufferBase::SetSubData(uint32_t start, uint32_t count, const uint32_t* data) {
if ((start + count) * sizeof(uint32_t) > GetSize()) {
device->HandleError("Buffer subdata out of range");
return;
}
if (!(currentUsage & nxt::BufferUsageBit::Mapped)) {
device->HandleError("Buffer needs the mapped usage bit");
return;
}
SetSubDataImpl(start, count, data);
}
bool BufferBase::IsFrozen() const {
return frozen;
}
bool BufferBase::HasFrozenUsage(nxt::BufferUsageBit usage) const {
return frozen && (usage & allowedUsage);
}
bool BufferBase::IsUsagePossible(nxt::BufferUsageBit allowedUsage, nxt::BufferUsageBit usage) {
const nxt::BufferUsageBit allReadBits =
nxt::BufferUsageBit::TransferSrc |
nxt::BufferUsageBit::Index |
nxt::BufferUsageBit::Vertex |
nxt::BufferUsageBit::Uniform;
bool allowed = (usage & allowedUsage) == usage;
bool readOnly = (usage & allReadBits) == usage;
bool singleUse = nxt::HasZeroOrOneBits(usage);
return allowed && (readOnly || singleUse);
}
bool BufferBase::IsTransitionPossible(nxt::BufferUsageBit usage) const {
if (frozen) {
return false;
}
return IsUsagePossible(allowedUsage, usage);
}
void BufferBase::TransitionUsageImpl(nxt::BufferUsageBit usage) {
assert(IsTransitionPossible(usage));
currentUsage = usage;
}
void BufferBase::TransitionUsage(nxt::BufferUsageBit usage) {
if (!IsTransitionPossible(usage)) {
device->HandleError("Buffer frozen or usage not allowed");
return;
}
TransitionUsageImpl(usage);
}
void BufferBase::FreezeUsage(nxt::BufferUsageBit usage) {
if (!IsTransitionPossible(usage)) {
device->HandleError("Buffer frozen or usage not allowed");
return;
}
allowedUsage = usage;
currentUsage = usage;
frozen = true;
}
// BufferBuilder
enum BufferSetProperties {
BUFFER_PROPERTY_ALLOWED_USAGE = 0x1,
BUFFER_PROPERTY_INITIAL_USAGE = 0x2,
BUFFER_PROPERTY_SIZE = 0x4,
};
BufferBuilder::BufferBuilder(DeviceBase* device) : device(device) {
}
bool BufferBuilder::WasConsumed() const {
return consumed;
}
BufferBase* BufferBuilder::GetResult() {
constexpr int allProperties = BUFFER_PROPERTY_ALLOWED_USAGE | BUFFER_PROPERTY_SIZE;
if ((propertiesSet & allProperties) != allProperties) {
device->HandleError("Buffer missing properties");
return nullptr;
}
if (!BufferBase::IsUsagePossible(allowedUsage, currentUsage)) {
device->HandleError("Initial buffer usage is not allowed");
return nullptr;
}
consumed = true;
return device->CreateBuffer(this);
}
void BufferBuilder::SetAllowedUsage(nxt::BufferUsageBit usage) {
if ((propertiesSet & BUFFER_PROPERTY_ALLOWED_USAGE) != 0) {
device->HandleError("Buffer allowedUsage property set multiple times");
return;
}
this->allowedUsage = usage;
propertiesSet |= BUFFER_PROPERTY_ALLOWED_USAGE;
}
void BufferBuilder::SetInitialUsage(nxt::BufferUsageBit usage) {
if ((propertiesSet & BUFFER_PROPERTY_INITIAL_USAGE) != 0) {
device->HandleError("Buffer initialUsage property set multiple times");
return;
}
this->currentUsage = usage;
propertiesSet |= BUFFER_PROPERTY_INITIAL_USAGE;
}
void BufferBuilder::SetSize(uint32_t size) {
if ((propertiesSet & BUFFER_PROPERTY_SIZE) != 0) {
device->HandleError("Buffer size property set multiple times");
return;
}
this->size = size;
propertiesSet |= BUFFER_PROPERTY_SIZE;
}
// BufferViewBase
BufferViewBase::BufferViewBase(BufferViewBuilder* builder)
: buffer(std::move(builder->buffer)), size(builder->size), offset(builder->offset) {
}
BufferBase* BufferViewBase::GetBuffer() {
return buffer.Get();
}
uint32_t BufferViewBase::GetSize() const {
return size;
}
uint32_t BufferViewBase::GetOffset() const {
return offset;
}
// BufferViewBuilder
enum BufferViewSetProperties {
BUFFER_VIEW_PROPERTY_EXTENT = 0x1,
};
BufferViewBuilder::BufferViewBuilder(DeviceBase* device, BufferBase* buffer)
: device(device), buffer(buffer) {
}
bool BufferViewBuilder::WasConsumed() const {
return consumed;
}
BufferViewBase* BufferViewBuilder::GetResult() {
constexpr int allProperties = BUFFER_VIEW_PROPERTY_EXTENT;
if ((propertiesSet & allProperties) != allProperties) {
device->HandleError("Buffer view missing properties");
return nullptr;
}
consumed = true;
return device->CreateBufferView(this);
}
void BufferViewBuilder::SetExtent(uint32_t offset, uint32_t size) {
if ((propertiesSet & BUFFER_VIEW_PROPERTY_EXTENT) != 0) {
device->HandleError("Buffer view extent property set multiple times");
return;
}
uint64_t viewEnd = static_cast<uint64_t>(offset) + static_cast<uint64_t>(size);
if (viewEnd > static_cast<uint64_t>(buffer->GetSize())) {
device->HandleError("Buffer view end is OOB");
return;
}
this->offset = offset;
this->size = size;
propertiesSet |= BUFFER_VIEW_PROPERTY_EXTENT;
}
}
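
A hedged sketch of the usage-transition model implemented above, for a hypothetical buffer whose allowed usage is TransferDst | Vertex (the enum spellings follow the nxt::BufferUsageBit names already used in this file).

#include "Buffer.h"

void ExampleUsageTransitions(backend::BufferBase* buffer) {
    buffer->TransitionUsage(nxt::BufferUsageBit::TransferDst);  // ok: allowed and not frozen
    buffer->TransitionUsage(nxt::BufferUsageBit::Vertex);       // ok: read-only usage
    buffer->FreezeUsage(nxt::BufferUsageBit::Vertex);           // locks allowed and current usage
    // Any further TransitionUsage now fails with "Buffer frozen or usage not allowed".
}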

114
src/backend/common/Buffer.h Normal file
View File

@ -0,0 +1,114 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_BUFFER_H_
#define BACKEND_COMMON_BUFFER_H_
#include "Forward.h"
#include "RefCounted.h"
#include "nxt/nxtcpp.h"
namespace backend {
class BufferBase : public RefCounted {
public:
BufferBase(BufferBuilder* builder);
uint32_t GetSize() const;
nxt::BufferUsageBit GetAllowedUsage() const;
nxt::BufferUsageBit GetUsage() const;
static bool IsUsagePossible(nxt::BufferUsageBit allowedUsage, nxt::BufferUsageBit usage);
bool IsTransitionPossible(nxt::BufferUsageBit usage) const;
bool IsFrozen() const;
bool HasFrozenUsage(nxt::BufferUsageBit usage) const;
void TransitionUsageImpl(nxt::BufferUsageBit usage);
// NXT API
BufferViewBuilder* CreateBufferViewBuilder();
void SetSubData(uint32_t start, uint32_t count, const uint32_t* data);
void TransitionUsage(nxt::BufferUsageBit usage);
void FreezeUsage(nxt::BufferUsageBit usage);
private:
virtual void SetSubDataImpl(uint32_t start, uint32_t count, const uint32_t* data) = 0;
DeviceBase* device;
uint32_t size;
nxt::BufferUsageBit allowedUsage = nxt::BufferUsageBit::None;
nxt::BufferUsageBit currentUsage = nxt::BufferUsageBit::None;
bool frozen = false;
};
class BufferBuilder : public RefCounted {
public:
BufferBuilder(DeviceBase* device);
bool WasConsumed() const;
// NXT API
BufferBase* GetResult();
void SetAllowedUsage(nxt::BufferUsageBit usage);
void SetInitialUsage(nxt::BufferUsageBit usage);
void SetSize(uint32_t size);
private:
friend class BufferBase;
DeviceBase* device;
uint32_t size;
nxt::BufferUsageBit allowedUsage = nxt::BufferUsageBit::None;
nxt::BufferUsageBit currentUsage = nxt::BufferUsageBit::None;
int propertiesSet = 0;
bool consumed = false;
};
class BufferViewBase : public RefCounted {
public:
BufferViewBase(BufferViewBuilder* builder);
BufferBase* GetBuffer();
uint32_t GetSize() const;
uint32_t GetOffset() const;
private:
Ref<BufferBase> buffer;
uint32_t size;
uint32_t offset;
};
class BufferViewBuilder : public RefCounted {
public:
BufferViewBuilder(DeviceBase* device, BufferBase* buffer);
bool WasConsumed() const;
// NXT API
BufferViewBase* GetResult();
void SetExtent(uint32_t offset, uint32_t size);
private:
friend class BufferViewBase;
DeviceBase* device;
Ref<BufferBase> buffer;
uint32_t offset = 0;
uint32_t size = 0;
int propertiesSet = 0;
bool consumed = false;
};
}
#endif // BACKEND_COMMON_BUFFER_H_

219
src/backend/common/CommandAllocator.cpp Normal file
View File

@ -0,0 +1,219 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "CommandAllocator.h"
#include "Math.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdlib>
#include <utility>
#define ASSERT assert
namespace backend {
constexpr uint32_t EndOfBlock = UINT_MAX; // std::numeric_limits<uint32_t>::max()
constexpr uint32_t AdditionalData = UINT_MAX - 1; // std::numeric_limits<uint32_t>::max() - 1
// TODO(cwallez@chromium.org): figure out a way to have more type safety for the iterator
CommandIterator::CommandIterator()
: endOfBlock(EndOfBlock) {
Reset();
}
CommandIterator::~CommandIterator() {
ASSERT(dataWasDestroyed);
if (!IsEmpty()) {
for (auto& block : blocks) {
free(block.block);
}
}
}
CommandIterator::CommandIterator(CommandIterator&& other)
: endOfBlock(EndOfBlock) {
if (!other.IsEmpty()) {
blocks = std::move(other.blocks);
other.Reset();
}
other.DataWasDestroyed();
Reset();
}
CommandIterator& CommandIterator::operator=(CommandIterator&& other) {
if (!other.IsEmpty()) {
blocks = std::move(other.blocks);
other.Reset();
} else {
blocks.clear();
}
other.DataWasDestroyed();
Reset();
return *this;
}
CommandIterator::CommandIterator(CommandAllocator&& allocator)
: blocks(allocator.AcquireBlocks()), endOfBlock(EndOfBlock) {
Reset();
}
CommandIterator& CommandIterator::operator=(CommandAllocator&& allocator) {
blocks = allocator.AcquireBlocks();
Reset();
return *this;
}
void CommandIterator::Reset() {
currentBlock = 0;
if (blocks.empty()) {
// This will cause the first NextCommandId call to try to move to the next
// block and stop the iteration immediately, without special casing the
// initialization.
currentPtr = reinterpret_cast<uint8_t*>(&endOfBlock);
blocks.emplace_back();
blocks[0].size = sizeof(endOfBlock);
blocks[0].block = currentPtr;
} else {
currentPtr = Align(blocks[0].block, alignof(uint32_t));
}
}
void CommandIterator::DataWasDestroyed() {
dataWasDestroyed = true;
}
bool CommandIterator::IsEmpty() const {
return blocks[0].block == reinterpret_cast<const uint8_t*>(&endOfBlock);
}
bool CommandIterator::NextCommandId(uint32_t* commandId) {
uint8_t* idPtr = Align(currentPtr, alignof(uint32_t));
ASSERT(idPtr + sizeof(uint32_t) <= blocks[currentBlock].block + blocks[currentBlock].size);
uint32_t id = *reinterpret_cast<uint32_t*>(idPtr);
if (id == EndOfBlock) {
currentBlock++;
if (currentBlock >= blocks.size()) {
Reset();
return false;
}
currentPtr = Align(blocks[currentBlock].block, alignof(uint32_t));
return NextCommandId(commandId);
}
currentPtr = idPtr + sizeof(uint32_t);
*commandId = id;
return true;
}
void* CommandIterator::NextCommand(size_t commandSize, size_t commandAlignment) {
uint8_t* commandPtr = Align(currentPtr, commandAlignment);
ASSERT(commandPtr + commandSize <= blocks[currentBlock].block + blocks[currentBlock].size);
currentPtr = commandPtr + commandSize;
return commandPtr;
}
void* CommandIterator::NextData(size_t dataSize, size_t dataAlignment) {
uint32_t id;
bool hasId = NextCommandId(&id);
ASSERT(hasId);
ASSERT(id == AdditionalData);
return NextCommand(dataSize, dataAlignment);
}
// Potential TODO(cwallez@chromium.org):
// - Host the size and pointer to next block in the block itself to avoid having an allocation in the vector
// - Assume T's alignof is, say 64bits, static assert it, and make commandAlignment a constant in Allocate
// - Be able to optimize allocation to one block, for command buffers expected to live long to avoid cache misses
// - Better block allocation, maybe have NXT API to say command buffer is going to have size close to another
CommandAllocator::CommandAllocator()
: currentPtr(reinterpret_cast<uint8_t*>(&dummyEnum[0])), endPtr(reinterpret_cast<uint8_t*>(&dummyEnum[1])) {
}
CommandAllocator::~CommandAllocator() {
ASSERT(blocks.empty());
}
CommandBlocks&& CommandAllocator::AcquireBlocks() {
ASSERT(currentPtr != nullptr && endPtr != nullptr);
ASSERT(IsAligned(currentPtr, alignof(uint32_t)));
ASSERT(currentPtr + sizeof(uint32_t) <= endPtr);
*reinterpret_cast<uint32_t*>(currentPtr) = EndOfBlock;
currentPtr = nullptr;
endPtr = nullptr;
return std::move(blocks);
}
uint8_t* CommandAllocator::Allocate(uint32_t commandId, size_t commandSize, size_t commandAlignment) {
ASSERT(currentPtr != nullptr);
ASSERT(endPtr != nullptr);
ASSERT(commandId != EndOfBlock);
// It should always be possible to allocate one id, for EndOfBlock tagging.
ASSERT(IsAligned(currentPtr, alignof(uint32_t)));
ASSERT(currentPtr + sizeof(uint32_t) <= endPtr);
uint32_t* idAlloc = reinterpret_cast<uint32_t*>(currentPtr);
uint8_t* commandAlloc = Align(currentPtr + sizeof(uint32_t), commandAlignment);
uint8_t* nextPtr = Align(commandAlloc + commandSize, alignof(uint32_t));
// When there is not enough space, we signal the EndOfBlock, so that the iterator knows to
// move to the next one. EndOfBlock on the last block means the end of the commands.
if (nextPtr + sizeof(uint32_t) > endPtr) {
// Even if we are not able to get another block, the list of commands will be well-formed
// and iterable as this block will be the last one.
*idAlloc = EndOfBlock;
// Make sure we have space for current allocation, plus end of block and alignment padding
// for the first id.
if (!GetNewBlock(nextPtr - currentPtr + sizeof(uint32_t) + alignof(uint32_t))) {
return nullptr;
}
return Allocate(commandId, commandSize, commandAlignment);
}
*idAlloc = commandId;
currentPtr = nextPtr;
return commandAlloc;
}
uint8_t* CommandAllocator::AllocateData(size_t commandSize, size_t commandAlignment) {
return Allocate(AdditionalData, commandSize, commandAlignment);
}
bool CommandAllocator::GetNewBlock(size_t minimumSize) {
// Allocate blocks doubling sizes each time, to a maximum of 16k (or at least minimumSize).
lastAllocationSize = std::max(minimumSize, std::min(lastAllocationSize * 2, size_t(16384)));
uint8_t* block = reinterpret_cast<uint8_t*>(malloc(lastAllocationSize));
if (block == nullptr) {
return false;
}
blocks.push_back({lastAllocationSize, block});
currentPtr = Align(block, alignof(uint32_t));
endPtr = block + lastAllocationSize;
return true;
}
}

150
src/backend/common/CommandAllocator.h Normal file
View File

@ -0,0 +1,150 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_COMMAND_ALLOCATOR_H_
#define BACKEND_COMMON_COMMAND_ALLOCATOR_H_
#include <cstdint>
#include <cstddef>
#include <vector>
namespace backend {
// Allocation for command buffers should be fast. To avoid doing an allocation per command
// or to avoid copying commands when reallocing, we use a linear allocator in a growing set
// of large memory blocks. We also use this to have the format be (u32 commandId, command),
// so that iteration over the commands is easy.
// Usage of the allocator and iterator:
// CommandAllocator allocator;
// DrawCommand* cmd = allocator.Allocate<DrawCommand>(CommandType::Draw);
// // Fill command
// // Repeat allocation and filling commands
//
// CommandIterator commands(std::move(allocator));
// CommandType type;
// while (commands.NextCommandId(&type)) {
// switch (type) {
// case CommandType::Draw:
// DrawCommand* draw = commands.NextCommand<DrawCommand>();
// // Do the draw
// break;
// // other cases
// }
// }
// Note that you need to extract the commands from the CommandAllocator before destroying it
// and must tell the CommandIterator when the allocated commands have been processed for
// deletion.
// These are the lists of blocks, should not be used directly, only through CommandAllocator
// and CommandIterator
struct BlockDef {
size_t size;
uint8_t* block;
};
using CommandBlocks = std::vector<BlockDef>;
class CommandAllocator;
// TODO(cwallez@chromium.org): prevent copy for both iterator and allocator
class CommandIterator {
public:
CommandIterator();
~CommandIterator();
CommandIterator(CommandIterator&& other);
CommandIterator& operator=(CommandIterator&& other);
CommandIterator(CommandAllocator&& allocator);
CommandIterator& operator=(CommandAllocator&& allocator);
template<typename E>
bool NextCommandId(E* commandId) {
return NextCommandId(reinterpret_cast<uint32_t*>(commandId));
}
template<typename T>
T* NextCommand() {
return reinterpret_cast<T*>(NextCommand(sizeof(T), alignof(T)));
}
template<typename T>
T* NextData(size_t count) {
return reinterpret_cast<T*>(NextData(sizeof(T) * count, alignof(T)));
}
// Needs to be called if iteration was stopped early.
void Reset();
void DataWasDestroyed();
private:
bool IsEmpty() const;
bool NextCommandId(uint32_t* commandId);
void* NextCommand(size_t commandSize, size_t commandAlignment);
void* NextData(size_t dataSize, size_t dataAlignment);
CommandBlocks blocks;
uint8_t* currentPtr = nullptr;
size_t currentBlock = 0;
// Used to avoid a special case for empty iterators.
uint32_t endOfBlock;
bool dataWasDestroyed = false;
};
class CommandAllocator {
public:
CommandAllocator();
~CommandAllocator();
template<typename T, typename E>
T* Allocate(E commandId) {
static_assert(sizeof(E) == sizeof(uint32_t), "");
static_assert(alignof(E) == alignof(uint32_t), "");
return reinterpret_cast<T*>(Allocate(static_cast<uint32_t>(commandId), sizeof(T), alignof(T)));
}
template<typename T>
T* AllocateData(size_t count) {
return reinterpret_cast<T*>(AllocateData(sizeof(T) * count, alignof(T)));
}
private:
friend CommandIterator;
CommandBlocks&& AcquireBlocks();
uint8_t* Allocate(uint32_t commandId, size_t commandSize, size_t commandAlignment);
uint8_t* AllocateData(size_t dataSize, size_t dataAlignment);
bool GetNewBlock(size_t minimumSize);
CommandBlocks blocks;
size_t lastAllocationSize = 2048;
// Pointers to the current range of allocation in the block. Guaranteed to allow
// for at least one uint32_t if not nullptr, so that the special EndOfBlock command id
// can always be written.
// Nullptr iff the blocks were moved out.
uint8_t* currentPtr = nullptr;
uint8_t* endPtr = nullptr;
// Data used for the block range at initialization so that the first call to Allocate
// sees there is not enough space and calls GetNewBlock. This avoids having to special
// case the initialization in Allocate.
uint32_t dummyEnum[1] = {0};
};
}
#endif // BACKEND_COMMON_COMMAND_ALLOCATOR_H_
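
A self-contained variant of the usage sketch in the comment above, with a hypothetical command enum and struct standing in for the real ones declared in Commands.h.

#include "CommandAllocator.h"
#include <utility>

enum class ExampleCommand : uint32_t { Draw = 0 };
struct ExampleDrawCmd { uint32_t vertexCount; };

void RecordAndIterate() {
    backend::CommandAllocator allocator;
    auto* draw = allocator.Allocate<ExampleDrawCmd>(ExampleCommand::Draw);
    draw->vertexCount = 3;  // (allocation failure ignored in this sketch)

    backend::CommandIterator commands(std::move(allocator));
    ExampleCommand type;
    while (commands.NextCommandId(&type)) {
        if (type == ExampleCommand::Draw) {
            ExampleDrawCmd* cmd = commands.NextCommand<ExampleDrawCmd>();
            (void)cmd->vertexCount;  // replay the command here
        }
    }
    commands.DataWasDestroyed();  // required before the iterator is destroyed
}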

623
src/backend/common/CommandBuffer.cpp Normal file
View File

@ -0,0 +1,623 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "CommandBuffer.h"
#include "BindGroup.h"
#include "BindGroupLayout.h"
#include "Buffer.h"
#include "Commands.h"
#include "Device.h"
#include "InputState.h"
#include "Pipeline.h"
#include "PipelineLayout.h"
#include "Texture.h"
#include <bitset>
#include <cassert>
#include <cstring>
#include <map>
namespace backend {
CommandBufferBase::CommandBufferBase(CommandBufferBuilder* builder)
: device(builder->device),
buffersTransitioned(std::move(builder->buffersTransitioned)),
texturesTransitioned(std::move(builder->texturesTransitioned)) {
}
bool CommandBufferBase::ValidateResourceUsagesImmediate() {
for (auto buffer : buffersTransitioned) {
if (buffer->IsFrozen()) {
device->HandleError("Command buffer: cannot transition buffer with frozen usage");
return false;
}
}
for (auto texture : texturesTransitioned) {
if (texture->IsFrozen()) {
device->HandleError("Command buffer: cannot transition texture with frozen usage");
return false;
}
}
return true;
}
void FreeCommands(CommandIterator* commands) {
Command type;
while(commands->NextCommandId(&type)) {
switch (type) {
case Command::CopyBufferToTexture:
{
CopyBufferToTextureCmd* copy = commands->NextCommand<CopyBufferToTextureCmd>();
copy->~CopyBufferToTextureCmd();
}
break;
case Command::Dispatch:
{
DispatchCmd* dispatch = commands->NextCommand<DispatchCmd>();
dispatch->~DispatchCmd();
}
break;
case Command::DrawArrays:
{
DrawArraysCmd* draw = commands->NextCommand<DrawArraysCmd>();
draw->~DrawArraysCmd();
}
break;
case Command::DrawElements:
{
DrawElementsCmd* draw = commands->NextCommand<DrawElementsCmd>();
draw->~DrawElementsCmd();
}
break;
case Command::SetPipeline:
{
SetPipelineCmd* cmd = commands->NextCommand<SetPipelineCmd>();
cmd->~SetPipelineCmd();
}
break;
case Command::SetPushConstants:
{
SetPushConstantsCmd* cmd = commands->NextCommand<SetPushConstantsCmd>();
commands->NextData<uint32_t>(cmd->count);
cmd->~SetPushConstantsCmd();
}
break;
case Command::SetBindGroup:
{
SetBindGroupCmd* cmd = commands->NextCommand<SetBindGroupCmd>();
cmd->~SetBindGroupCmd();
}
break;
case Command::SetIndexBuffer:
{
SetIndexBufferCmd* cmd = commands->NextCommand<SetIndexBufferCmd>();
cmd->~SetIndexBufferCmd();
}
break;
case Command::SetVertexBuffers:
{
SetVertexBuffersCmd* cmd = commands->NextCommand<SetVertexBuffersCmd>();
auto buffers = commands->NextData<Ref<BufferBase>>(cmd->count);
for (size_t i = 0; i < cmd->count; ++i) {
(&buffers[i])->~Ref<BufferBase>();
}
commands->NextData<uint32_t>(cmd->count);
cmd->~SetVertexBuffersCmd();
}
break;
case Command::TransitionBufferUsage:
{
TransitionBufferUsageCmd* cmd = commands->NextCommand<TransitionBufferUsageCmd>();
cmd->~TransitionBufferUsageCmd();
}
break;
case Command::TransitionTextureUsage:
{
TransitionTextureUsageCmd* cmd = commands->NextCommand<TransitionTextureUsageCmd>();
cmd->~TransitionTextureUsageCmd();
}
break;
}
}
commands->DataWasDestroyed();
}
CommandBufferBuilder::CommandBufferBuilder(DeviceBase* device) : device(device) {
}
CommandBufferBuilder::~CommandBufferBuilder() {
if (!consumed) {
MoveToIterator();
FreeCommands(&iterator);
}
}
bool CommandBufferBuilder::WasConsumed() const {
return consumed;
}
enum ValidationAspect {
VALIDATION_ASPECT_RENDER_PIPELINE,
VALIDATION_ASPECT_COMPUTE_PIPELINE,
VALIDATION_ASPECT_BINDGROUPS,
VALIDATION_ASPECT_VERTEX_BUFFERS,
VALIDATION_ASPECT_INDEX_BUFFER,
VALIDATION_ASPECT_COUNT,
};
using ValidationAspects = std::bitset<VALIDATION_ASPECT_COUNT>;
bool CommandBufferBuilder::ValidateGetResult() {
MoveToIterator();
ValidationAspects aspects;
std::bitset<kMaxBindGroups> bindgroupsSet;
std::bitset<kMaxVertexInputs> inputsSet;
PipelineBase* lastPipeline = nullptr;
std::map<BufferBase*, nxt::BufferUsageBit> mostRecentBufferUsages;
auto bufferHasGuaranteedUsageBit = [&](BufferBase* buffer, nxt::BufferUsageBit usage) -> bool {
assert(usage != nxt::BufferUsageBit::None && nxt::HasZeroOrOneBits(usage));
if (buffer->HasFrozenUsage(usage)) {
return true;
}
auto it = mostRecentBufferUsages.find(buffer);
return it != mostRecentBufferUsages.end() && (it->second & usage);
};
std::map<TextureBase*, nxt::TextureUsageBit> mostRecentTextureUsages;
auto textureHasGuaranteedUsageBit = [&](TextureBase* texture, nxt::TextureUsageBit usage) -> bool {
assert(usage != nxt::TextureUsageBit::None && nxt::HasZeroOrOneBits(usage));
if (texture->HasFrozenUsage(usage)) {
return true;
}
auto it = mostRecentTextureUsages.find(texture);
return it != mostRecentTextureUsages.end() && (it->second & usage);
};
auto validateBindGroupUsages = [&](BindGroupBase* group) -> bool {
const auto& layoutInfo = group->GetLayout()->GetBindingInfo();
for (size_t i = 0; i < kMaxBindingsPerGroup; ++i) {
if (!layoutInfo.mask[i]) {
continue;
}
nxt::BindingType type = layoutInfo.types[i];
switch (type) {
case nxt::BindingType::UniformBuffer:
case nxt::BindingType::StorageBuffer:
{
nxt::BufferUsageBit requiredUsage;
switch (type) {
case nxt::BindingType::UniformBuffer:
requiredUsage = nxt::BufferUsageBit::Uniform;
break;
case nxt::BindingType::StorageBuffer:
requiredUsage = nxt::BufferUsageBit::Storage;
break;
default:
assert(false);
return false;
}
auto buffer = group->GetBindingAsBufferView(i)->GetBuffer();
if (!bufferHasGuaranteedUsageBit(buffer, requiredUsage)) {
device->HandleError("Can't guarantee buffer usage needed by bind group");
return false;
}
}
break;
case nxt::BindingType::SampledTexture:
{
auto requiredUsage = nxt::TextureUsageBit::Sampled;
auto texture = group->GetBindingAsTextureView(i)->GetTexture();
if (!textureHasGuaranteedUsageBit(texture, requiredUsage)) {
device->HandleError("Can't guarantee texture usage needed by bind group");
return false;
}
}
break;
case nxt::BindingType::Sampler:
continue;
}
}
return true;
};
Command type;
while(iterator.NextCommandId(&type)) {
switch (type) {
case Command::CopyBufferToTexture:
{
CopyBufferToTextureCmd* copy = iterator.NextCommand<CopyBufferToTextureCmd>();
BufferBase* buffer = copy->buffer.Get();
TextureBase* texture = copy->texture.Get();
uint64_t width = copy->width;
uint64_t height = copy->height;
uint64_t depth = copy->depth;
uint64_t x = copy->x;
uint64_t y = copy->y;
uint64_t z = copy->z;
uint32_t level = copy->level;
if (!bufferHasGuaranteedUsageBit(buffer, nxt::BufferUsageBit::TransferSrc)) {
device->HandleError("Buffer needs the transfer source usage bit");
return false;
}
if (!textureHasGuaranteedUsageBit(texture, nxt::TextureUsageBit::TransferDst)) {
device->HandleError("Texture needs the transfer destination usage bit");
return false;
}
if (width == 0 || height == 0 || depth == 0) {
device->HandleError("Empty copy");
return false;
}
// TODO(cwallez@chromium.org): check for overflows
uint64_t pixelSize = TextureFormatPixelSize(texture->GetFormat());
uint64_t dataSize = width * height * depth * pixelSize;
// TODO(cwallez@chromium.org): handle buffer offset when it is in the command.
if (dataSize > static_cast<uint64_t>(buffer->GetSize())) {
device->HandleError("Copy would read after end of the buffer");
return false;
}
if (x + width > static_cast<uint64_t>(texture->GetWidth()) ||
y + height > static_cast<uint64_t>(texture->GetHeight()) ||
z + depth > static_cast<uint64_t>(texture->GetDepth()) ||
level >= texture->GetNumMipLevels()) {
device->HandleError("Copy would write outside of the texture");
return false;
}
}
break;
case Command::Dispatch:
{
iterator.NextCommand<DispatchCmd>();
// Dispatches are validated against the compute pipeline only: compute pipelines
// have no vertex inputs, so vertex buffers are not required here.
constexpr ValidationAspects requiredDispatchAspects =
1 << VALIDATION_ASPECT_COMPUTE_PIPELINE |
1 << VALIDATION_ASPECT_BINDGROUPS;
if ((requiredDispatchAspects & ~aspects).any()) {
// Compute the lazily computed aspects
if (bindgroupsSet.all()) {
aspects.set(VALIDATION_ASPECT_BINDGROUPS);
}
// Check again if anything is missing
if ((requiredDispatchAspects & ~aspects).any()) {
device->HandleError("Some dispatch state is missing");
return false;
}
}
}
break;
case Command::DrawArrays:
case Command::DrawElements:
{
constexpr ValidationAspects requiredDrawAspects =
1 << VALIDATION_ASPECT_RENDER_PIPELINE |
1 << VALIDATION_ASPECT_BINDGROUPS |
1 << VALIDATION_ASPECT_VERTEX_BUFFERS;
if ((requiredDrawAspects & ~aspects).any()) {
// Compute the lazily computed aspects
if (bindgroupsSet.all()) {
aspects.set(VALIDATION_ASPECT_BINDGROUPS);
}
auto requiredInputs = lastPipeline->GetInputState()->GetInputsSetMask();
// Every input slot used by the pipeline's input state must have a vertex buffer set.
if ((requiredInputs & ~inputsSet).none()) {
aspects.set(VALIDATION_ASPECT_VERTEX_BUFFERS);
}
// Check again if anything is missing
if ((requiredDrawAspects & ~aspects).any()) {
device->HandleError("Some draw state is missing");
return false;
}
}
if (type == Command::DrawArrays) {
iterator.NextCommand<DrawArraysCmd>();
} else {
ASSERT(type == Command::DrawElements);
iterator.NextCommand<DrawElementsCmd>();
if (!aspects[VALIDATION_ASPECT_INDEX_BUFFER]) {
device->HandleError("Draw elements requires an index buffer");
return false;
}
}
}
break;
case Command::SetPipeline:
{
SetPipelineCmd* cmd = iterator.NextCommand<SetPipelineCmd>();
PipelineBase* pipeline = cmd->pipeline.Get();
PipelineLayoutBase* layout = pipeline->GetLayout();
if (pipeline->IsCompute()) {
aspects.set(VALIDATION_ASPECT_COMPUTE_PIPELINE);
aspects.reset(VALIDATION_ASPECT_RENDER_PIPELINE);
} else {
aspects.set(VALIDATION_ASPECT_RENDER_PIPELINE);
aspects.reset(VALIDATION_ASPECT_COMPUTE_PIPELINE);
}
aspects.reset(VALIDATION_ASPECT_BINDGROUPS);
aspects.reset(VALIDATION_ASPECT_VERTEX_BUFFERS);
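// Mark the bind group slots not used by this layout as already set, so that
// bindgroupsSet.all() only requires the groups the layout actually uses to be bound.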
bindgroupsSet = ~layout->GetBindGroupsLayoutMask();
// Only bindgroups that were not the same layout in the last pipeline need to be set again.
if (lastPipeline) {
PipelineLayoutBase* lastLayout = lastPipeline->GetLayout();
for (uint32_t i = 0; i < kMaxBindGroups; ++i) {
if (lastLayout->GetBindGroupLayout(i) == layout->GetBindGroupLayout(i)) {
bindgroupsSet |= uint64_t(1) << i;
}
}
}
lastPipeline = pipeline;
}
break;
case Command::SetPushConstants:
{
SetPushConstantsCmd* cmd = iterator.NextCommand<SetPushConstantsCmd>();
iterator.NextData<uint32_t>(cmd->count);
if (cmd->count + cmd->offset > kMaxPushConstants) {
device->HandleError("Setting pushconstants past the limit");
return false;
}
}
break;
case Command::SetBindGroup:
{
SetBindGroupCmd* cmd = iterator.NextCommand<SetBindGroupCmd>();
uint32_t index = cmd->index;
if (cmd->group->GetLayout() != lastPipeline->GetLayout()->GetBindGroupLayout(index)) {
device->HandleError("Bind group layout mismatch");
return false;
}
if (!validateBindGroupUsages(cmd->group.Get())) {
return false;
}
bindgroupsSet |= uint64_t(1) << index;
}
break;
case Command::SetIndexBuffer:
{
SetIndexBufferCmd* cmd = iterator.NextCommand<SetIndexBufferCmd>();
auto buffer = cmd->buffer;
auto usage = nxt::BufferUsageBit::Index;
if (!bufferHasGuaranteedUsageBit(buffer.Get(), usage)) {
device->HandleError("Buffer needs the index usage bit to be guaranteed");
return false;
}
aspects.set(VALIDATION_ASPECT_INDEX_BUFFER);
}
break;
case Command::SetVertexBuffers:
{
SetVertexBuffersCmd* cmd = iterator.NextCommand<SetVertexBuffersCmd>();
auto buffers = iterator.NextData<Ref<BufferBase>>(cmd->count);
iterator.NextData<uint32_t>(cmd->count);
for (uint32_t i = 0; i < cmd->count; ++i) {
auto buffer = buffers[i];
auto usage = nxt::BufferUsageBit::Vertex;
if (!bufferHasGuaranteedUsageBit(buffer.Get(), usage)) {
device->HandleError("Buffer needs vertex usage bit to be guaranteed");
return false;
}
inputsSet.set(cmd->startSlot + i);
}
}
break;
case Command::TransitionBufferUsage:
{
TransitionBufferUsageCmd* cmd = iterator.NextCommand<TransitionBufferUsageCmd>();
auto buffer = cmd->buffer.Get();
auto usage = cmd->usage;
if (!cmd->buffer->IsTransitionPossible(cmd->usage)) {
device->HandleError("Buffer frozen or usage not allowed");
return false;
}
mostRecentBufferUsages[buffer] = usage;
buffersTransitioned.insert(buffer);
}
break;
case Command::TransitionTextureUsage:
{
TransitionTextureUsageCmd* cmd = iterator.NextCommand<TransitionTextureUsageCmd>();
auto texture = cmd->texture.Get();
auto usage = cmd->usage;
if (!cmd->texture->IsTransitionPossible(cmd->usage)) {
device->HandleError("Texture frozen or usage not allowed");
return false;
}
mostRecentTextureUsages[texture] = usage;
texturesTransitioned.insert(texture);
}
break;
}
}
return true;
}
CommandIterator CommandBufferBuilder::AcquireCommands() {
return std::move(iterator);
}
CommandBufferBase* CommandBufferBuilder::GetResult() {
MoveToIterator();
consumed = true;
return device->CreateCommandBuffer(this);
}
void CommandBufferBuilder::CopyBufferToTexture(BufferBase* buffer, TextureBase* texture, uint32_t x, uint32_t y, uint32_t z,
uint32_t width, uint32_t height, uint32_t depth, uint32_t level) {
CopyBufferToTextureCmd* copy = allocator.Allocate<CopyBufferToTextureCmd>(Command::CopyBufferToTexture);
new(copy) CopyBufferToTextureCmd;
copy->buffer = buffer;
copy->texture = texture;
copy->x = x;
copy->y = y;
copy->z = z;
copy->width = width;
copy->height = height;
copy->depth = depth;
copy->level = level;
}
void CommandBufferBuilder::Dispatch(uint32_t x, uint32_t y, uint32_t z) {
DispatchCmd* dispatch = allocator.Allocate<DispatchCmd>(Command::Dispatch);
new(dispatch) DispatchCmd;
dispatch->x = x;
dispatch->y = y;
dispatch->z = z;
}
void CommandBufferBuilder::DrawArrays(uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance) {
DrawArraysCmd* draw = allocator.Allocate<DrawArraysCmd>(Command::DrawArrays);
new(draw) DrawArraysCmd;
draw->vertexCount = vertexCount;
draw->instanceCount = instanceCount;
draw->firstVertex = firstVertex;
draw->firstInstance = firstInstance;
}
void CommandBufferBuilder::DrawElements(uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, uint32_t firstInstance) {
DrawElementsCmd* draw = allocator.Allocate<DrawElementsCmd>(Command::DrawElements);
new(draw) DrawElementsCmd;
draw->indexCount = indexCount;
draw->instanceCount = instanceCount;
draw->firstIndex = firstIndex;
draw->firstInstance = firstInstance;
}
void CommandBufferBuilder::SetPipeline(PipelineBase* pipeline) {
SetPipelineCmd* cmd = allocator.Allocate<SetPipelineCmd>(Command::SetPipeline);
new(cmd) SetPipelineCmd;
cmd->pipeline = pipeline;
}
void CommandBufferBuilder::SetPushConstants(nxt::ShaderStageBit stage, uint32_t offset, uint32_t count, const void* data) {
if (offset + count > kMaxPushConstants) {
device->HandleError("Setting too many push constants");
return;
}
SetPushConstantsCmd* cmd = allocator.Allocate<SetPushConstantsCmd>(Command::SetPushConstants);
new(cmd) SetPushConstantsCmd;
cmd->stage = stage;
cmd->offset = offset;
cmd->count = count;
uint32_t* values = allocator.AllocateData<uint32_t>(count);
memcpy(values, data, count * sizeof(uint32_t));
}
void CommandBufferBuilder::SetBindGroup(uint32_t groupIndex, BindGroupBase* group) {
if (groupIndex >= kMaxBindGroups) {
device->HandleError("Setting bind group over the max");
return;
}
SetBindGroupCmd* cmd = allocator.Allocate<SetBindGroupCmd>(Command::SetBindGroup);
new(cmd) SetBindGroupCmd;
cmd->index = groupIndex;
cmd->group = group;
}
void CommandBufferBuilder::SetIndexBuffer(BufferBase* buffer, uint32_t offset, nxt::IndexFormat format) {
// TODO(kainino@chromium.org): validation
SetIndexBufferCmd* cmd = allocator.Allocate<SetIndexBufferCmd>(Command::SetIndexBuffer);
new(cmd) SetIndexBufferCmd;
cmd->buffer = buffer;
cmd->offset = offset;
cmd->format = format;
}
void CommandBufferBuilder::SetVertexBuffers(uint32_t startSlot, uint32_t count, BufferBase* const* buffers, uint32_t const* offsets){
// TODO(kainino@chromium.org): validation
SetVertexBuffersCmd* cmd = allocator.Allocate<SetVertexBuffersCmd>(Command::SetVertexBuffers);
new(cmd) SetVertexBuffersCmd;
cmd->startSlot = startSlot;
cmd->count = count;
Ref<BufferBase>* cmdBuffers = allocator.AllocateData<Ref<BufferBase>>(count);
for (size_t i = 0; i < count; ++i) {
new(&cmdBuffers[i]) Ref<BufferBase>(buffers[i]);
}
uint32_t* cmdOffsets = allocator.AllocateData<uint32_t>(count);
memcpy(cmdOffsets, offsets, count * sizeof(uint32_t));
}
void CommandBufferBuilder::TransitionBufferUsage(BufferBase* buffer, nxt::BufferUsageBit usage) {
TransitionBufferUsageCmd* cmd = allocator.Allocate<TransitionBufferUsageCmd>(Command::TransitionBufferUsage);
new(cmd) TransitionBufferUsageCmd;
cmd->buffer = buffer;
cmd->usage = usage;
}
void CommandBufferBuilder::TransitionTextureUsage(TextureBase* texture, nxt::TextureUsageBit usage) {
TransitionTextureUsageCmd* cmd = allocator.Allocate<TransitionTextureUsageCmd>(Command::TransitionTextureUsage);
new(cmd) TransitionTextureUsageCmd;
cmd->texture = texture;
cmd->usage = usage;
}
void CommandBufferBuilder::MoveToIterator() {
if (!movedToIterator) {
iterator = std::move(allocator);
movedToIterator = true;
}
}
}

View File

@ -0,0 +1,98 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_COMMANDBUFFER_H_
#define BACKEND_COMMON_COMMANDBUFFER_H_
#include "nxt/nxtcpp.h"
#include "CommandAllocator.h"
#include "RefCounted.h"
#include <set>
#include <type_traits>
#include <utility>
namespace backend {
class BindGroupBase;
class BufferBase;
class DeviceBase;
class PipelineBase;
class TextureBase;
class CommandBufferBuilder;
class CommandBufferBase : public RefCounted {
public:
CommandBufferBase(CommandBufferBuilder* builder);
bool ValidateResourceUsagesImmediate();
private:
DeviceBase* device;
std::set<BufferBase*> buffersTransitioned;
std::set<TextureBase*> texturesTransitioned;
};
class CommandBufferBuilder : public RefCounted {
public:
CommandBufferBuilder(DeviceBase* device);
~CommandBufferBuilder();
bool WasConsumed() const;
bool ValidateGetResult();
CommandIterator AcquireCommands();
// NXT API
CommandBufferBase* GetResult();
void CopyBufferToTexture(BufferBase* buffer, TextureBase* texture, uint32_t x, uint32_t y, uint32_t z,
uint32_t width, uint32_t height, uint32_t depth, uint32_t level);
void Dispatch(uint32_t x, uint32_t y, uint32_t z);
void DrawArrays(uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance);
void DrawElements(uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, uint32_t firstInstance);
void SetPushConstants(nxt::ShaderStageBit stage, uint32_t offset, uint32_t count, const void* data);
void SetPipeline(PipelineBase* pipeline);
void SetBindGroup(uint32_t groupIndex, BindGroupBase* group);
void SetIndexBuffer(BufferBase* buffer, uint32_t offset, nxt::IndexFormat format);
template<typename T>
void SetVertexBuffers(uint32_t startSlot, uint32_t count, T* const* buffers, uint32_t const* offsets) {
static_assert(std::is_base_of<BufferBase, T>::value, "");
SetVertexBuffers(startSlot, count, reinterpret_cast<BufferBase* const*>(buffers), offsets);
}
void SetVertexBuffers(uint32_t startSlot, uint32_t count, BufferBase* const* buffers, uint32_t const* offsets);
void TransitionBufferUsage(BufferBase* buffer, nxt::BufferUsageBit usage);
void TransitionTextureUsage(TextureBase* texture, nxt::TextureUsageBit usage);
private:
friend class CommandBufferBase;
void MoveToIterator();
DeviceBase* device;
CommandAllocator allocator;
CommandIterator iterator;
bool consumed = false;
bool movedToIterator = false;
// These pointers will remain valid because the corresponding resources are kept alive
// by the Refs stored in the recorded commands, which are owned by this builder and
// then by the command buffer.
std::set<BufferBase*> buffersTransitioned;
std::set<TextureBase*> texturesTransitioned;
};
}
#endif // BACKEND_COMMON_COMMANDBUFFER_H_

View File

@ -0,0 +1,114 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_COMMANDS_H_
#define BACKEND_COMMON_COMMANDS_H_
#include "Texture.h"
#include "nxt/nxtcpp.h"
namespace backend {
// Definition of the commands that are present in the CommandIterator given by the
// CommandBufferBuilder. They are not defined in CommandBuffer.h so as to break some header
// dependencies: Ref<Object> needs Object to be defined.
enum class Command {
CopyBufferToTexture,
Dispatch,
DrawArrays,
DrawElements,
SetPipeline,
SetPushConstants,
SetBindGroup,
SetIndexBuffer,
SetVertexBuffers,
TransitionBufferUsage,
TransitionTextureUsage,
};
struct CopyBufferToTextureCmd {
Ref<BufferBase> buffer;
Ref<TextureBase> texture;
uint32_t x, y, z;
uint32_t width, height, depth;
uint32_t level;
};
struct DispatchCmd {
uint32_t x;
uint32_t y;
uint32_t z;
};
struct DrawArraysCmd {
uint32_t vertexCount;
uint32_t instanceCount;
uint32_t firstVertex;
uint32_t firstInstance;
};
struct DrawElementsCmd {
uint32_t indexCount;
uint32_t instanceCount;
uint32_t firstIndex;
uint32_t firstInstance;
};
struct SetPipelineCmd {
Ref<PipelineBase> pipeline;
};
struct SetPushConstantsCmd {
nxt::ShaderStageBit stage;
uint32_t offset;
uint32_t count;
};
struct SetBindGroupCmd {
uint32_t index;
Ref<BindGroupBase> group;
};
struct SetIndexBufferCmd {
Ref<BufferBase> buffer;
uint32_t offset;
nxt::IndexFormat format;
};
struct SetVertexBuffersCmd {
uint32_t startSlot;
uint32_t count;
};
struct TransitionBufferUsageCmd {
Ref<BufferBase> buffer;
nxt::BufferUsageBit usage;
};
struct TransitionTextureUsageCmd {
Ref<TextureBase> texture;
uint32_t startLevel;
uint32_t levelCount;
nxt::TextureUsageBit usage;
};
// This needs to be called before the CommandIterator is freed so that the Ref<>s present in
// the commands have a chance to run their destructors and release their internal references.
void FreeCommands(CommandIterator* commands);
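// Illustration (sketch only, not part of this header): a backend command buffer would
// typically acquire the commands from its builder, replay them, and free them on
// destruction. The class name below is hypothetical.
//
//     class MyCommandBuffer : public CommandBufferBase {
//       public:
//         MyCommandBuffer(CommandBufferBuilder* builder)
//             : CommandBufferBase(builder), commands(builder->AcquireCommands()) {
//         }
//         ~MyCommandBuffer() {
//             FreeCommands(&commands);
//         }
//       private:
//         CommandIterator commands;
//     };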
}
#endif // BACKEND_COMMON_COMMANDS_H_

View File

@ -0,0 +1,126 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "Device.h"
#include "BindGroup.h"
#include "BindGroupLayout.h"
#include "Buffer.h"
#include "CommandBuffer.h"
#include "InputState.h"
#include "Pipeline.h"
#include "PipelineLayout.h"
#include "Queue.h"
#include "Sampler.h"
#include "ShaderModule.h"
#include "Texture.h"
#include <unordered_set>
namespace backend {
void RegisterSynchronousErrorCallback(nxtDevice device, ErrorCallback callback, void* userData) {
auto deviceBase = reinterpret_cast<DeviceBase*>(device);
deviceBase->RegisterErrorCallback(callback, userData);
}
// DeviceBase::Caches
// The caches are unordered_sets of pointers with special hash and compare functions
// to compare the value of the objects, instead of the pointers.
using BindGroupLayoutCache = std::unordered_set<BindGroupLayoutBase*, BindGroupLayoutCacheFuncs, BindGroupLayoutCacheFuncs>;
struct DeviceBase::Caches {
BindGroupLayoutCache bindGroupLayouts;
};
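// Illustration (an assumption, not the actual BindGroupLayoutCacheFuncs): such a functor
// type provides both the hash and the equality predicate used by the set, and both operate
// on the pointed-to value rather than on the pointer itself. The helper functions named
// below are hypothetical.
//
//     struct ValueCacheFuncs {
//         // Hash: combine the contents of the layout, not its address.
//         size_t operator()(const BindGroupLayoutBase* layout) const {
//             return HashBindingInfo(layout->GetBindingInfo());
//         }
//         // Equality: two layouts with identical binding info compare equal.
//         bool operator()(const BindGroupLayoutBase* a, const BindGroupLayoutBase* b) const {
//             return BindingInfoEqual(a->GetBindingInfo(), b->GetBindingInfo());
//         }
//     };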
// DeviceBase
DeviceBase::DeviceBase() {
caches = new DeviceBase::Caches();
}
DeviceBase::~DeviceBase() {
delete caches;
}
void DeviceBase::HandleError(const char* message) {
if (errorCallback) {
errorCallback(message, errorUserData);
}
}
void DeviceBase::RegisterErrorCallback(ErrorCallback callback, void* userData) {
this->errorCallback = callback;
this->errorUserData = userData;
}
BindGroupLayoutBase* DeviceBase::GetOrCreateBindGroupLayout(const BindGroupLayoutBase* blueprint, BindGroupLayoutBuilder* builder) {
// The blueprint is only used to search in the cache and is not modified. However, cached
// objects can be modified, and an unordered_set of non-const pointers cannot be searched
// with a const pointer. That's why we const_cast here; the blueprint still won't be
// modified.
auto iter = caches->bindGroupLayouts.find(const_cast<BindGroupLayoutBase*>(blueprint));
if (iter != caches->bindGroupLayouts.end()) {
return *iter;
}
BindGroupLayoutBase* backendObj = CreateBindGroupLayout(builder);
caches->bindGroupLayouts.insert(backendObj);
return backendObj;
}
void DeviceBase::UncacheBindGroupLayout(BindGroupLayoutBase* obj) {
caches->bindGroupLayouts.erase(obj);
}
BindGroupBuilder* DeviceBase::CreateBindGroupBuilder() {
return new BindGroupBuilder(this);
}
BindGroupLayoutBuilder* DeviceBase::CreateBindGroupLayoutBuilder() {
return new BindGroupLayoutBuilder(this);
}
BufferBuilder* DeviceBase::CreateBufferBuilder() {
return new BufferBuilder(this);
}
CommandBufferBuilder* DeviceBase::CreateCommandBufferBuilder() {
return new CommandBufferBuilder(this);
}
InputStateBuilder* DeviceBase::CreateInputStateBuilder() {
return new InputStateBuilder(this);
}
PipelineBuilder* DeviceBase::CreatePipelineBuilder() {
return new PipelineBuilder(this);
}
PipelineLayoutBuilder* DeviceBase::CreatePipelineLayoutBuilder() {
return new PipelineLayoutBuilder(this);
}
QueueBuilder* DeviceBase::CreateQueueBuilder() {
return new QueueBuilder(this);
}
SamplerBuilder* DeviceBase::CreateSamplerBuilder() {
return new SamplerBuilder(this);
}
ShaderModuleBuilder* DeviceBase::CreateShaderModuleBuilder() {
return new ShaderModuleBuilder(this);
}
TextureBuilder* DeviceBase::CreateTextureBuilder() {
return new TextureBuilder(this);
}
void DeviceBase::CopyBindGroups(uint32_t start, uint32_t count, BindGroupBase* source, BindGroupBase* target) {
// TODO(cwallez@chromium.org): update state tracking then call the backend
}
}

View File

@ -0,0 +1,94 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_DEVICEBASE_H_
#define BACKEND_COMMON_DEVICEBASE_H_
#include "common/Forward.h"
#include "common/RefCounted.h"
#include "nxt/nxtcpp.h"
namespace backend {
using ErrorCallback = void (*)(const char* errorMessage, void* userData);
class DeviceBase {
public:
DeviceBase();
~DeviceBase();
void HandleError(const char* message);
void RegisterErrorCallback(ErrorCallback callback, void* userData);
virtual BindGroupBase* CreateBindGroup(BindGroupBuilder* builder) = 0;
virtual BindGroupLayoutBase* CreateBindGroupLayout(BindGroupLayoutBuilder* builder) = 0;
virtual BufferBase* CreateBuffer(BufferBuilder* builder) = 0;
virtual BufferViewBase* CreateBufferView(BufferViewBuilder* builder) = 0;
virtual CommandBufferBase* CreateCommandBuffer(CommandBufferBuilder* builder) = 0;
virtual InputStateBase* CreateInputState(InputStateBuilder* builder) = 0;
virtual PipelineBase* CreatePipeline(PipelineBuilder* builder) = 0;
virtual PipelineLayoutBase* CreatePipelineLayout(PipelineLayoutBuilder* builder) = 0;
virtual QueueBase* CreateQueue(QueueBuilder* builder) = 0;
virtual SamplerBase* CreateSampler(SamplerBuilder* builder) = 0;
virtual ShaderModuleBase* CreateShaderModule(ShaderModuleBuilder* builder) = 0;
virtual TextureBase* CreateTexture(TextureBuilder* builder) = 0;
virtual TextureViewBase* CreateTextureView(TextureViewBuilder* builder) = 0;
// Many NXT objects are completely immutable once created, which means that if two
// builders are given the same arguments they can return the same object. Reusing
// objects also makes it possible to compare two objects with a single pointer comparison.
//
// Technically no object is immutable as it carries a reference count, so an
// application with reference-counting issues could "see" that objects are reused.
// This is mitigated by automatic reference counting, and by the fact that when using
// the client-server wire every creation returns a different proxy object, with a
// different reference count.
//
// When trying to create an object, we give both the builder and an example of what
// the built object will be, the "blueprint". The blueprint is just a FooBase object
// instead of a backend Foo object. If the blueprint doesn't match an object in the
// cache, then the builder is used to make a new object.
BindGroupLayoutBase* GetOrCreateBindGroupLayout(const BindGroupLayoutBase* blueprint, BindGroupLayoutBuilder* builder);
void UncacheBindGroupLayout(BindGroupLayoutBase* obj);
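// Illustration (a sketch, not code from this commit): a frontend builder's GetResult would
// typically build a "blueprint" object from its own state and let the device either return
// the cached backend object or create a new one. The blueprint-building constructor used
// below is an assumption for the example.
//
//     BindGroupLayoutBase* BindGroupLayoutBuilder::GetResult() {
//         BindGroupLayoutBase blueprint(this);
//         return device->GetOrCreateBindGroupLayout(&blueprint, this);
//     }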
// NXT API
BindGroupBuilder* CreateBindGroupBuilder();
BindGroupLayoutBuilder* CreateBindGroupLayoutBuilder();
BufferBuilder* CreateBufferBuilder();
BufferViewBuilder* CreateBufferViewBuilder();
CommandBufferBuilder* CreateCommandBufferBuilder();
InputStateBuilder* CreateInputStateBuilder();
PipelineBuilder* CreatePipelineBuilder();
PipelineLayoutBuilder* CreatePipelineLayoutBuilder();
QueueBuilder* CreateQueueBuilder();
SamplerBuilder* CreateSamplerBuilder();
ShaderModuleBuilder* CreateShaderModuleBuilder();
TextureBuilder* CreateTextureBuilder();
void CopyBindGroups(uint32_t start, uint32_t count, BindGroupBase* source, BindGroupBase* target);
private:
// The object caches aren't exposed in the header as they would require a lot of
// additional includes.
struct Caches;
Caches* caches = nullptr;
ErrorCallback errorCallback = nullptr;
void* errorUserData = nullptr;
};
}
#endif // BACKEND_COMMON_DEVICEBASE_H_

View File

@ -0,0 +1,71 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_FORWARD_H_
#define BACKEND_COMMON_FORWARD_H_
#include <cassert>
#include <cstdint>
#define ASSERT assert
namespace backend {
class BindGroupBase;
class BindGroupBuilder;
class BindGroupLayoutBase;
class BindGroupLayoutBuilder;
class BufferBase;
class BufferBuilder;
class BufferViewBase;
class BufferViewBuilder;
class CommandBufferBase;
class CommandBufferBuilder;
class InputStateBase;
class InputStateBuilder;
class PipelineBase;
class PipelineBuilder;
class PipelineLayoutBase;
class PipelineLayoutBuilder;
class QueueBase;
class QueueBuilder;
class SamplerBase;
class SamplerBuilder;
class ShaderModuleBase;
class ShaderModuleBuilder;
class TextureBase;
class TextureBuilder;
class TextureViewBase;
class TextureViewBuilder;
class DeviceBase;
template<typename T>
class Ref;
template<typename T>
class PerStage;
// TODO(cwallez@chromium.org): where should constants live?
static constexpr uint32_t kMaxPushConstants = 32u;
static constexpr uint32_t kMaxBindGroups = 4u;
static constexpr uint32_t kMaxBindingsPerGroup = 16u; // TODO(cwallez@chromium.org): investigate bindgroup limits
static constexpr uint32_t kMaxVertexAttributes = 16u;
static constexpr uint32_t kMaxVertexInputs = 16u;
static constexpr uint32_t kNumStages = 3;
enum PushConstantType : uint8_t;
}
#endif // BACKEND_COMMON_FORWARD_H_

View File

@ -0,0 +1,139 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "InputState.h"
#include "Device.h"
namespace backend {
// InputState helpers
size_t IndexFormatSize(nxt::IndexFormat format) {
switch (format) {
case nxt::IndexFormat::Uint16:
return sizeof(uint16_t);
case nxt::IndexFormat::Uint32:
return sizeof(uint32_t);
}
}
uint32_t VertexFormatNumComponents(nxt::VertexFormat format) {
switch (format) {
case nxt::VertexFormat::FloatR32G32B32A32:
return 4;
case nxt::VertexFormat::FloatR32G32B32:
return 3;
case nxt::VertexFormat::FloatR32G32:
return 2;
}
}
size_t VertexFormatSize(nxt::VertexFormat format) {
switch (format) {
case nxt::VertexFormat::FloatR32G32B32A32:
case nxt::VertexFormat::FloatR32G32B32:
case nxt::VertexFormat::FloatR32G32:
return VertexFormatNumComponents(format) * sizeof(float);
}
}
// InputStateBase
InputStateBase::InputStateBase(InputStateBuilder* builder) {
attributesSetMask = builder->attributesSetMask;
attributeInfos = builder->attributeInfos;
inputsSetMask = builder->inputsSetMask;
inputInfos = builder->inputInfos;
}
const std::bitset<kMaxVertexAttributes>& InputStateBase::GetAttributesSetMask() const {
return attributesSetMask;
}
const InputStateBase::AttributeInfo& InputStateBase::GetAttribute(uint32_t location) const {
ASSERT(attributesSetMask[location]);
return attributeInfos[location];
}
const std::bitset<kMaxVertexInputs>& InputStateBase::GetInputsSetMask() const {
return inputsSetMask;
}
const InputStateBase::InputInfo& InputStateBase::GetInput(uint32_t slot) const {
ASSERT(inputsSetMask[slot]);
return inputInfos[slot];
}
// InputStateBuilder
InputStateBuilder::InputStateBuilder(DeviceBase* device) : device(device) {
}
bool InputStateBuilder::WasConsumed() const {
return consumed;
}
InputStateBase* InputStateBuilder::GetResult() {
for (uint32_t location = 0; location < kMaxVertexAttributes; ++location) {
if (attributesSetMask[location] &&
!inputsSetMask[attributeInfos[location].bindingSlot]) {
device->HandleError("Attribute uses unset input");
return nullptr;
}
}
consumed = true;
return device->CreateInputState(this);
}
void InputStateBuilder::SetAttribute(uint32_t shaderLocation,
uint32_t bindingSlot, nxt::VertexFormat format, uint32_t offset) {
if (shaderLocation >= kMaxVertexAttributes) {
device->HandleError("Setting attribute out of bounds");
return;
}
if (bindingSlot >= kMaxVertexInputs) {
device->HandleError("Binding slot out of bounds");
return;
}
if (attributesSetMask[shaderLocation]) {
device->HandleError("Setting already set attribute");
return;
}
attributesSetMask.set(shaderLocation);
auto& info = attributeInfos[shaderLocation];
info.bindingSlot = bindingSlot;
info.format = format;
info.offset = offset;
}
void InputStateBuilder::SetInput(uint32_t bindingSlot, uint32_t stride,
nxt::InputStepMode stepMode) {
if (bindingSlot >= kMaxVertexInputs) {
device->HandleError("Setting input out of bounds");
return;
}
if (inputsSetMask[bindingSlot]) {
device->HandleError("Setting already set input");
return;
}
inputsSetMask.set(bindingSlot);
auto& info = inputInfos[bindingSlot];
info.stride = stride;
info.stepMode = stepMode;
}
}

View File

@ -0,0 +1,85 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_INPUTSTATE_H_
#define BACKEND_COMMON_INPUTSTATE_H_
#include "Forward.h"
#include "RefCounted.h"
#include "nxt/nxtcpp.h"
#include <array>
#include <bitset>
namespace backend {
size_t IndexFormatSize(nxt::IndexFormat format);
uint32_t VertexFormatNumComponents(nxt::VertexFormat format);
size_t VertexFormatSize(nxt::VertexFormat format);
class InputStateBase : public RefCounted {
public:
InputStateBase(InputStateBuilder* builder);
struct AttributeInfo {
uint32_t bindingSlot;
nxt::VertexFormat format;
uint32_t offset;
};
struct InputInfo {
uint32_t stride;
nxt::InputStepMode stepMode;
};
const std::bitset<kMaxVertexAttributes>& GetAttributesSetMask() const;
const AttributeInfo& GetAttribute(uint32_t location) const;
const std::bitset<kMaxVertexInputs>& GetInputsSetMask() const;
const InputInfo& GetInput(uint32_t slot) const;
private:
std::bitset<kMaxVertexAttributes> attributesSetMask;
std::array<AttributeInfo, kMaxVertexAttributes> attributeInfos;
std::bitset<kMaxVertexInputs> inputsSetMask;
std::array<InputInfo, kMaxVertexInputs> inputInfos;
};
class InputStateBuilder : public RefCounted {
public:
InputStateBuilder(DeviceBase* device);
bool WasConsumed() const;
// NXT API
InputStateBase* GetResult();
void SetAttribute(uint32_t shaderLocation, uint32_t bindingSlot,
nxt::VertexFormat format, uint32_t offset);
void SetInput(uint32_t bindingSlot, uint32_t stride,
nxt::InputStepMode stepMode);
private:
friend class InputStateBase;
DeviceBase* device;
std::bitset<kMaxVertexAttributes> attributesSetMask;
std::array<InputStateBase::AttributeInfo, kMaxVertexAttributes> attributeInfos;
std::bitset<kMaxVertexInputs> inputsSetMask;
std::array<InputStateBase::InputInfo, kMaxVertexInputs> inputInfos;
bool consumed = false;
};
}
#endif // BACKEND_COMMON_INPUTSTATE_H_

View File

@ -0,0 +1,52 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "Math.h"
#include "Forward.h"
namespace backend {
unsigned long ScanForward(unsigned long bits) {
ASSERT(bits != 0);
// TODO(cwallez@chromium.org): handle non-posix platforms
// unsigned long firstBitIndex = 0ul;
// unsigned char ret = _BitScanForward(&firstBitIndex, bits);
// ASSERT(ret != 0);
// return firstBitIndex;
return static_cast<unsigned long>(__builtin_ctzl(bits));
}
uint32_t Log2(uint32_t value) {
ASSERT(value != 0);
return 31 - __builtin_clz(value);
}
bool IsPowerOfTwo(size_t n) {
ASSERT(n != 0);
return (n & (n - 1)) == 0;
}
bool IsAligned(const void* ptr, size_t alignment) {
ASSERT(IsPowerOfTwo(alignment));
ASSERT(alignment != 0);
return (reinterpret_cast<intptr_t>(ptr) & (alignment - 1)) == 0;
}
void* AlignVoidPtr(void* ptr, size_t alignment) {
ASSERT(alignment != 0);
return reinterpret_cast<void*>((reinterpret_cast<intptr_t>(ptr) + (alignment - 1)) & ~(alignment - 1));
}
}

43
src/backend/common/Math.h Normal file
View File

@ -0,0 +1,43 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_MATH_H_
#define BACKEND_COMMON_MATH_H_
#include <cstddef>
#include <cstdint>
namespace backend {
// The following are not valid for 0
unsigned long ScanForward(unsigned long bits);
uint32_t Log2(uint32_t value);
bool IsPowerOfTwo(size_t n);
bool IsAligned(const void* ptr, size_t alignment);
void* AlignVoidPtr(void* ptr, size_t alignment);
template<typename T>
T* Align(T* ptr, size_t alignment) {
return reinterpret_cast<T*>(AlignVoidPtr(ptr, alignment));
}
template<typename T>
const T* Align(const T* ptr, size_t alignment) {
return reinterpret_cast<const T*>(AlignVoidPtr(const_cast<T*>(ptr), alignment));
}
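// Illustrative expected values for the helpers above (comments only):
//     ScanForward(0b1000) == 3                    // index of the lowest set bit
//     Log2(32) == 5                               // floor(log2(value))
//     IsPowerOfTwo(64) == true, IsPowerOfTwo(48) == false
//     AlignVoidPtr(reinterpret_cast<void*>(0x1003), 16) == reinterpret_cast<void*>(0x1010)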
}
#endif // BACKEND_COMMON_MATH_H_

View File

@ -0,0 +1,29 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "PerStage.h"
namespace backend {
BitSetIterator<kNumStages, nxt::ShaderStage> IterateStages(nxt::ShaderStageBit stages) {
std::bitset<kNumStages> bits(static_cast<uint32_t>(stages));
return BitSetIterator<kNumStages, nxt::ShaderStage>(bits);
}
nxt::ShaderStageBit StageBit(nxt::ShaderStage stage) {
ASSERT(static_cast<uint32_t>(stage) < kNumStages);
return static_cast<nxt::ShaderStageBit>(1 << static_cast<uint32_t>(stage));
}
}

View File

@ -0,0 +1,68 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_PERSTAGE_H_
#define BACKEND_COMMON_PERSTAGE_H_
#include "BitSetIterator.h"
#include "nxt/nxtcpp.h"
#include <array>
namespace backend {
static_assert(static_cast<uint32_t>(nxt::ShaderStage::Vertex) < kNumStages, "");
static_assert(static_cast<uint32_t>(nxt::ShaderStage::Fragment) < kNumStages, "");
static_assert(static_cast<uint32_t>(nxt::ShaderStage::Compute) < kNumStages, "");
static_assert(static_cast<uint32_t>(nxt::ShaderStageBit::Vertex) == (1 << static_cast<uint32_t>(nxt::ShaderStage::Vertex)), "");
static_assert(static_cast<uint32_t>(nxt::ShaderStageBit::Fragment) == (1 << static_cast<uint32_t>(nxt::ShaderStage::Fragment)), "");
static_assert(static_cast<uint32_t>(nxt::ShaderStageBit::Compute) == (1 << static_cast<uint32_t>(nxt::ShaderStage::Compute)), "");
BitSetIterator<kNumStages, nxt::ShaderStage> IterateStages(nxt::ShaderStageBit stages);
nxt::ShaderStageBit StageBit(nxt::ShaderStage stage);
static constexpr nxt::ShaderStageBit kAllStages = static_cast<nxt::ShaderStageBit>((1 << kNumStages) - 1);
template<typename T>
class PerStage {
public:
T& operator[](nxt::ShaderStage stage) {
ASSERT(static_cast<uint32_t>(stage) < kNumStages);
return data[static_cast<uint32_t>(stage)];
}
const T& operator[](nxt::ShaderStage stage) const {
ASSERT(static_cast<uint32_t>(stage) < kNumStages);
return data[static_cast<uint32_t>(stage)];
}
T& operator[](nxt::ShaderStageBit stageBit) {
uint32_t bit = static_cast<uint32_t>(stageBit);
ASSERT(bit != 0 && IsPowerOfTwo(bit) && bit <= (1 << kNumStages));
return data[Log2(bit)];
}
const T& operator[](nxt::ShaderStageBit stageBit) const {
uint32_t bit = static_cast<uint32_t>(stageBit);
ASSERT(bit != 0 && IsPowerOfTwo(bit) && bit <= (1 << kNumStages));
return data[Log2(bit)];
}
private:
std::array<T, kNumStages> data;
};
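// Illustration (sketch): PerStage stores one value per shader stage and can be indexed
// either by the stage or by the corresponding single-bit stage mask. UseCount below is a
// hypothetical callee.
//
//     PerStage<uint32_t> bindingCounts;
//     bindingCounts[nxt::ShaderStage::Vertex] = 2;
//     bindingCounts[nxt::ShaderStageBit::Fragment] = 3;
//     for (auto stage : IterateStages(nxt::ShaderStageBit::Vertex | nxt::ShaderStageBit::Fragment)) {
//         UseCount(bindingCounts[stage]);
//     }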
}
#endif // BACKEND_COMMON_PERSTAGE_H_

View File

@ -0,0 +1,149 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "Pipeline.h"
#include "Device.h"
#include "InputState.h"
#include "PipelineLayout.h"
#include "ShaderModule.h"
namespace backend {
// PipelineBase
PipelineBase::PipelineBase(PipelineBuilder* builder)
: device(builder->device), stageMask(builder->stageMask), layout(std::move(builder->layout)),
inputState(std::move(builder->inputState)) {
if (stageMask != (nxt::ShaderStageBit::Vertex | nxt::ShaderStageBit::Fragment) &&
stageMask != nxt::ShaderStageBit::Compute) {
device->HandleError("Wrong combination of stage for pipeline");
return;
}
auto FillPushConstants = [](const ShaderModuleBase* module, PushConstantInfo* info) {
const auto& moduleInfo = module->GetPushConstants();
info->mask = moduleInfo.mask;
for (uint32_t i = 0; i < moduleInfo.names.size(); i++) {
unsigned int size = moduleInfo.sizes[i];
if (size == 0) {
continue;
}
for (uint32_t offset = 0; offset < size; offset++) {
info->types[i + offset] = moduleInfo.types[i];
}
i += size - 1;
}
};
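// For example (illustration only): a module declaring a float push constant at index 0
// (size 1) and a vec4 at index 1 (size 4) yields types[0] = Float and types[1..4] = Float,
// with the loop skipping over the slots already covered by the vec4.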
for (auto stageBit : IterateStages(builder->stageMask)) {
if (!builder->stages[stageBit].module->IsCompatibleWithPipelineLayout(layout.Get())) {
device->HandleError("Stage not compatible with layout");
return;
}
FillPushConstants(builder->stages[stageBit].module.Get(), &pushConstants[stageBit]);
}
if (!IsCompute()) {
if ((builder->stages[nxt::ShaderStage::Vertex].module->GetUsedVertexAttributes() & ~inputState->GetAttributesSetMask()).any()) {
device->HandleError("Pipeline vertex stage uses inputs not in the input state");
return;
}
}
}
const PipelineBase::PushConstantInfo& PipelineBase::GetPushConstants(nxt::ShaderStage stage) const {
return pushConstants[stage];
}
nxt::ShaderStageBit PipelineBase::GetStageMask() const {
return stageMask;
}
PipelineLayoutBase* PipelineBase::GetLayout() {
return layout.Get();
}
InputStateBase* PipelineBase::GetInputState() {
return inputState.Get();
}
bool PipelineBase::IsCompute() const {
return stageMask == nxt::ShaderStageBit::Compute;
}
// PipelineBuilder
PipelineBuilder::PipelineBuilder(DeviceBase* device)
: device(device), stageMask(static_cast<nxt::ShaderStageBit>(0)) {
}
bool PipelineBuilder::WasConsumed() const {
return consumed;
}
const PipelineBuilder::StageInfo& PipelineBuilder::GetStageInfo(nxt::ShaderStage stage) const {
ASSERT(stageMask & StageBit(stage));
return stages[stage];
}
PipelineBase* PipelineBuilder::GetResult() {
// TODO(cwallez@chromium.org): the layout should be required, and put the default objects in the device
if (!layout) {
layout = device->CreatePipelineLayoutBuilder()->GetResult();
}
if (!inputState) {
inputState = device->CreateInputStateBuilder()->GetResult();
}
consumed = true;
return device->CreatePipeline(this);
}
void PipelineBuilder::SetLayout(PipelineLayoutBase* layout) {
this->layout = layout;
}
void PipelineBuilder::SetStage(nxt::ShaderStage stage, ShaderModuleBase* module, const char* entryPoint) {
if (entryPoint != std::string("main")) {
device->HandleError("Currently the entry point has to be main()");
return;
}
if (stage != module->GetExecutionModel()) {
device->HandleError("Setting module with wrong execution model");
return;
}
nxt::ShaderStageBit bit = StageBit(stage);
if (stageMask & bit) {
device->HandleError("Setting already set stage");
return;
}
stageMask |= bit;
stages[stage].module = module;
stages[stage].entryPoint = entryPoint;
}
void PipelineBuilder::SetInputState(InputStateBase* inputState) {
this->inputState = inputState;
}
}

View File

@ -0,0 +1,92 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_PIPELINE_H_
#define BACKEND_COMMON_PIPELINE_H_
#include "Forward.h"
#include "PerStage.h"
#include "RefCounted.h"
#include "nxt/nxtcpp.h"
#include <array>
#include <bitset>
#include <string>
namespace backend {
enum PushConstantType : uint8_t {
Int,
UInt,
Float,
};
class PipelineBase : public RefCounted {
public:
PipelineBase(PipelineBuilder* builder);
struct PushConstantInfo {
std::bitset<kMaxPushConstants> mask;
std::array<PushConstantType, kMaxPushConstants> types;
};
const PushConstantInfo& GetPushConstants(nxt::ShaderStage stage) const;
nxt::ShaderStageBit GetStageMask() const;
PipelineLayoutBase* GetLayout();
InputStateBase* GetInputState();
// TODO(cwallez@chromium.org): split compute and render pipelines
bool IsCompute() const;
private:
DeviceBase* device;
nxt::ShaderStageBit stageMask;
Ref<PipelineLayoutBase> layout;
PerStage<PushConstantInfo> pushConstants;
Ref<InputStateBase> inputState;
};
class PipelineBuilder : public RefCounted {
public:
PipelineBuilder(DeviceBase* device);
bool WasConsumed() const;
struct StageInfo {
std::string entryPoint;
Ref<ShaderModuleBase> module;
};
const StageInfo& GetStageInfo(nxt::ShaderStage stage) const;
// NXT API
PipelineBase* GetResult();
void SetLayout(PipelineLayoutBase* layout);
void SetStage(nxt::ShaderStage stage, ShaderModuleBase* module, const char* entryPoint);
void SetInputState(InputStateBase* inputState);
private:
friend class PipelineBase;
DeviceBase* device;
Ref<PipelineLayoutBase> layout;
nxt::ShaderStageBit stageMask;
PerStage<StageInfo> stages;
Ref<InputStateBase> inputState;
bool consumed = false;
};
}
#endif // BACKEND_COMMON_PIPELINE_H_

View File

@ -0,0 +1,73 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "PipelineLayout.h"
#include "BindGroupLayout.h"
#include "Device.h"
namespace backend {
// PipelineLayoutBase
PipelineLayoutBase::PipelineLayoutBase(PipelineLayoutBuilder* builder)
: bindGroupLayouts(std::move(builder->bindGroupLayouts)), mask(builder->mask) {
}
const BindGroupLayoutBase* PipelineLayoutBase::GetBindGroupLayout(size_t group) const {
ASSERT(group < kMaxBindGroups);
return bindGroupLayouts[group].Get();
}
const std::bitset<kMaxBindGroups> PipelineLayoutBase::GetBindGroupsLayoutMask() const {
return mask;
}
// PipelineLayoutBuilder
PipelineLayoutBuilder::PipelineLayoutBuilder(DeviceBase* device) : device(device) {
}
bool PipelineLayoutBuilder::WasConsumed() const {
return consumed;
}
PipelineLayoutBase* PipelineLayoutBuilder::GetResult() {
// TODO(cwallez@chromium.org): this is a hack, have the null bind group layout somewhere in the device
// once we have a cache of BGL
for (size_t group = 0; group < kMaxBindGroups; ++group) {
if (!bindGroupLayouts[group]) {
bindGroupLayouts[group] = device->CreateBindGroupLayoutBuilder()->GetResult();
}
}
consumed = true;
return device->CreatePipelineLayout(this);
}
void PipelineLayoutBuilder::SetBindGroupLayout(uint32_t groupIndex, BindGroupLayoutBase* layout) {
if (groupIndex >= kMaxBindGroups) {
device->HandleError("groupIndex is over the maximum allowed");
return;
}
if (mask[groupIndex]) {
device->HandleError("Bind group layout already specified");
return;
}
bindGroupLayouts[groupIndex] = layout;
mask.set(groupIndex);
}
}

View File

@ -0,0 +1,63 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_PIPELINELAYOUT_H_
#define BACKEND_COMMON_PIPELINELAYOUT_H_
#include "Forward.h"
#include "RefCounted.h"
#include "nxt/nxtcpp.h"
#include <array>
#include <bitset>
namespace backend {
using BindGroupLayoutArray = std::array<Ref<BindGroupLayoutBase>, kMaxBindGroups>;
class PipelineLayoutBase : public RefCounted {
public:
PipelineLayoutBase(PipelineLayoutBuilder* builder);
const BindGroupLayoutBase* GetBindGroupLayout(size_t group) const;
const std::bitset<kMaxBindGroups> GetBindGroupsLayoutMask() const;
protected:
BindGroupLayoutArray bindGroupLayouts;
std::bitset<kMaxBindGroups> mask;
};
class PipelineLayoutBuilder : public RefCounted {
public:
PipelineLayoutBuilder(DeviceBase* device);
bool WasConsumed() const;
// NXT API
PipelineLayoutBase* GetResult();
void SetBindGroupLayout(uint32_t groupIndex, BindGroupLayoutBase* layout);
private:
friend class PipelineLayoutBase;
DeviceBase* device;
BindGroupLayoutArray bindGroupLayouts;
std::bitset<kMaxBindGroups> mask;
bool consumed = false;
};
}
#endif // BACKEND_COMMON_PIPELINELAYOUT_H_

View File

@ -0,0 +1,42 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "Queue.h"
#include "Device.h"
#include "CommandBuffer.h"
namespace backend {
// QueueBase
bool QueueBase::ValidateSubmitCommand(CommandBufferBase* command) {
return command->ValidateResourceUsagesImmediate();
}
// QueueBuilder
QueueBuilder::QueueBuilder(DeviceBase* device) : device(device) {
}
bool QueueBuilder::WasConsumed() const {
return consumed;
}
QueueBase* QueueBuilder::GetResult() {
consumed = true;
return device->CreateQueue(this);
}
}

View File

@ -0,0 +1,59 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_QUEUE_H_
#define BACKEND_COMMON_QUEUE_H_
#include "Forward.h"
#include "RefCounted.h"
#include "nxt/nxtcpp.h"
#include <type_traits>
namespace backend {
class QueueBase : public RefCounted {
private:
bool ValidateSubmitCommand(CommandBufferBase* command);
public:
template<typename T>
bool ValidateSubmit(uint32_t numCommands, T* const * commands) {
static_assert(std::is_base_of<CommandBufferBase, T>::value, "invalid command buffer type");
for (uint32_t i = 0; i < numCommands; ++i) {
if (!ValidateSubmitCommand(commands[i])) {
return false;
}
}
return true;
}
};
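// Illustration (sketch, hypothetical backend class): a backend queue's Submit would
// validate the command buffers before executing them.
//
//     class MyQueue : public QueueBase {
//       public:
//         void Submit(uint32_t numCommands, CommandBufferBase* const* commands) {
//             if (!ValidateSubmit(numCommands, commands)) {
//                 return; // validation failed
//             }
//             // ... execute the commands ...
//         }
//     };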
class QueueBuilder : public RefCounted {
public:
QueueBuilder(DeviceBase* device);
bool WasConsumed() const;
// NXT API
QueueBase* GetResult();
private:
DeviceBase* device;
bool consumed = false;
};
}
#endif // BACKEND_COMMON_QUEUE_H_

View File

@ -0,0 +1,66 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "RefCounted.h"
#include <cassert>
#define ASSERT assert
namespace backend {
RefCounted::RefCounted() {
}
RefCounted::~RefCounted() {
}
void RefCounted::ReferenceInternal() {
ASSERT(internalRefs != 0);
// TODO(cwallez@chromium.org): what to do on overflow?
internalRefs ++;
}
void RefCounted::ReleaseInternal() {
ASSERT(internalRefs != 0);
internalRefs --;
if (internalRefs == 0) {
ASSERT(externalRefs == 0);
// TODO(cwallez@chromium.org): would this work with custom allocators?
delete this;
}
}
uint32_t RefCounted::GetExternalRefs() const {
return externalRefs;
}
uint32_t RefCounted::GetInternalRefs() const {
return internalRefs;
}
void RefCounted::Reference() {
ASSERT(externalRefs != 0);
// TODO(cwallez@chromium.org): what to do on overflow?
externalRefs ++;
}
void RefCounted::Release() {
ASSERT(externalRefs != 0);
externalRefs --;
if (externalRefs == 0) {
ReleaseInternal();
}
}
}

View File

@ -0,0 +1,126 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_REFCOUNTED_H_
#define BACKEND_COMMON_REFCOUNTED_H_
#include <cstdint>
namespace backend {
class RefCounted {
public:
RefCounted();
virtual ~RefCounted();
void ReferenceInternal();
void ReleaseInternal();
uint32_t GetExternalRefs() const;
uint32_t GetInternalRefs() const;
// NXT API
void Reference();
void Release();
protected:
uint32_t externalRefs = 1;
uint32_t internalRefs = 1;
};
template<typename T>
class Ref {
public:
Ref() {}
Ref(T* p): pointee(p) {
Reference();
}
Ref(const Ref<T>& other): pointee(other.pointee) {
Reference();
}
Ref<T>& operator=(const Ref<T>& other) {
if (&other == this) return *this;
other.Reference();
Release();
pointee = other.pointee;
return *this;
}
Ref(Ref<T>&& other) {
pointee = other.pointee;
other.pointee = nullptr;
}
Ref<T>& operator=(Ref<T>&& other) {
if (&other == this) return *this;
Release();
pointee = other.pointee;
other.pointee = nullptr;
return *this;
}
~Ref() {
Release();
pointee = nullptr;
}
operator bool() {
return pointee != nullptr;
}
const T& operator*() const {
return *pointee;
}
T& operator*() {
return *pointee;
}
const T* operator->() const {
return pointee;
}
T* operator->() {
return pointee;
}
const T* Get() const {
return pointee;
}
T* Get() {
return pointee;
}
private:
void Reference() const {
if (pointee != nullptr) {
pointee->ReferenceInternal();
}
}
void Release() const {
if (pointee != nullptr) {
pointee->ReleaseInternal();
}
}
//static_assert(std::is_base_of<RefCounted, T>::value, "");
T* pointee = nullptr;
};
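// Illustration (sketch) of the two reference counts: Ref<T> drives the internal count,
// while the NXT API drives the external count through Reference()/Release(). A freshly
// created object starts with one external and one internal reference.
//
//     {
//         Ref<RefCounted> ref(new RefCounted()); // internalRefs: 1 -> 2
//         ref->Release();                        // externalRefs: 1 -> 0, internalRefs: 2 -> 1
//     }                                          // ~Ref: internalRefs: 1 -> 0, object deletes itself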
}
#endif // BACKEND_COMMON_REFCOUNTED_H_

View File

@ -0,0 +1,67 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "Sampler.h"
#include "Device.h"
namespace backend {
// SamplerBase
SamplerBase::SamplerBase(SamplerBuilder* builder) {
}
// SamplerBuilder
enum SamplerSetProperties {
SAMPLER_PROPERTY_FILTER = 0x1,
};
SamplerBuilder::SamplerBuilder(DeviceBase* device)
: device(device) {
}
nxt::FilterMode SamplerBuilder::GetMagFilter() const {
return magFilter;
}
nxt::FilterMode SamplerBuilder::GetMinFilter() const {
return minFilter;
}
nxt::FilterMode SamplerBuilder::GetMipMapFilter() const {
return mipMapFilter;
}
bool SamplerBuilder::WasConsumed() const {
return consumed;
}
SamplerBase* SamplerBuilder::GetResult() {
consumed = true;
return device->CreateSampler(this);
}
void SamplerBuilder::SetFilterMode(nxt::FilterMode magFilter, nxt::FilterMode minFilter, nxt::FilterMode mipMapFilter) {
if ((propertiesSet & SAMPLER_PROPERTY_FILTER) != 0) {
device->HandleError("Sampler filter property set multiple times");
return;
}
this->magFilter = magFilter;
this->minFilter = minFilter;
this->mipMapFilter = mipMapFilter;
propertiesSet |= SAMPLER_PROPERTY_FILTER;
}
}

View File

@ -0,0 +1,58 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_SAMPLER_H_
#define BACKEND_COMMON_SAMPLER_H_
#include "Forward.h"
#include "RefCounted.h"
#include "nxt/nxtcpp.h"
namespace backend {
class SamplerBase : public RefCounted {
public:
SamplerBase(SamplerBuilder* builder);
};
class SamplerBuilder : public RefCounted {
public:
SamplerBuilder(DeviceBase* device);
nxt::FilterMode GetMagFilter() const;
nxt::FilterMode GetMinFilter() const;
nxt::FilterMode GetMipMapFilter() const;
bool WasConsumed() const;
// NXT API
SamplerBase* GetResult();
void SetFilterMode(nxt::FilterMode magFilter, nxt::FilterMode minFilter, nxt::FilterMode mipMapFilter);
private:
friend class SamplerBase;
DeviceBase* device;
int propertiesSet = 0;
bool consumed = false;
nxt::FilterMode magFilter = nxt::FilterMode::Nearest;
nxt::FilterMode minFilter = nxt::FilterMode::Nearest;
nxt::FilterMode mipMapFilter = nxt::FilterMode::Nearest;
};
}
#endif // BACKEND_COMMON_SAMPLER_H_
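SamplerBuilder follows the builder convention used throughout these files: each property can be set at most once (tracked through propertiesSet), violations are reported through DeviceBase::HandleError, and GetResult() marks the builder consumed and asks the device to create the backend object. A sketch of that sequence, constructing the builder directly for illustration only; in the real API the builder comes from the device through the generated bindings:

#include "Sampler.h"

// `device` is assumed to point at some concrete backend's DeviceBase implementation.
backend::SamplerBase* MakeLinearSampler(backend::DeviceBase* device) {
    backend::SamplerBuilder builder(device);
    builder.SetFilterMode(nxt::FilterMode::Linear,    // mag filter
                          nxt::FilterMode::Linear,    // min filter
                          nxt::FilterMode::Nearest);  // mipmap filter
    // A second SetFilterMode call would hit the propertiesSet check and call device->HandleError(...).
    return builder.GetResult();  // sets consumed = true, then defers to device->CreateSampler(this)
}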

View File

@ -0,0 +1,217 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ShaderModule.h"
#include "BindGroupLayout.h"
#include "Device.h"
#include "Pipeline.h"
#include "PipelineLayout.h"
#include <spirv-cross/spirv_cross.hpp>
namespace backend {
ShaderModuleBase::ShaderModuleBase(ShaderModuleBuilder* builder)
: device(builder->device) {
}
void ShaderModuleBase::ExtractSpirvInfo(const spirv_cross::Compiler& compiler) {
const auto& resources = compiler.get_shader_resources();
switch (compiler.get_execution_model()) {
case spv::ExecutionModelVertex:
executionModel = nxt::ShaderStage::Vertex;
break;
case spv::ExecutionModelFragment:
executionModel = nxt::ShaderStage::Fragment;
break;
case spv::ExecutionModelGLCompute:
executionModel = nxt::ShaderStage::Compute;
break;
default:
ASSERT(false);
break;
}
// Extract push constants
pushConstants.mask.reset();
pushConstants.sizes.fill(0);
pushConstants.types.fill(PushConstantType::Int);
if (resources.push_constant_buffers.size() > 0) {
auto interfaceBlock = resources.push_constant_buffers[0];
const auto& blockType = compiler.get_type(interfaceBlock.type_id);
ASSERT(blockType.basetype == spirv_cross::SPIRType::Struct);
for (uint32_t i = 0; i < blockType.member_types.size(); i++) {
ASSERT(compiler.get_member_decoration_mask(blockType.self, i) & (1ull << spv::DecorationOffset));
uint32_t offset = compiler.get_member_decoration(blockType.self, i, spv::DecorationOffset);
ASSERT(offset % 4 == 0);
offset /= 4;
ASSERT(offset < kMaxPushConstants);
auto memberType = compiler.get_type(blockType.member_types[i]);
PushConstantType constantType;
if (memberType.basetype == spirv_cross::SPIRType::Int) {
constantType = PushConstantType::Int;
} else if (memberType.basetype == spirv_cross::SPIRType::UInt) {
constantType = PushConstantType::UInt;
} else {
ASSERT(memberType.basetype == spirv_cross::SPIRType::Float);
constantType = PushConstantType::Float;
}
pushConstants.mask.set(offset);
pushConstants.names[offset] = interfaceBlock.name + "." + compiler.get_member_name(blockType.self, i);
pushConstants.sizes[offset] = memberType.vecsize * memberType.columns;
pushConstants.types[offset] = constantType;
}
}
// Fill in bindingInfo with the SPIRV bindings
auto ExtractResourcesBinding = [this](const std::vector<spirv_cross::Resource>& resources,
const spirv_cross::Compiler& compiler, nxt::BindingType type) {
constexpr uint64_t requiredBindingDecorationMask = (1ull << spv::DecorationBinding) | (1ull << spv::DecorationDescriptorSet);
for (const auto& resource : resources) {
ASSERT((compiler.get_decoration_mask(resource.id) & requiredBindingDecorationMask) == requiredBindingDecorationMask);
uint32_t binding = compiler.get_decoration(resource.id, spv::DecorationBinding);
uint32_t set = compiler.get_decoration(resource.id, spv::DecorationDescriptorSet);
if (binding >= kMaxBindingsPerGroup || set >= kMaxBindGroups) {
device->HandleError("Binding over limits in the SPIRV");
continue;
}
auto& info = bindingInfo[set][binding];
info.used = true;
info.id = resource.id;
info.base_type_id = resource.base_type_id;
info.type = type;
}
};
ExtractResourcesBinding(resources.uniform_buffers, compiler, nxt::BindingType::UniformBuffer);
ExtractResourcesBinding(resources.separate_images, compiler, nxt::BindingType::SampledTexture);
ExtractResourcesBinding(resources.separate_samplers, compiler, nxt::BindingType::Sampler);
ExtractResourcesBinding(resources.storage_buffers, compiler, nxt::BindingType::StorageBuffer);
// Extract the vertex attributes
if (executionModel == nxt::ShaderStage::Vertex) {
for (const auto& attrib : resources.stage_inputs) {
ASSERT(compiler.get_decoration_mask(attrib.id) & (1ull << spv::DecorationLocation));
uint32_t location = compiler.get_decoration(attrib.id, spv::DecorationLocation);
if (location >= kMaxVertexAttributes) {
device->HandleError("Attribute location over limits in the SPIRV");
return;
}
usedVertexAttributes.set(location);
}
// Without a location qualifier on vertex outputs, spirv_cross::CompilerMSL gives them all
// the location 0, causing a compile error.
for (const auto& attrib : resources.stage_outputs) {
if (!(compiler.get_decoration_mask(attrib.id) & (1ull << spv::DecorationLocation))) {
device->HandleError("Need location qualifier on vertex output");
return;
}
}
}
if (executionModel == nxt::ShaderStage::Fragment) {
// Without a location qualifier on vertex inputs, spirv_cross::CompilerMSL gives them all
// the location 0, causing a compile error.
for (const auto& attrib : resources.stage_inputs) {
if (!(compiler.get_decoration_mask(attrib.id) & (1ull << spv::DecorationLocation))) {
device->HandleError("Need location qualifier on fragment input");
return;
}
}
}
}
const ShaderModuleBase::PushConstantInfo& ShaderModuleBase::GetPushConstants() const {
return pushConstants;
}
const ShaderModuleBase::ModuleBindingInfo& ShaderModuleBase::GetBindingInfo() const {
return bindingInfo;
}
const std::bitset<kMaxVertexAttributes>& ShaderModuleBase::GetUsedVertexAttributes() const {
return usedVertexAttributes;
}
nxt::ShaderStage ShaderModuleBase::GetExecutionModel() const {
return executionModel;
}
bool ShaderModuleBase::IsCompatibleWithPipelineLayout(const PipelineLayoutBase* layout) {
for (size_t group = 0; group < kMaxBindGroups; ++group) {
if (!IsCompatibleWithBindGroupLayout(group, layout->GetBindGroupLayout(group))) {
return false;
}
}
return true;
}
bool ShaderModuleBase::IsCompatibleWithBindGroupLayout(size_t group, const BindGroupLayoutBase* layout) {
const auto& layoutInfo = layout->GetBindingInfo();
for (size_t i = 0; i < kMaxBindingsPerGroup; ++i) {
const auto& moduleInfo = bindingInfo[group][i];
if (!moduleInfo.used) {
continue;
}
if (moduleInfo.type != layoutInfo.types[i]) {
return false;
}
if ((layoutInfo.visibilities[i] & StageBit(executionModel)) == 0) {
return false;
}
}
return true;
}
ShaderModuleBuilder::ShaderModuleBuilder(DeviceBase* device) : device(device) {}
bool ShaderModuleBuilder::WasConsumed() const {
return consumed;
}
std::vector<uint32_t> ShaderModuleBuilder::AcquireSpirv() {
return std::move(spirv);
}
ShaderModuleBase* ShaderModuleBuilder::GetResult() {
if (spirv.size() == 0) {
device->HandleError("Shader module needs to have the source set");
return nullptr;
}
consumed = true;
return device->CreateShaderModule(this);
}
void ShaderModuleBuilder::SetSource(uint32_t codeSize, const uint32_t* code) {
spirv.assign(code, code + codeSize);
}
}
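ShaderModuleBuilder only stores the SPIR-V words; reflection happens when a backend wraps them in a spirv_cross::Compiler and hands it to ExtractSpirvInfo(), which records the execution model, push constant layout, per-group bindings and used vertex attributes that IsCompatibleWithPipelineLayout() later checks (presumably from the pipeline builder). A sketch of what a backend's shader module does with the builder, mirroring the Metal implementation later in this commit; the MyShaderModule name is made up:

#include "ShaderModule.h"
#include <spirv-cross/spirv_cross.hpp>

class MyShaderModule : public backend::ShaderModuleBase {
  public:
    MyShaderModule(backend::ShaderModuleBuilder* builder) : ShaderModuleBase(builder) {
        // AcquireSpirv() moves the SPIR-V out of the builder; the plain Compiler is enough for reflection.
        // Backends that cross-compile (like CompilerMSL below) reuse the same compiler for code generation.
        spirv_cross::Compiler compiler(builder->AcquireSpirv());
        ExtractSpirvInfo(compiler);
    }
};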

View File

@ -0,0 +1,95 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_SHADERMODULE_H_
#define BACKEND_COMMON_SHADERMODULE_H_
#include "Forward.h"
#include "RefCounted.h"
#include "nxt/nxtcpp.h"
#include <array>
#include <bitset>
#include <vector>
namespace spirv_cross {
class Compiler;
}
namespace backend {
class ShaderModuleBase : public RefCounted {
public:
ShaderModuleBase(ShaderModuleBuilder* builder);
void ExtractSpirvInfo(const spirv_cross::Compiler& compiler);
struct PushConstantInfo {
std::bitset<kMaxPushConstants> mask;
std::array<std::string, kMaxPushConstants> names;
std::array<int, kMaxPushConstants> sizes;
std::array<PushConstantType, kMaxPushConstants> types;
};
struct BindingInfo {
// The SPIRV ID of the resource.
uint32_t id;
uint32_t base_type_id;
nxt::BindingType type;
bool used = false;
};
using ModuleBindingInfo = std::array<std::array<BindingInfo, kMaxBindingsPerGroup>, kMaxBindGroups>;
const PushConstantInfo& GetPushConstants() const;
const ModuleBindingInfo& GetBindingInfo() const;
const std::bitset<kMaxVertexAttributes>& GetUsedVertexAttributes() const;
nxt::ShaderStage GetExecutionModel() const;
bool IsCompatibleWithPipelineLayout(const PipelineLayoutBase* layout);
private:
bool IsCompatibleWithBindGroupLayout(size_t group, const BindGroupLayoutBase* layout);
DeviceBase* device;
PushConstantInfo pushConstants = {};
ModuleBindingInfo bindingInfo;
std::bitset<kMaxVertexAttributes> usedVertexAttributes;
nxt::ShaderStage executionModel;
};
class ShaderModuleBuilder : public RefCounted {
public:
ShaderModuleBuilder(DeviceBase* device);
bool WasConsumed() const;
std::vector<uint32_t> AcquireSpirv();
// NXT API
ShaderModuleBase* GetResult();
void SetSource(uint32_t codeSize, const uint32_t* code);
private:
friend class ShaderModuleBase;
DeviceBase* device;
std::vector<uint32_t> spirv;
bool consumed = false;
};
}
#endif // BACKEND_COMMON_SHADERMODULE_H_

View File

@ -0,0 +1,239 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "Texture.h"
#include "Device.h"
namespace backend {
size_t TextureFormatPixelSize(nxt::TextureFormat format) {
switch (format) {
case nxt::TextureFormat::R8G8B8A8Unorm:
return 4;
}
}
// TextureBase
TextureBase::TextureBase(TextureBuilder* builder)
: device(builder->device), dimension(builder->dimension), format(builder->format), width(builder->width),
height(builder->height), depth(builder->depth), numMipLevels(builder->numMipLevels),
allowedUsage(builder->allowedUsage), currentUsage(builder->currentUsage) {
}
nxt::TextureDimension TextureBase::GetDimension() const {
return dimension;
}
nxt::TextureFormat TextureBase::GetFormat() const {
return format;
}
uint32_t TextureBase::GetWidth() const {
return width;
}
uint32_t TextureBase::GetHeight() const {
return height;
}
uint32_t TextureBase::GetDepth() const {
return depth;
}
uint32_t TextureBase::GetNumMipLevels() const {
return numMipLevels;
}
nxt::TextureUsageBit TextureBase::GetAllowedUsage() const {
return allowedUsage;
}
nxt::TextureUsageBit TextureBase::GetUsage() const {
return currentUsage;
}
TextureViewBuilder* TextureBase::CreateTextureViewBuilder() {
return new TextureViewBuilder(device, this);
}
bool TextureBase::IsFrozen() const {
return frozen;
}
bool TextureBase::HasFrozenUsage(nxt::TextureUsageBit usage) const {
return frozen && (usage & allowedUsage);
}
bool TextureBase::IsUsagePossible(nxt::TextureUsageBit allowedUsage, nxt::TextureUsageBit usage) {
bool allowed = (usage & allowedUsage) == usage;
bool singleUse = nxt::HasZeroOrOneBits(usage);
return allowed && singleUse;
}
bool TextureBase::IsTransitionPossible(nxt::TextureUsageBit usage) const {
if (frozen) {
return false;
}
return IsUsagePossible(allowedUsage, usage);
}
void TextureBase::TransitionUsageImpl(nxt::TextureUsageBit usage) {
assert(IsTransitionPossible(usage));
currentUsage = usage;
}
void TextureBase::TransitionUsage(nxt::TextureUsageBit usage) {
if (!IsTransitionPossible(usage)) {
device->HandleError("Texture frozen or usage not allowed");
return;
}
TransitionUsageImpl(usage);
}
void TextureBase::FreezeUsage(nxt::TextureUsageBit usage) {
if (!IsTransitionPossible(usage)) {
device->HandleError("Texture frozen or usage not allowed");
return;
}
allowedUsage = usage;
currentUsage = usage;
frozen = true;
}
// TextureBuilder
enum TextureSetProperties {
TEXTURE_PROPERTY_DIMENSION = 0x1,
TEXTURE_PROPERTY_EXTENT = 0x2,
TEXTURE_PROPERTY_FORMAT = 0x4,
TEXTURE_PROPERTY_MIP_LEVELS = 0x8,
TEXTURE_PROPERTY_ALLOWED_USAGE = 0x10,
TEXTURE_PROPERTY_INITIAL_USAGE = 0x20,
};
TextureBuilder::TextureBuilder(DeviceBase* device)
: device(device) {
}
bool TextureBuilder::WasConsumed() const {
return consumed;
}
TextureBase* TextureBuilder::GetResult() {
constexpr int allProperties = TEXTURE_PROPERTY_DIMENSION | TEXTURE_PROPERTY_EXTENT |
TEXTURE_PROPERTY_FORMAT | TEXTURE_PROPERTY_MIP_LEVELS | TEXTURE_PROPERTY_ALLOWED_USAGE;
if ((propertiesSet & allProperties) != allProperties) {
device->HandleError("Texture missing properties");
return nullptr;
}
if (!TextureBase::IsUsagePossible(allowedUsage, currentUsage)) {
device->HandleError("Initial texture usage is not allowed");
return nullptr;
}
// TODO(cwallez@chromium.org): check stuff based on the dimension
consumed = true;
return device->CreateTexture(this);
}
void TextureBuilder::SetDimension(nxt::TextureDimension dimension) {
if ((propertiesSet & TEXTURE_PROPERTY_DIMENSION) != 0) {
device->HandleError("Texture dimension property set multiple times");
return;
}
propertiesSet |= TEXTURE_PROPERTY_DIMENSION;
this->dimension = dimension;
}
void TextureBuilder::SetExtent(uint32_t width, uint32_t height, uint32_t depth) {
if ((propertiesSet & TEXTURE_PROPERTY_EXTENT) != 0) {
device->HandleError("Texture extent property set multiple times");
return;
}
if (width == 0 || height == 0 || depth == 0) {
device->HandleError("Cannot create an empty texture");
return;
}
propertiesSet |= TEXTURE_PROPERTY_EXTENT;
this->width = width;
this->height = height;
this->depth = depth;
}
void TextureBuilder::SetFormat(nxt::TextureFormat format) {
if ((propertiesSet & TEXTURE_PROPERTY_FORMAT) != 0) {
device->HandleError("Texture format property set multiple times");
return;
}
propertiesSet |= TEXTURE_PROPERTY_FORMAT;
this->format = format;
}
void TextureBuilder::SetMipLevels(uint32_t numMipLevels) {
if ((propertiesSet & TEXTURE_PROPERTY_MIP_LEVELS) != 0) {
device->HandleError("Texture mip levels property set multiple times");
return;
}
propertiesSet |= TEXTURE_PROPERTY_MIP_LEVELS;
this->numMipLevels = numMipLevels;
}
void TextureBuilder::SetAllowedUsage(nxt::TextureUsageBit usage) {
if ((propertiesSet & TEXTURE_PROPERTY_ALLOWED_USAGE) != 0) {
device->HandleError("Texture allowed usage property set multiple times");
return;
}
propertiesSet |= TEXTURE_PROPERTY_ALLOWED_USAGE;
this->allowedUsage = usage;
}
void TextureBuilder::SetInitialUsage(nxt::TextureUsageBit usage) {
if ((propertiesSet & TEXTURE_PROPERTY_INITIAL_USAGE) != 0) {
device->HandleError("Texture initial usage property set multiple times");
return;
}
propertiesSet |= TEXTURE_PROPERTY_INITIAL_USAGE;
this->currentUsage = usage;
}
// TextureViewBase
TextureViewBase::TextureViewBase(TextureViewBuilder* builder)
: texture(builder->texture) {
}
TextureBase* TextureViewBase::GetTexture() {
return texture.Get();
}
// TextureViewBuilder
TextureViewBuilder::TextureViewBuilder(DeviceBase* device, TextureBase* texture)
: device(device), texture(texture) {
}
bool TextureViewBuilder::WasConsumed() const {
return consumed;
}
TextureViewBase* TextureViewBuilder::GetResult() {
consumed = true;
return device->CreateTextureView(this);
}
}
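TextureBuilder requires the dimension, extent, format, mip level count and allowed usage before GetResult() will create anything, and the initial usage, if set, must be zero or one bit contained in the allowed usage. Once created, the texture transitions between single-bit usages until FreezeUsage() pins it. A sketch of the sequence; the builder is constructed directly only for illustration, and the specific TextureUsageBit flag names and their operator| are assumed from the generated NXT API rather than shown in this file:

#include "Texture.h"

// `device` is assumed to point at some concrete backend's DeviceBase implementation.
backend::TextureBase* MakeSampledTexture(backend::DeviceBase* device) {
    backend::TextureBuilder builder(device);
    builder.SetDimension(nxt::TextureDimension::e2D);
    builder.SetExtent(256, 256, 1);
    builder.SetFormat(nxt::TextureFormat::R8G8B8A8Unorm);
    builder.SetMipLevels(1);
    builder.SetAllowedUsage(nxt::TextureUsageBit::TransferDst | nxt::TextureUsageBit::Sampled);  // assumed flag names
    builder.SetInitialUsage(nxt::TextureUsageBit::TransferDst);  // checked against the allowed usage in GetResult()

    backend::TextureBase* texture = builder.GetResult();       // HandleError + nullptr if a required property is missing
    texture->TransitionUsage(nxt::TextureUsageBit::Sampled);    // transitions must be to a single usage bit
    texture->FreezeUsage(nxt::TextureUsageBit::Sampled);        // no further transitions once frozen
    return texture;
}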

View File

@ -0,0 +1,121 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_TEXTURE_H_
#define BACKEND_COMMON_TEXTURE_H_
#include "Forward.h"
#include "RefCounted.h"
#include "nxt/nxtcpp.h"
namespace backend {
size_t TextureFormatPixelSize(nxt::TextureFormat format);
class TextureBase : public RefCounted {
public:
TextureBase(TextureBuilder* builder);
nxt::TextureDimension GetDimension() const;
nxt::TextureFormat GetFormat() const;
uint32_t GetWidth() const;
uint32_t GetHeight() const;
uint32_t GetDepth() const;
uint32_t GetNumMipLevels() const;
nxt::TextureUsageBit GetAllowedUsage() const;
nxt::TextureUsageBit GetUsage() const;
bool IsFrozen() const;
bool HasFrozenUsage(nxt::TextureUsageBit usage) const;
static bool IsUsagePossible(nxt::TextureUsageBit allowedUsage, nxt::TextureUsageBit usage);
bool IsTransitionPossible(nxt::TextureUsageBit usage) const;
void TransitionUsageImpl(nxt::TextureUsageBit usage);
// NXT API
TextureViewBuilder* CreateTextureViewBuilder();
void TransitionUsage(nxt::TextureUsageBit usage);
void FreezeUsage(nxt::TextureUsageBit usage);
private:
DeviceBase* device;
nxt::TextureDimension dimension;
nxt::TextureFormat format;
uint32_t width, height, depth;
uint32_t numMipLevels;
nxt::TextureUsageBit allowedUsage = nxt::TextureUsageBit::None;
nxt::TextureUsageBit currentUsage = nxt::TextureUsageBit::None;
bool frozen = false;
};
class TextureBuilder : public RefCounted {
public:
TextureBuilder(DeviceBase* device);
bool WasConsumed() const;
// NXT API
TextureBase* GetResult();
void SetDimension(nxt::TextureDimension dimension);
void SetExtent(uint32_t width, uint32_t height, uint32_t depth);
void SetFormat(nxt::TextureFormat format);
void SetMipLevels(uint32_t numMipLevels);
void SetAllowedUsage(nxt::TextureUsageBit usage);
void SetInitialUsage(nxt::TextureUsageBit usage);
private:
friend class TextureBase;
DeviceBase* device;
int propertiesSet = 0;
bool consumed = false;
nxt::TextureDimension dimension;
uint32_t width, height, depth;
nxt::TextureFormat format;
uint32_t numMipLevels;
nxt::TextureUsageBit allowedUsage = nxt::TextureUsageBit::None;
nxt::TextureUsageBit currentUsage = nxt::TextureUsageBit::None;
};
class TextureViewBase : public RefCounted {
public:
TextureViewBase(TextureViewBuilder* builder);
TextureBase* GetTexture();
private:
Ref<TextureBase> texture;
};
class TextureViewBuilder : public RefCounted {
public:
TextureViewBuilder(DeviceBase* device, TextureBase* texture);
bool WasConsumed() const;
// NXT API
TextureViewBase* GetResult();
private:
friend class TextureViewBase;
DeviceBase* device;
bool consumed = false;
Ref<TextureBase> texture;
};
}
#endif // BACKEND_COMMON_TEXTURE_H_

View File

@ -0,0 +1,120 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_COMMON_TOBACKEND_H_
#define BACKEND_COMMON_TOBACKEND_H_
#include "Forward.h"
namespace backend {
// ToBackendTraits implements the mapping from base type to member type of BackendTraits
template<typename T, typename BackendTraits>
struct ToBackendTraits;
template<typename BackendTraits>
struct ToBackendTraits<BindGroupBase, BackendTraits> {
using BackendType = typename BackendTraits::BindGroupType;
};
template<typename BackendTraits>
struct ToBackendTraits<BindGroupLayoutBase, BackendTraits> {
using BackendType = typename BackendTraits::BindGroupLayoutType;
};
template<typename BackendTraits>
struct ToBackendTraits<BufferBase, BackendTraits> {
using BackendType = typename BackendTraits::BufferType;
};
template<typename BackendTraits>
struct ToBackendTraits<BufferViewBase, BackendTraits> {
using BackendType = typename BackendTraits::BufferViewType;
};
template<typename BackendTraits>
struct ToBackendTraits<CommandBufferBase, BackendTraits> {
using BackendType = typename BackendTraits::CommandBufferType;
};
template<typename BackendTraits>
struct ToBackendTraits<InputStateBase, BackendTraits> {
using BackendType = typename BackendTraits::InputStateType;
};
template<typename BackendTraits>
struct ToBackendTraits<PipelineBase, BackendTraits> {
using BackendType = typename BackendTraits::PipelineType;
};
template<typename BackendTraits>
struct ToBackendTraits<PipelineLayoutBase, BackendTraits> {
using BackendType = typename BackendTraits::PipelineLayoutType;
};
template<typename BackendTraits>
struct ToBackendTraits<QueueBase, BackendTraits> {
using BackendType = typename BackendTraits::QueueType;
};
template<typename BackendTraits>
struct ToBackendTraits<SamplerBase, BackendTraits> {
using BackendType = typename BackendTraits::SamplerType;
};
template<typename BackendTraits>
struct ToBackendTraits<ShaderModuleBase, BackendTraits> {
using BackendType = typename BackendTraits::ShaderModuleType;
};
template<typename BackendTraits>
struct ToBackendTraits<TextureBase, BackendTraits> {
using BackendType = typename BackendTraits::TextureType;
};
template<typename BackendTraits>
struct ToBackendTraits<TextureViewBase, BackendTraits> {
using BackendType = typename BackendTraits::TextureViewType;
};
// ToBackendBase implements conversion to the given BackendTraits
// To use it in a backend, use the following:
// template<typename T>
// auto ToBackend(T&& common) -> decltype(ToBackendBase<MyBackendTraits>(common)) {
// return ToBackendBase<MyBackendTraits>(common);
// }
template<typename BackendTraits, typename T>
Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>& ToBackendBase(Ref<T>& common) {
return reinterpret_cast<Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>&>(common);
}
template<typename BackendTraits, typename T>
const Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>& ToBackendBase(const Ref<T>& common) {
return reinterpret_cast<const Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>&>(common);
}
template<typename BackendTraits, typename T>
typename ToBackendTraits<T, BackendTraits>::BackendType* ToBackendBase(T* common) {
return reinterpret_cast<typename ToBackendTraits<T, BackendTraits>::BackendType*>(common);
}
template<typename BackendTraits, typename T>
const typename ToBackendTraits<T, BackendTraits>::BackendType* ToBackendBase(const T* common) {
return reinterpret_cast<const typename ToBackendTraits<T, BackendTraits>::BackendType*>(common);
}
}
#endif // BACKEND_COMMON_TOBACKEND_H_
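ToBackendTraits and ToBackendBase give every backend a zero-cost way to cast the shared *Base types (and Refs to them) to its own classes, as the comment above explains. A sketch of the pattern for a hypothetical backend; only the Buffer alias is spelled out here, whereas a real backend lists one alias per object type, exactly as the Metal backend does next with MetalBackendTraits:

#include "ToBackend.h"

namespace backend {
namespace hypothetical {

    class Buffer;  // would derive from BufferBase

    struct HypotheticalBackendTraits {
        using BufferType = Buffer;
        // ... one `using XxxType = Xxx;` alias for each type handled by ToBackendTraits
    };

    // The wrapper suggested by the comment in ToBackend.h.
    template<typename T>
    auto ToBackend(T&& common) -> decltype(ToBackendBase<HypotheticalBackendTraits>(common)) {
        return ToBackendBase<HypotheticalBackendTraits>(common);
    }

    // Usage: Buffer* buffer = ToBackend(someBufferBase);

}
}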

View File

@ -0,0 +1,18 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "MetalBackend.h"
#include "common/Device.h"
#include "common/CommandBuffer.h"

View File

@ -0,0 +1,282 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_METAL_METALBACKEND_H_
#define BACKEND_METAL_METALBACKEND_H_
#include "nxt/nxtcpp.h"
#include <map>
#include <mutex>
#include <unordered_set>
#include "common/Buffer.h"
#include "common/BindGroup.h"
#include "common/BindGroupLayout.h"
#include "common/Device.h"
#include "common/CommandBuffer.h"
#include "common/InputState.h"
#include "common/Pipeline.h"
#include "common/PipelineLayout.h"
#include "common/Queue.h"
#include "common/Sampler.h"
#include "common/ShaderModule.h"
#include "common/Texture.h"
#include "common/ToBackend.h"
#include <type_traits>
#import <Metal/Metal.h>
#import <QuartzCore/CAMetalLayer.h>
namespace spirv_cross {
class CompilerMSL;
}
namespace backend {
namespace metal {
class BindGroup;
class BindGroupLayout;
class Buffer;
class BufferView;
class CommandBuffer;
class InputState;
class Pipeline;
class PipelineLayout;
class Queue;
class Sampler;
class ShaderModule;
class Texture;
class TextureView;
struct MetalBackendTraits {
using BindGroupType = BindGroup;
using BindGroupLayoutType = BindGroupLayout;
using BufferType = Buffer;
using BufferViewType = BufferView;
using CommandBufferType = CommandBuffer;
using InputStateType = InputState;
using PipelineType = Pipeline;
using PipelineLayoutType = PipelineLayout;
using QueueType = Queue;
using SamplerType = Sampler;
using ShaderModuleType = ShaderModule;
using TextureType = Texture;
using TextureViewType = TextureView;
};
template<typename T>
auto ToBackend(T&& common) -> decltype(ToBackendBase<MetalBackendTraits>(common)) {
return ToBackendBase<MetalBackendTraits>(common);
}
class Device : public DeviceBase {
public:
Device(id<MTLDevice> mtlDevice);
~Device();
BindGroupBase* CreateBindGroup(BindGroupBuilder* builder) override;
BindGroupLayoutBase* CreateBindGroupLayout(BindGroupLayoutBuilder* builder) override;
BufferBase* CreateBuffer(BufferBuilder* builder) override;
BufferViewBase* CreateBufferView(BufferViewBuilder* builder) override;
CommandBufferBase* CreateCommandBuffer(CommandBufferBuilder* builder) override;
InputStateBase* CreateInputState(InputStateBuilder* builder) override;
PipelineBase* CreatePipeline(PipelineBuilder* builder) override;
PipelineLayoutBase* CreatePipelineLayout(PipelineLayoutBuilder* builder) override;
QueueBase* CreateQueue(QueueBuilder* builder) override;
SamplerBase* CreateSampler(SamplerBuilder* builder) override;
ShaderModuleBase* CreateShaderModule(ShaderModuleBuilder* builder) override;
TextureBase* CreateTexture(TextureBuilder* builder) override;
TextureViewBase* CreateTextureView(TextureViewBuilder* builder) override;
void SetNextDrawable(id<CAMetalDrawable> drawable);
void Present();
id<MTLDevice> GetMTLDevice();
id<MTLTexture> GetCurrentTexture();
id<MTLTexture> GetCurrentDepthTexture();
// NXT API
void Reference();
void Release();
private:
id<MTLDevice> mtlDevice = nil;
id<MTLCommandQueue> commandQueue = nil;
id<CAMetalDrawable> currentDrawable = nil;
id<MTLTexture> currentTexture = nil;
id<MTLTexture> currentDepthTexture = nil;
};
class BindGroup : public BindGroupBase {
public:
BindGroup(Device* device, BindGroupBuilder* builder);
private:
Device* device;
};
class BindGroupLayout : public BindGroupLayoutBase {
public:
BindGroupLayout(Device* device, BindGroupLayoutBuilder* builder);
private:
Device* device;
};
class Buffer : public BufferBase {
public:
Buffer(Device* device, BufferBuilder* builder);
~Buffer();
id<MTLBuffer> GetMTLBuffer();
std::mutex& GetMutex();
private:
void SetSubDataImpl(uint32_t start, uint32_t count, const uint32_t* data) override;
Device* device;
std::mutex mutex;
id<MTLBuffer> mtlBuffer = nil;
};
class BufferView : public BufferViewBase {
public:
BufferView(Device* device, BufferViewBuilder* builder);
private:
Device* device;
};
class CommandBuffer : public CommandBufferBase {
public:
CommandBuffer(Device* device, CommandBufferBuilder* builder);
~CommandBuffer();
void FillCommands(id<MTLCommandBuffer> commandBuffer, std::unordered_set<std::mutex*>* mutexes);
private:
Device* device;
CommandIterator commands;
};
class InputState : public InputStateBase {
public:
InputState(Device* device, InputStateBuilder* builder);
~InputState();
MTLVertexDescriptor* GetMTLVertexDescriptor();
private:
Device* device;
MTLVertexDescriptor* mtlVertexDescriptor = nil;
};
class Pipeline : public PipelineBase {
public:
Pipeline(Device* device, PipelineBuilder* builder);
~Pipeline();
void Encode(id<MTLRenderCommandEncoder> encoder);
void Encode(id<MTLComputeCommandEncoder> encoder);
MTLSize GetLocalWorkGroupSize() const;
private:
Device* device;
id<MTLRenderPipelineState> mtlRenderPipelineState = nil;
id<MTLDepthStencilState> mtlDepthStencilState = nil;
id<MTLComputePipelineState> mtlComputePipelineState = nil;
MTLSize localWorkgroupSize;
};
class PipelineLayout : public PipelineLayoutBase {
public:
PipelineLayout(Device* device, PipelineLayoutBuilder* builder);
using BindingIndexInfo = std::array<std::array<uint32_t, kMaxBindingsPerGroup>, kMaxBindGroups>;
const BindingIndexInfo& GetBindingIndexInfo(nxt::ShaderStage stage) const;
private:
Device* device;
PerStage<BindingIndexInfo> indexInfo;
};
class Queue : public QueueBase {
public:
Queue(Device* device, QueueBuilder* builder);
~Queue();
id<MTLCommandQueue> GetMTLCommandQueue();
// NXT API
void Submit(uint32_t numCommands, CommandBuffer* const * commands);
private:
Device* device;
id<MTLCommandQueue> commandQueue = nil;
};
class Sampler : public SamplerBase {
public:
Sampler(Device* device, SamplerBuilder* builder);
~Sampler();
id<MTLSamplerState> GetMTLSamplerState();
private:
Device* device;
id<MTLSamplerState> mtlSamplerState = nil;
};
class ShaderModule : public ShaderModuleBase {
public:
ShaderModule(Device* device, ShaderModuleBuilder* builder);
~ShaderModule();
id<MTLFunction> GetFunction(const char* functionName) const;
MTLSize GetLocalWorkGroupSize(const std::string& entryPoint) const;
private:
Device* device;
id<MTLLibrary> mtlLibrary = nil;
spirv_cross::CompilerMSL* compiler = nullptr;
};
class Texture : public TextureBase {
public:
Texture(Device* device, TextureBuilder* builder);
~Texture();
id<MTLTexture> GetMTLTexture();
private:
Device* device;
id<MTLTexture> mtlTexture = nil;
};
class TextureView : public TextureViewBase {
public:
TextureView(Device* device, TextureViewBuilder* builder);
private:
Device* device;
};
}
}
#endif // BACKEND_METAL_METALBACKEND_H_

View File

@ -0,0 +1,968 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "MetalBackend.h"
#include <spirv-cross/spirv_msl.hpp>
#include <sstream>
#include "common/Commands.h"
namespace backend {
namespace metal {
nxtProcTable GetNonValidatingProcs();
nxtProcTable GetValidatingProcs();
void Init(id<MTLDevice> metalDevice, nxtProcTable* procs, nxtDevice* device) {
*device = nullptr;
*procs = GetValidatingProcs();
*device = reinterpret_cast<nxtDevice>(new Device(metalDevice));
}
void SetNextDrawable(nxtDevice device, id<CAMetalDrawable> drawable) {
Device* backendDevice = reinterpret_cast<Device*>(device);
backendDevice->SetNextDrawable(drawable);
}
void Present(nxtDevice device) {
Device* backendDevice = reinterpret_cast<Device*>(device);
backendDevice->Present();
}
// Device
Device::Device(id<MTLDevice> mtlDevice) : mtlDevice(mtlDevice) {
[mtlDevice retain];
commandQueue = [mtlDevice newCommandQueue];
}
Device::~Device() {
[mtlDevice release];
mtlDevice = nil;
[commandQueue release];
commandQueue = nil;
[currentTexture release];
currentTexture = nil;
[currentDepthTexture release];
currentDepthTexture = nil;
}
BindGroupBase* Device::CreateBindGroup(BindGroupBuilder* builder) {
return new BindGroup(this, builder);
}
BindGroupLayoutBase* Device::CreateBindGroupLayout(BindGroupLayoutBuilder* builder) {
return new BindGroupLayout(this, builder);
}
BufferBase* Device::CreateBuffer(BufferBuilder* builder) {
return new Buffer(this, builder);
}
BufferViewBase* Device::CreateBufferView(BufferViewBuilder* builder) {
return new BufferView(this, builder);
}
CommandBufferBase* Device::CreateCommandBuffer(CommandBufferBuilder* builder) {
return new CommandBuffer(this, builder);
}
InputStateBase* Device::CreateInputState(InputStateBuilder* builder) {
return new InputState(this, builder);
}
PipelineBase* Device::CreatePipeline(PipelineBuilder* builder) {
return new Pipeline(this, builder);
}
PipelineLayoutBase* Device::CreatePipelineLayout(PipelineLayoutBuilder* builder) {
return new PipelineLayout(this, builder);
}
QueueBase* Device::CreateQueue(QueueBuilder* builder) {
return new Queue(this, builder);
}
SamplerBase* Device::CreateSampler(SamplerBuilder* builder) {
return new Sampler(this, builder);
}
ShaderModuleBase* Device::CreateShaderModule(ShaderModuleBuilder* builder) {
return new ShaderModule(this, builder);
}
TextureBase* Device::CreateTexture(TextureBuilder* builder) {
return new Texture(this, builder);
}
TextureViewBase* Device::CreateTextureView(TextureViewBuilder* builder) {
return new TextureView(this, builder);
}
void Device::SetNextDrawable(id<CAMetalDrawable> drawable) {
[currentDrawable release];
currentDrawable = drawable;
[currentDrawable retain];
[currentTexture release];
currentTexture = drawable.texture;
[currentTexture retain];
if (currentDepthTexture == nil ||
currentTexture.width != currentDepthTexture.width ||
currentTexture.height != currentDepthTexture.height) {
if (currentDepthTexture != nil) {
[currentDepthTexture release];
}
MTLTextureDescriptor* depthDescriptor = [MTLTextureDescriptor
texture2DDescriptorWithPixelFormat:MTLPixelFormatDepth32Float
width:currentTexture.width
height:currentTexture.height
mipmapped:NO];
depthDescriptor.textureType = MTLTextureType2D;
depthDescriptor.usage = MTLTextureUsageRenderTarget;
depthDescriptor.storageMode = MTLStorageModePrivate;
currentDepthTexture = [mtlDevice newTextureWithDescriptor:depthDescriptor];
}
MTLRenderPassDescriptor* passDescriptor = [MTLRenderPassDescriptor renderPassDescriptor];
passDescriptor.colorAttachments[0].texture = currentTexture;
passDescriptor.colorAttachments[0].loadAction = MTLLoadActionClear;
passDescriptor.colorAttachments[0].storeAction = MTLStoreActionStore;
passDescriptor.colorAttachments[0].clearColor = MTLClearColorMake(0.0, 0.0, 0.0, 1.0);
passDescriptor.depthAttachment.texture = currentDepthTexture;
passDescriptor.depthAttachment.loadAction = MTLLoadActionClear;
passDescriptor.depthAttachment.storeAction = MTLStoreActionStore;
passDescriptor.depthAttachment.clearDepth = 1.0;
id<MTLCommandBuffer> commandBuffer = [commandQueue commandBuffer];
id<MTLRenderCommandEncoder> commandEncoder = [commandBuffer
renderCommandEncoderWithDescriptor:passDescriptor];
[commandEncoder endEncoding];
[commandBuffer commit];
}
void Device::Present() {
id<MTLCommandBuffer> commandBuffer = [commandQueue commandBuffer];
[commandBuffer presentDrawable: currentDrawable];
[commandBuffer commit];
}
id<MTLDevice> Device::GetMTLDevice() {
return mtlDevice;
}
id<MTLTexture> Device::GetCurrentTexture() {
return currentTexture;
}
id<MTLTexture> Device::GetCurrentDepthTexture() {
return currentDepthTexture;
}
void Device::Reference() {
}
void Device::Release() {
}
// Bind Group
BindGroup::BindGroup(Device* device, BindGroupBuilder* builder)
: BindGroupBase(builder), device(device) {
}
// Bind Group Layout
BindGroupLayout::BindGroupLayout(Device* device, BindGroupLayoutBuilder* builder)
: BindGroupLayoutBase(builder), device(device) {
}
// Buffer
Buffer::Buffer(Device* device, BufferBuilder* builder)
: BufferBase(builder), device(device) {
mtlBuffer = [device->GetMTLDevice() newBufferWithLength:GetSize()
options:MTLResourceStorageModeManaged];
}
Buffer::~Buffer() {
std::lock_guard<std::mutex> lock(mutex);
[mtlBuffer release];
mtlBuffer = nil;
}
id<MTLBuffer> Buffer::GetMTLBuffer() {
return mtlBuffer;
}
std::mutex& Buffer::GetMutex() {
return mutex;
}
void Buffer::SetSubDataImpl(uint32_t start, uint32_t count, const uint32_t* data) {
uint32_t* dest = reinterpret_cast<uint32_t*>([mtlBuffer contents]);
{
std::lock_guard<std::mutex> lock(mutex);
memcpy(&dest[start], data, count * sizeof(uint32_t));
}
[mtlBuffer didModifyRange:NSMakeRange(start * sizeof(uint32_t), count * sizeof(uint32_t))];
}
// BufferView
BufferView::BufferView(Device* device, BufferViewBuilder* builder)
: BufferViewBase(builder), device(device) {
}
// CommandBuffer
static MTLIndexType IndexFormatType(nxt::IndexFormat format) {
switch (format) {
case nxt::IndexFormat::Uint16:
return MTLIndexTypeUInt16;
case nxt::IndexFormat::Uint32:
return MTLIndexTypeUInt32;
}
}
CommandBuffer::CommandBuffer(Device* device, CommandBufferBuilder* builder)
: CommandBufferBase(builder), device(device), commands(builder->AcquireCommands()) {
}
CommandBuffer::~CommandBuffer() {
FreeCommands(&commands);
}
namespace {
struct CurrentEncoders {
Device* device;
id<MTLBlitCommandEncoder> blit = nil;
id<MTLComputeCommandEncoder> compute = nil;
id<MTLRenderCommandEncoder> render = nil;
void FinishEncoders() {
if (blit != nil) {
[blit endEncoding];
blit = nil;
}
if (compute != nil) {
[compute endEncoding];
compute = nil;
}
if (render != nil) {
[render endEncoding];
render = nil;
}
}
void EnsureBlit(id<MTLCommandBuffer> commandBuffer) {
if (blit == nil) {
FinishEncoders();
blit = [commandBuffer blitCommandEncoder];
}
}
void EnsureCompute(id<MTLCommandBuffer> commandBuffer) {
if (compute == nil) {
FinishEncoders();
compute = [commandBuffer computeCommandEncoder];
// TODO(cwallez@chromium.org): does any state need to be reset?
}
}
void EnsureRender(id<MTLCommandBuffer> commandBuffer) {
if (render == nil) {
FinishEncoders();
// TODO(cwallez@chromium.org): this should be created from a renderpass subpass
MTLRenderPassDescriptor* descriptor = [MTLRenderPassDescriptor renderPassDescriptor];
descriptor.colorAttachments[0].texture = device->GetCurrentTexture();
descriptor.colorAttachments[0].loadAction = MTLLoadActionLoad;
descriptor.colorAttachments[0].storeAction = MTLStoreActionStore;
descriptor.depthAttachment.texture = device->GetCurrentDepthTexture();
descriptor.depthAttachment.loadAction = MTLLoadActionLoad;
descriptor.depthAttachment.storeAction = MTLStoreActionStore;
render = [commandBuffer renderCommandEncoderWithDescriptor:descriptor];
// TODO(cwallez@chromium.org): does any state need to be reset?
}
}
};
}
void CommandBuffer::FillCommands(id<MTLCommandBuffer> commandBuffer, std::unordered_set<std::mutex*>* mutexes) {
Command type;
Pipeline* lastPipeline = nullptr;
id<MTLBuffer> indexBuffer = nil;
uint32_t indexBufferOffset = 0;
MTLIndexType indexType = MTLIndexTypeUInt32;
CurrentEncoders encoders;
encoders.device = device;
while (commands.NextCommandId(&type)) {
switch (type) {
case Command::CopyBufferToTexture:
{
CopyBufferToTextureCmd* copy = commands.NextCommand<CopyBufferToTextureCmd>();
Buffer* buffer = ToBackend(copy->buffer.Get());
Texture* texture = ToBackend(copy->texture.Get());
// TODO(kainino@chromium.org): this has to be in a Blit encoder, not a Render encoder, so ordering is lost here
unsigned rowSize = copy->width * TextureFormatPixelSize(texture->GetFormat());
MTLOrigin origin;
origin.x = copy->x;
origin.y = copy->y;
origin.z = copy->z;
MTLSize size;
size.width = copy->width;
size.height = copy->height;
size.depth = copy->depth;
encoders.EnsureBlit(commandBuffer);
[encoders.blit
copyFromBuffer:buffer->GetMTLBuffer()
sourceOffset:0
sourceBytesPerRow:rowSize
sourceBytesPerImage:(rowSize * copy->height)
sourceSize:size
toTexture:texture->GetMTLTexture()
destinationSlice:0
destinationLevel:copy->level
destinationOrigin:origin];
}
break;
case Command::Dispatch:
{
DispatchCmd* dispatch = commands.NextCommand<DispatchCmd>();
encoders.EnsureCompute(commandBuffer);
ASSERT(lastPipeline->IsCompute());
[encoders.compute dispatchThreadgroups:MTLSizeMake(dispatch->x, dispatch->y, dispatch->z)
threadsPerThreadgroup: lastPipeline->GetLocalWorkGroupSize()];
}
break;
case Command::DrawArrays:
{
DrawArraysCmd* draw = commands.NextCommand<DrawArraysCmd>();
encoders.EnsureRender(commandBuffer);
[encoders.render
drawPrimitives:MTLPrimitiveTypeTriangle
vertexStart:draw->firstVertex
vertexCount:draw->vertexCount
instanceCount:draw->instanceCount
baseInstance:draw->firstInstance];
}
break;
case Command::DrawElements:
{
DrawElementsCmd* draw = commands.NextCommand<DrawElementsCmd>();
encoders.EnsureRender(commandBuffer);
[encoders.render
drawIndexedPrimitives:MTLPrimitiveTypeTriangle
indexCount:draw->indexCount
indexType:indexType
indexBuffer:indexBuffer
indexBufferOffset:indexBufferOffset
instanceCount:draw->instanceCount
baseVertex:0
baseInstance:draw->firstInstance];
}
break;
case Command::SetPipeline:
{
SetPipelineCmd* cmd = commands.NextCommand<SetPipelineCmd>();
lastPipeline = ToBackend(cmd->pipeline).Get();
if (lastPipeline->IsCompute()) {
encoders.EnsureCompute(commandBuffer);
lastPipeline->Encode(encoders.compute);
} else {
encoders.EnsureRender(commandBuffer);
lastPipeline->Encode(encoders.render);
}
}
break;
case Command::SetPushConstants:
{
SetPushConstantsCmd* cmd = commands.NextCommand<SetPushConstantsCmd>();
uint32_t* valuesUInt = commands.NextData<uint32_t>(cmd->count);
int32_t* valuesInt = reinterpret_cast<int32_t*>(valuesUInt);
float* valuesFloat = reinterpret_cast<float*>(valuesUInt);
// TODO(kainino@chromium.org): implement SetPushConstants
}
break;
case Command::SetBindGroup:
{
SetBindGroupCmd* cmd = commands.NextCommand<SetBindGroupCmd>();
BindGroup* group = ToBackend(cmd->group.Get());
uint32_t groupIndex = cmd->index;
const auto& layout = group->GetLayout()->GetBindingInfo();
if (lastPipeline->IsCompute()) {
encoders.EnsureCompute(commandBuffer);
} else {
encoders.EnsureRender(commandBuffer);
}
// TODO(kainino@chromium.org): Maintain buffers and offsets arrays in BindGroup so that we
// only have to do one setVertexBuffers and one setFragmentBuffers call here.
for (size_t binding = 0; binding < layout.mask.size(); ++binding) {
if (!layout.mask[binding]) {
continue;
}
auto stage = layout.visibilities[binding];
bool vertStage = stage & nxt::ShaderStageBit::Vertex;
bool fragStage = stage & nxt::ShaderStageBit::Fragment;
bool computeStage = stage & nxt::ShaderStageBit::Compute;
uint32_t vertIndex = 0;
uint32_t fragIndex = 0;
uint32_t computeIndex = 0;
if (vertStage) {
vertIndex = ToBackend(lastPipeline->GetLayout())->
GetBindingIndexInfo(nxt::ShaderStage::Vertex)[groupIndex][binding];
}
if (fragStage) {
fragIndex = ToBackend(lastPipeline->GetLayout())->
GetBindingIndexInfo(nxt::ShaderStage::Fragment)[groupIndex][binding];
}
if (computeStage) {
computeIndex = ToBackend(lastPipeline->GetLayout())->
GetBindingIndexInfo(nxt::ShaderStage::Compute)[groupIndex][binding];
}
switch (layout.types[binding]) {
case nxt::BindingType::UniformBuffer:
case nxt::BindingType::StorageBuffer:
{
BufferView* view = ToBackend(group->GetBindingAsBufferView(binding));
auto b = ToBackend(view->GetBuffer());
mutexes->insert(&b->GetMutex());
const id<MTLBuffer> buffer = b->GetMTLBuffer();
const NSUInteger offset = view->GetOffset();
if (vertStage) {
[encoders.render
setVertexBuffers:&buffer
offsets:&offset
withRange:NSMakeRange(vertIndex, 1)];
}
if (fragStage) {
[encoders.render
setFragmentBuffers:&buffer
offsets:&offset
withRange:NSMakeRange(fragIndex, 1)];
}
if (computeStage) {
[encoders.compute
setBuffers:&buffer
offsets:&offset
withRange:NSMakeRange(computeIndex, 1)];
}
}
break;
case nxt::BindingType::Sampler:
{
auto sampler = ToBackend(group->GetBindingAsSampler(binding));
if (vertStage) {
[encoders.render
setVertexSamplerState:sampler->GetMTLSamplerState()
atIndex:vertIndex];
}
if (fragStage) {
[encoders.render
setFragmentSamplerState:sampler->GetMTLSamplerState()
atIndex:fragIndex];
}
if (computeStage) {
[encoders.compute
setSamplerState:sampler->GetMTLSamplerState()
atIndex:computeIndex];
}
}
break;
case nxt::BindingType::SampledTexture:
{
auto texture = ToBackend(group->GetBindingAsTextureView(binding)->GetTexture());
if (vertStage) {
[encoders.render
setVertexTexture:texture->GetMTLTexture()
atIndex:vertIndex];
}
if (fragStage) {
[encoders.render
setFragmentTexture:texture->GetMTLTexture()
atIndex:fragIndex];
}
if (computeStage) {
[encoders.compute
setTexture:texture->GetMTLTexture()
atIndex:computeIndex];
}
}
break;
}
}
}
break;
case Command::SetIndexBuffer:
{
SetIndexBufferCmd* cmd = commands.NextCommand<SetIndexBufferCmd>();
auto b = ToBackend(cmd->buffer.Get());
mutexes->insert(&b->GetMutex());
indexBuffer = b->GetMTLBuffer();
indexBufferOffset = cmd->offset;
indexType = IndexFormatType(cmd->format);
}
break;
case Command::SetVertexBuffers:
{
SetVertexBuffersCmd* cmd = commands.NextCommand<SetVertexBuffersCmd>();
auto buffers = commands.NextData<Ref<BufferBase>>(cmd->count);
auto offsets = commands.NextData<uint32_t>(cmd->count);
auto inputState = lastPipeline->GetInputState();
std::array<id<MTLBuffer>, kMaxVertexInputs> mtlBuffers;
std::array<NSUInteger, kMaxVertexInputs> mtlOffsets;
// Perhaps an "array of vertex buffers(+offsets?)" should be
// a NXT API primitive to avoid reconstructing this array?
for (uint32_t i = 0; i < cmd->count; ++i) {
Buffer* buffer = ToBackend(buffers[i].Get());
mutexes->insert(&buffer->GetMutex());
mtlBuffers[i] = buffer->GetMTLBuffer();
mtlOffsets[i] = offsets[i];
}
encoders.EnsureRender(commandBuffer);
[encoders.render
setVertexBuffers:mtlBuffers.data()
offsets:mtlOffsets.data()
withRange:NSMakeRange(kMaxBindingsPerGroup + cmd->startSlot, cmd->count)];
}
break;
case Command::TransitionBufferUsage:
{
TransitionBufferUsageCmd* cmd = commands.NextCommand<TransitionBufferUsageCmd>();
cmd->buffer->TransitionUsageImpl(cmd->usage);
}
break;
case Command::TransitionTextureUsage:
{
TransitionTextureUsageCmd* cmd = commands.NextCommand<TransitionTextureUsageCmd>();
cmd->texture->TransitionUsageImpl(cmd->usage);
}
break;
}
}
encoders.FinishEncoders();
}
// InputState
static MTLVertexFormat VertexFormatType(nxt::VertexFormat format) {
switch (format) {
case nxt::VertexFormat::FloatR32G32B32A32:
return MTLVertexFormatFloat4;
case nxt::VertexFormat::FloatR32G32B32:
return MTLVertexFormatFloat3;
case nxt::VertexFormat::FloatR32G32:
return MTLVertexFormatFloat2;
}
}
static MTLVertexStepFunction InputStepModeFunction(nxt::InputStepMode mode) {
switch (mode) {
case nxt::InputStepMode::Vertex:
return MTLVertexStepFunctionPerVertex;
case nxt::InputStepMode::Instance:
return MTLVertexStepFunctionPerInstance;
}
}
InputState::InputState(Device* device, InputStateBuilder* builder)
: InputStateBase(builder), device(device) {
mtlVertexDescriptor = [MTLVertexDescriptor new];
const auto& attributesSetMask = GetAttributesSetMask();
for (size_t i = 0; i < attributesSetMask.size(); ++i) {
if (!attributesSetMask[i]) {
continue;
}
const AttributeInfo& info = GetAttribute(i);
auto attribDesc = [MTLVertexAttributeDescriptor new];
attribDesc.format = VertexFormatType(info.format);
attribDesc.offset = info.offset;
attribDesc.bufferIndex = kMaxBindingsPerGroup + info.bindingSlot;
mtlVertexDescriptor.attributes[i] = attribDesc;
[attribDesc release];
}
const auto& inputsSetMask = GetInputsSetMask();
for (size_t i = 0; i < inputsSetMask.size(); ++i) {
if (!inputsSetMask[i]) {
continue;
}
const InputInfo& info = GetInput(i);
auto layoutDesc = [MTLVertexBufferLayoutDescriptor new];
if (info.stride == 0) {
// For MTLVertexStepFunctionConstant, the stepRate must be 0,
// but the stride must not be 0, so an arbitrary non-zero value (256) is used.
layoutDesc.stepFunction = MTLVertexStepFunctionConstant;
layoutDesc.stepRate = 0;
layoutDesc.stride = 256;
} else {
layoutDesc.stepFunction = InputStepModeFunction(info.stepMode);
layoutDesc.stepRate = 1;
layoutDesc.stride = info.stride;
}
mtlVertexDescriptor.layouts[kMaxBindingsPerGroup + i] = layoutDesc;
[layoutDesc release];
}
}
InputState::~InputState() {
[mtlVertexDescriptor release];
mtlVertexDescriptor = nil;
}
MTLVertexDescriptor* InputState::GetMTLVertexDescriptor() {
return mtlVertexDescriptor;
}
// Pipeline
Pipeline::Pipeline(Device* device, PipelineBuilder* builder)
: PipelineBase(builder), device(device) {
if (IsCompute()) {
const auto& module = ToBackend(builder->GetStageInfo(nxt::ShaderStage::Compute).module);
const auto& entryPoint = builder->GetStageInfo(nxt::ShaderStage::Compute).entryPoint;
id<MTLFunction> function = module->GetFunction(entryPoint.c_str());
NSError *error = nil;
mtlComputePipelineState = [device->GetMTLDevice()
newComputePipelineStateWithFunction:function error:&error];
if (error != nil) {
NSLog(@" error => %@", error);
device->HandleError("Error creating pipeline state");
return;
}
// Copy over the local workgroup size as it is passed to dispatch explicitly in Metal
localWorkgroupSize = module->GetLocalWorkGroupSize(entryPoint);
} else {
MTLRenderPipelineDescriptor* descriptor = [MTLRenderPipelineDescriptor new];
for (auto stage : IterateStages(GetStageMask())) {
const auto& module = ToBackend(builder->GetStageInfo(stage).module);
const auto& entryPoint = builder->GetStageInfo(stage).entryPoint;
id<MTLFunction> function = module->GetFunction(entryPoint.c_str());
switch (stage) {
case nxt::ShaderStage::Vertex:
descriptor.vertexFunction = function;
break;
case nxt::ShaderStage::Fragment:
descriptor.fragmentFunction = function;
break;
case nxt::ShaderStage::Compute:
ASSERT(false);
break;
}
}
descriptor.colorAttachments[0].pixelFormat = MTLPixelFormatBGRA8Unorm;
descriptor.depthAttachmentPixelFormat = MTLPixelFormatDepth32Float;
InputState* inputState = ToBackend(GetInputState());
descriptor.vertexDescriptor = inputState->GetMTLVertexDescriptor();
// TODO(kainino@chromium.org): push constants, textures, samplers
NSError *error = nil;
mtlRenderPipelineState = [device->GetMTLDevice()
newRenderPipelineStateWithDescriptor:descriptor error:&error];
if (error != nil) {
NSLog(@" error => %@", error);
device->HandleError("Error creating pipeline state");
return;
}
MTLDepthStencilDescriptor* dsDesc = [MTLDepthStencilDescriptor new];
dsDesc.depthWriteEnabled = true;
dsDesc.depthCompareFunction = MTLCompareFunctionLess;
mtlDepthStencilState = [device->GetMTLDevice()
newDepthStencilStateWithDescriptor:dsDesc];
[dsDesc release];
[descriptor release];
}
}
Pipeline::~Pipeline() {
[mtlRenderPipelineState release];
[mtlDepthStencilState release];
[mtlComputePipelineState release];
}
void Pipeline::Encode(id<MTLRenderCommandEncoder> encoder) {
ASSERT(!IsCompute());
[encoder setDepthStencilState:mtlDepthStencilState];
[encoder setRenderPipelineState:mtlRenderPipelineState];
}
void Pipeline::Encode(id<MTLComputeCommandEncoder> encoder) {
ASSERT(IsCompute());
[encoder setComputePipelineState:mtlComputePipelineState];
}
MTLSize Pipeline::GetLocalWorkGroupSize() const {
return localWorkgroupSize;
}
// PipelineLayout
PipelineLayout::PipelineLayout(Device* device, PipelineLayoutBuilder* builder)
: PipelineLayoutBase(builder), device(device) {
// Each stage has its own numbering namespace in CompilerMSL.
for (auto stage : IterateStages(kAllStages)) {
uint32_t bufferIndex = 0;
uint32_t samplerIndex = 0;
uint32_t textureIndex = 0;
for (size_t group = 0; group < kMaxBindGroups; ++group) {
const auto& groupInfo = GetBindGroupLayout(group)->GetBindingInfo();
for (size_t binding = 0; binding < kMaxBindingsPerGroup; ++binding) {
if (!(groupInfo.visibilities[binding] & StageBit(stage))) {
continue;
}
if (!groupInfo.mask[binding]) {
continue;
}
switch (groupInfo.types[binding]) {
case nxt::BindingType::UniformBuffer:
case nxt::BindingType::StorageBuffer:
indexInfo[stage][group][binding] = bufferIndex;
bufferIndex++;
break;
case nxt::BindingType::Sampler:
indexInfo[stage][group][binding] = samplerIndex;
samplerIndex++;
break;
case nxt::BindingType::SampledTexture:
indexInfo[stage][group][binding] = textureIndex;
textureIndex++;
break;
}
}
}
}
}
const PipelineLayout::BindingIndexInfo& PipelineLayout::GetBindingIndexInfo(nxt::ShaderStage stage) const {
return indexInfo[stage];
}
// Queue
Queue::Queue(Device* device, QueueBuilder* builder)
: device(device) {
commandQueue = [device->GetMTLDevice() newCommandQueue];
}
Queue::~Queue() {
[commandQueue release];
commandQueue = nil;
}
id<MTLCommandQueue> Queue::GetMTLCommandQueue() {
return commandQueue;
}
void Queue::Submit(uint32_t numCommands, CommandBuffer* const * commands) {
id<MTLCommandBuffer> commandBuffer = [commandQueue commandBuffer];
// Mutexes are necessary to prevent buffers from being written from the
// CPU before their previous value has been read from the GPU.
// https://developer.apple.com/library/content/documentation/3DDrawing/Conceptual/MTLBestPracticesGuide/TripleBuffering.html
// TODO(kainino@chromium.org): When we have resource transitions, all of these mutexes will be replaced.
std::unordered_set<std::mutex*> mutexes;
for (uint32_t i = 0; i < numCommands; ++i) {
commands[i]->FillCommands(commandBuffer, &mutexes);
}
for (auto mutex : mutexes) {
mutex->lock();
}
[commandBuffer addCompletedHandler:^(id<MTLCommandBuffer> commandBuffer) {
// 'mutexes' is copied into this Block
for (auto mutex : mutexes) {
mutex->unlock();
}
}];
[commandBuffer commit];
}
// Sampler
MTLSamplerMinMagFilter FilterModeToMinMagFilter(nxt::FilterMode mode) {
switch (mode) {
case nxt::FilterMode::Nearest:
return MTLSamplerMinMagFilterNearest;
case nxt::FilterMode::Linear:
return MTLSamplerMinMagFilterLinear;
}
}
MTLSamplerMipFilter FilterModeToMipFilter(nxt::FilterMode mode) {
switch (mode) {
case nxt::FilterMode::Nearest:
return MTLSamplerMipFilterNearest;
case nxt::FilterMode::Linear:
return MTLSamplerMipFilterLinear;
}
}
Sampler::Sampler(Device* device, SamplerBuilder* builder)
: SamplerBase(builder), device(device) {
auto desc = [MTLSamplerDescriptor new];
[desc autorelease];
desc.minFilter = FilterModeToMinMagFilter(builder->GetMinFilter());
desc.magFilter = FilterModeToMinMagFilter(builder->GetMagFilter());
desc.mipFilter = FilterModeToMipFilter(builder->GetMipMapFilter());
// TODO(kainino@chromium.org): wrap modes
mtlSamplerState = [device->GetMTLDevice() newSamplerStateWithDescriptor:desc];
}
Sampler::~Sampler() {
[mtlSamplerState release];
}
id<MTLSamplerState> Sampler::GetMTLSamplerState() {
return mtlSamplerState;
}
// ShaderModule
ShaderModule::ShaderModule(Device* device, ShaderModuleBuilder* builder)
: ShaderModuleBase(builder), device(device) {
compiler = new spirv_cross::CompilerMSL(builder->AcquireSpirv());
ExtractSpirvInfo(*compiler);
spirv_cross::MSLConfiguration mslConfig;
mslConfig.flip_vert_y = false;
mslConfig.flip_frag_y = false;
std::string msl = compiler->compile(mslConfig);
NSString* mslSource = [NSString stringWithFormat:@"%s", msl.c_str()];
NSError *error = nil;
mtlLibrary = [device->GetMTLDevice() newLibraryWithSource:mslSource options:nil error:&error];
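// Note: the returned error can be non-nil even on success, for example when the MSL
// source only triggers compile warnings.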
if (error != nil) {
NSLog(@"MTLDevice newLibraryWithSource => %@", error);
device->HandleError("Error creating MTLLibrary from MSL source");
}
}
ShaderModule::~ShaderModule() {
delete compiler;
}
id<MTLFunction> ShaderModule::GetFunction(const char* functionName) const {
// TODO(kainino@chromium.org): make this somehow more robust; it needs to behave like clean_func_name:
// https://github.com/KhronosGroup/SPIRV-Cross/blob/4e915e8c483e319d0dd7a1fa22318bef28f8cca3/spirv_msl.cpp#L1213
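// SPIRV-Cross renames entry points that collide with MSL reserved names, so the
// SPIR-V entry point "main" comes out of the MSL compiler as "main0".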
if (strcmp(functionName, "main") == 0) {
functionName = "main0";
}
NSString* name = [NSString stringWithFormat:@"%s", functionName];
return [mtlLibrary newFunctionWithName:name];
}
MTLSize ShaderModule::GetLocalWorkGroupSize(const std::string& entryPoint) const {
auto size = compiler->get_entry_point(entryPoint).workgroup_size;
return MTLSizeMake(size.x, size.y, size.z);
}
// Texture
MTLPixelFormat TextureFormatPixelFormat(nxt::TextureFormat format) {
switch (format) {
case nxt::TextureFormat::R8G8B8A8Unorm:
return MTLPixelFormatRGBA8Unorm;
}
}
Texture::Texture(Device* device, TextureBuilder* builder)
: TextureBase(builder), device(device) {
auto desc = [MTLTextureDescriptor new];
[desc autorelease];
switch (GetDimension()) {
case nxt::TextureDimension::e2D:
desc.textureType = MTLTextureType2D;
break;
}
desc.usage = MTLTextureUsageShaderRead;
desc.pixelFormat = TextureFormatPixelFormat(GetFormat());
desc.width = GetWidth();
desc.height = GetHeight();
desc.depth = GetDepth();
desc.mipmapLevelCount = GetNumMipLevels();
desc.arrayLength = 1;
mtlTexture = [device->GetMTLDevice() newTextureWithDescriptor:desc];
}
Texture::~Texture() {
[mtlTexture release];
}
id<MTLTexture> Texture::GetMTLTexture() {
return mtlTexture;
}
// TextureView
TextureView::TextureView(Device* device, TextureViewBuilder* builder)
: TextureViewBase(builder), device(device) {
}
}
}

@ -0,0 +1,303 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "CommandBufferGL.h"
#include "common/Commands.h"
#include "OpenGLBackend.h"
#include "PipelineGL.h"
#include "PipelineLayoutGL.h"
#include "SamplerGL.h"
#include "TextureGL.h"
#include <cstring>
namespace backend {
namespace opengl {
CommandBuffer::CommandBuffer(Device* device, CommandBufferBuilder* builder)
: CommandBufferBase(builder), device(device), commands(builder->AcquireCommands()) {
}
CommandBuffer::~CommandBuffer() {
FreeCommands(&commands);
}
static GLenum IndexFormatType(nxt::IndexFormat format) {
switch (format) {
case nxt::IndexFormat::Uint16:
return GL_UNSIGNED_SHORT;
case nxt::IndexFormat::Uint32:
return GL_UNSIGNED_INT;
}
}
static GLenum VertexFormatType(nxt::VertexFormat format) {
switch (format) {
case nxt::VertexFormat::FloatR32G32B32A32:
case nxt::VertexFormat::FloatR32G32B32:
case nxt::VertexFormat::FloatR32G32:
return GL_FLOAT;
}
}
void CommandBuffer::Execute() {
Command type;
Pipeline* lastPipeline = nullptr;
uint32_t indexBufferOffset = 0;
nxt::IndexFormat indexBufferFormat = nxt::IndexFormat::Uint16;
while(commands.NextCommandId(&type)) {
switch (type) {
case Command::CopyBufferToTexture:
{
CopyBufferToTextureCmd* copy = commands.NextCommand<CopyBufferToTextureCmd>();
Buffer* buffer = ToBackend(copy->buffer.Get());
Texture* texture = ToBackend(copy->texture.Get());
GLenum target = texture->GetGLTarget();
auto format = texture->GetGLFormat();
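// Source the pixel data from the buffer by binding it as the pixel unpack buffer;
// the last argument of glTexSubImage2D is then a byte offset into that buffer (0 here).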
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, buffer->GetHandle());
glActiveTexture(GL_TEXTURE0);
glBindTexture(target, texture->GetHandle());
glTexSubImage2D(target, copy->level, copy->x, copy->y, copy->width, copy->height,
format.format, format.type, nullptr);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
}
break;
case Command::Dispatch:
{
DispatchCmd* dispatch = commands.NextCommand<DispatchCmd>();
glDispatchCompute(dispatch->x, dispatch->y, dispatch->z);
// TODO(cwallez@chromium.org): add barriers to the API
glMemoryBarrier(GL_ALL_BARRIER_BITS);
}
break;
case Command::DrawArrays:
{
DrawArraysCmd* draw = commands.NextCommand<DrawArraysCmd>();
if (draw->firstInstance > 0) {
glDrawArraysInstancedBaseInstance(GL_TRIANGLES,
draw->firstVertex, draw->vertexCount, draw->instanceCount, draw->firstInstance);
} else {
// This branch is only needed on OpenGL < 4.2
glDrawArraysInstanced(GL_TRIANGLES,
draw->firstVertex, draw->vertexCount, draw->instanceCount);
}
}
break;
case Command::DrawElements:
{
DrawElementsCmd* draw = commands.NextCommand<DrawElementsCmd>();
size_t formatSize = IndexFormatSize(indexBufferFormat);
GLenum formatType = IndexFormatType(indexBufferFormat);
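// With a buffer bound to GL_ELEMENT_ARRAY_BUFFER, the index "pointer" passed to the
// draw call is interpreted as a byte offset into that buffer.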
if (draw->firstInstance > 0) {
glDrawElementsInstancedBaseInstance(GL_TRIANGLES,
draw->indexCount, formatType,
reinterpret_cast<void*>(draw->firstIndex * formatSize + indexBufferOffset),
draw->instanceCount, draw->firstInstance);
} else {
// This branch is only needed on OpenGL < 4.2
glDrawElementsInstanced(GL_TRIANGLES,
draw->indexCount, formatType,
reinterpret_cast<void*>(draw->firstIndex * formatSize + indexBufferOffset),
draw->instanceCount);
}
}
break;
case Command::SetPipeline:
{
SetPipelineCmd* cmd = commands.NextCommand<SetPipelineCmd>();
ToBackend(cmd->pipeline)->ApplyNow();
lastPipeline = ToBackend(cmd->pipeline).Get();
}
break;
case Command::SetPushConstants:
{
SetPushConstantsCmd* cmd = commands.NextCommand<SetPushConstantsCmd>();
uint32_t* valuesUInt = commands.NextData<uint32_t>(cmd->count);
int32_t* valuesInt = reinterpret_cast<int32_t*>(valuesUInt);
float* valuesFloat = reinterpret_cast<float*>(valuesUInt);
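// Push constants are emulated with plain GL uniforms: the raw 32-bit values are
// reinterpreted according to the type recorded for each constant and uploaded to the
// uniform location the pipeline resolved at link time.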
for (auto stage : IterateStages(cmd->stage)) {
const auto& pushConstants = lastPipeline->GetPushConstants(stage);
const auto& glPushConstants = lastPipeline->GetGLPushConstants(stage);
for (size_t i = 0; i < cmd->count; i++) {
GLint location = glPushConstants[cmd->offset + i];
switch (pushConstants.types[cmd->offset + i]) {
case PushConstantType::Int:
glUniform1i(location, valuesInt[i]);
break;
case PushConstantType::UInt:
glUniform1ui(location, valuesUInt[i]);
break;
case PushConstantType::Float:
glUniform1f(location, valuesFloat[i]);
break;
}
}
}
}
break;
case Command::SetBindGroup:
{
SetBindGroupCmd* cmd = commands.NextCommand<SetBindGroupCmd>();
size_t index = cmd->index;
BindGroup* group = ToBackend(cmd->group.Get());
const auto& indices = ToBackend(lastPipeline->GetLayout())->GetBindingIndexInfo()[index];
const auto& layout = group->GetLayout()->GetBindingInfo();
// TODO(cwallez@chromium.org): iterate over the layout bitmask instead
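// 'indices' maps each binding in this group to the GL index assigned by the pipeline
// layout; for samplers and textures that index is further expanded into the set of
// texture units computed by the pipeline.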
for (size_t binding = 0; binding < kMaxBindingsPerGroup; ++binding) {
if (!layout.mask[binding]) {
continue;
}
switch (layout.types[binding]) {
case nxt::BindingType::UniformBuffer:
{
BufferView* view = ToBackend(group->GetBindingAsBufferView(binding));
GLuint buffer = ToBackend(view->GetBuffer())->GetHandle();
GLuint index = indices[binding];
glBindBufferRange(GL_UNIFORM_BUFFER, index, buffer, view->GetOffset(), view->GetSize());
}
break;
case nxt::BindingType::Sampler:
{
GLuint sampler = ToBackend(group->GetBindingAsSampler(binding))->GetHandle();
GLuint index = indices[binding];
for (auto unit : lastPipeline->GetTextureUnitsForSampler(index)) {
glBindSampler(unit, sampler);
}
}
break;
case nxt::BindingType::SampledTexture:
{
TextureView* view = ToBackend(group->GetBindingAsTextureView(binding));
Texture* texture = ToBackend(view->GetTexture());
GLuint handle = texture->GetHandle();
GLenum target = texture->GetGLTarget();
GLuint index = indices[binding];
for (auto unit : lastPipeline->GetTextureUnitsForTexture(index)) {
glActiveTexture(GL_TEXTURE0 + unit);
glBindTexture(target, handle);
}
}
break;
case nxt::BindingType::StorageBuffer:
{
BufferView* view = ToBackend(group->GetBindingAsBufferView(binding));
GLuint buffer = ToBackend(view->GetBuffer())->GetHandle();
GLuint index = indices[binding];
glBindBufferRange(GL_SHADER_STORAGE_BUFFER, index, buffer, view->GetOffset(), view->GetSize());
}
break;
}
}
}
break;
case Command::SetIndexBuffer:
{
SetIndexBufferCmd* cmd = commands.NextCommand<SetIndexBufferCmd>();
GLuint buffer = ToBackend(cmd->buffer.Get())->GetHandle();
indexBufferOffset = cmd->offset;
indexBufferFormat = cmd->format;
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, buffer);
}
break;
case Command::SetVertexBuffers:
{
SetVertexBuffersCmd* cmd = commands.NextCommand<SetVertexBuffersCmd>();
auto buffers = commands.NextData<Ref<BufferBase>>(cmd->count);
auto offsets = commands.NextData<uint32_t>(cmd->count);
auto inputState = lastPipeline->GetInputState();
auto& attributesSetMask = inputState->GetAttributesSetMask();
for (uint32_t location = 0; location < attributesSetMask.size(); ++location) {
if (!attributesSetMask[location]) {
// This slot is not used in the input state
continue;
}
auto attribute = inputState->GetAttribute(location);
auto slot = attribute.bindingSlot;
ASSERT(slot < kMaxVertexInputs);
if (slot < cmd->startSlot || slot >= cmd->startSlot + cmd->count) {
// This slot is not affected by this call
continue;
}
size_t bufferIndex = slot - cmd->startSlot;
GLuint buffer = ToBackend(buffers[bufferIndex])->GetHandle();
uint32_t bufferOffset = offsets[bufferIndex];
auto input = inputState->GetInput(slot);
auto components = VertexFormatNumComponents(attribute.format);
auto formatType = VertexFormatType(attribute.format);
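// glVertexAttribPointer latches the currently bound GL_ARRAY_BUFFER together with the
// offset, so each attribute records its own buffer binding in the pipeline's VAO.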
glBindBuffer(GL_ARRAY_BUFFER, buffer);
glVertexAttribPointer(
location, components, formatType, GL_FALSE,
input.stride,
reinterpret_cast<void*>(static_cast<intptr_t>(bufferOffset + attribute.offset)));
}
}
break;
case Command::TransitionBufferUsage:
{
TransitionBufferUsageCmd* cmd = commands.NextCommand<TransitionBufferUsageCmd>();
cmd->buffer->TransitionUsageImpl(cmd->usage);
}
break;
case Command::TransitionTextureUsage:
{
TransitionTextureUsageCmd* cmd = commands.NextCommand<TransitionTextureUsageCmd>();
cmd->texture->TransitionUsageImpl(cmd->usage);
}
break;
}
}
// HACK: cleanup a tiny bit of state to make this work with
// virtualized contexts enabled in Chromium
glBindSampler(0, 0);
}
}
}

@ -0,0 +1,45 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_OPENGL_COMMANDBUFFER_H_
#define BACKEND_OPENGL_COMMANDBUFFER_H_
#include "common/CommandAllocator.h"
#include "common/CommandBuffer.h"
namespace backend {
class CommandBufferBuilder;
}
namespace backend {
namespace opengl {
class Device;
class CommandBuffer : public CommandBufferBase {
public:
CommandBuffer(Device* device, CommandBufferBuilder* builder);
~CommandBuffer();
void Execute();
private:
Device* device;
CommandIterator commands;
};
}
}
#endif // BACKEND_OPENGL_COMMANDBUFFER_H_

@ -0,0 +1,21 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "OpenGLBackend.h"
#include "CommandBufferGL.h"
#include "PipelineGL.h"
#include "PipelineLayoutGL.h"
#include "SamplerGL.h"
#include "ShaderModuleGL.h"
#include "TextureGL.h"

@ -0,0 +1,180 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "OpenGLBackend.h"
#include "CommandBufferGL.h"
#include "PipelineGL.h"
#include "PipelineLayoutGL.h"
#include "ShaderModuleGL.h"
#include "SamplerGL.h"
#include "TextureGL.h"
namespace backend {
namespace opengl {
nxtProcTable GetNonValidatingProcs();
nxtProcTable GetValidatingProcs();
void HACKCLEAR() {
glClearColor(0, 0, 0, 1);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
}
void Init(void* (*getProc)(const char*), nxtProcTable* procs, nxtDevice* device) {
*device = nullptr;
gladLoadGLLoader(reinterpret_cast<GLADloadproc>(getProc));
glEnable(GL_DEPTH_TEST);
HACKCLEAR();
*procs = GetValidatingProcs();
*device = reinterpret_cast<nxtDevice>(new Device);
}
// Device
BindGroupBase* Device::CreateBindGroup(BindGroupBuilder* builder) {
return new BindGroup(this, builder);
}
BindGroupLayoutBase* Device::CreateBindGroupLayout(BindGroupLayoutBuilder* builder) {
return new BindGroupLayout(this, builder);
}
BufferBase* Device::CreateBuffer(BufferBuilder* builder) {
return new Buffer(this, builder);
}
BufferViewBase* Device::CreateBufferView(BufferViewBuilder* builder) {
return new BufferView(this, builder);
}
CommandBufferBase* Device::CreateCommandBuffer(CommandBufferBuilder* builder) {
return new CommandBuffer(this, builder);
}
InputStateBase* Device::CreateInputState(InputStateBuilder* builder) {
return new InputState(this, builder);
}
PipelineBase* Device::CreatePipeline(PipelineBuilder* builder) {
return new Pipeline(this, builder);
}
PipelineLayoutBase* Device::CreatePipelineLayout(PipelineLayoutBuilder* builder) {
return new PipelineLayout(this, builder);
}
QueueBase* Device::CreateQueue(QueueBuilder* builder) {
return new Queue(this, builder);
}
SamplerBase* Device::CreateSampler(SamplerBuilder* builder) {
return new Sampler(this, builder);
}
ShaderModuleBase* Device::CreateShaderModule(ShaderModuleBuilder* builder) {
return new ShaderModule(this, builder);
}
TextureBase* Device::CreateTexture(TextureBuilder* builder) {
return new Texture(this, builder);
}
TextureViewBase* Device::CreateTextureView(TextureViewBuilder* builder) {
return new TextureView(this, builder);
}
void Device::Reference() {
}
void Device::Release() {
}
// Bind Group
BindGroup::BindGroup(Device* device, BindGroupBuilder* builder)
: BindGroupBase(builder), device(device) {
}
// Bind Group Layout
BindGroupLayout::BindGroupLayout(Device* device, BindGroupLayoutBuilder* builder)
: BindGroupLayoutBase(builder), device(device) {
}
// Buffer
Buffer::Buffer(Device* device, BufferBuilder* builder)
: BufferBase(builder), device(device) {
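// Allocate the storage up front; contents are uploaded later through SetSubData.
// GL_STATIC_DRAW is only a usage hint.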
glGenBuffers(1, &buffer);
glBindBuffer(GL_ARRAY_BUFFER, buffer);
glBufferData(GL_ARRAY_BUFFER, GetSize(), nullptr, GL_STATIC_DRAW);
}
GLuint Buffer::GetHandle() const {
return buffer;
}
void Buffer::SetSubDataImpl(uint32_t start, uint32_t count, const uint32_t* data) {
glBindBuffer(GL_ARRAY_BUFFER, buffer);
glBufferSubData(GL_ARRAY_BUFFER, start * sizeof(uint32_t), count * sizeof(uint32_t), data);
}
// BufferView
BufferView::BufferView(Device* device, BufferViewBuilder* builder)
: BufferViewBase(builder), device(device) {
}
// InputState
InputState::InputState(Device* device, InputStateBuilder* builder)
: InputStateBase(builder), device(device) {
glGenVertexArrays(1, &vertexArrayObject);
glBindVertexArray(vertexArrayObject);
auto& attributesSetMask = GetAttributesSetMask();
for (uint32_t location = 0; location < attributesSetMask.size(); ++location) {
if (!attributesSetMask[location]) {
continue;
}
auto attribute = GetAttribute(location);
glEnableVertexAttribArray(location);
auto input = GetInput(attribute.bindingSlot);
if (input.stride == 0) {
// Emulate a stride of zero (constant vertex attribute) by
// setting the attribute instance divisor to a huge number.
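// A divisor of 0xffffffff advances the attribute only once every ~4 billion instances,
// i.e. effectively never.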
glVertexAttribDivisor(location, 0xffffffff);
} else {
switch (input.stepMode) {
case nxt::InputStepMode::Vertex:
break;
case nxt::InputStepMode::Instance:
glVertexAttribDivisor(location, 1);
break;
default:
ASSERT(false);
break;
}
}
}
}
GLuint InputState::GetVAO() {
return vertexArrayObject;
}
// Queue
Queue::Queue(Device* device, QueueBuilder* builder) : device(device) {
}
void Queue::Submit(uint32_t numCommands, CommandBuffer* const * commands) {
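// There is no GPU-side command buffer object in this backend: commands are replayed
// as immediate GL calls on the current context.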
for (uint32_t i = 0; i < numCommands; ++i) {
commands[i]->Execute();
}
}
}
}

@ -0,0 +1,151 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_OPENGL_OPENGLBACKEND_H_
#define BACKEND_OPENGL_OPENGLBACKEND_H_
#include "nxt/nxtcpp.h"
#include "common/Buffer.h"
#include "common/BindGroup.h"
#include "common/BindGroupLayout.h"
#include "common/Device.h"
#include "common/InputState.h"
#include "common/Queue.h"
#include "common/ToBackend.h"
#include "glad/glad.h"
namespace backend {
namespace opengl {
class BindGroup;
class BindGroupLayout;
class Buffer;
class BufferView;
class CommandBuffer;
class InputState;
class Pipeline;
class PipelineLayout;
class Queue;
class Sampler;
class ShaderModule;
class Texture;
class TextureView;
struct OpenGLBackendTraits {
using BindGroupType = BindGroup;
using BindGroupLayoutType = BindGroupLayout;
using BufferType = Buffer;
using BufferViewType = BufferView;
using CommandBufferType = CommandBuffer;
using InputStateType = InputState;
using PipelineType = Pipeline;
using PipelineLayoutType = PipelineLayout;
using QueueType = Queue;
using SamplerType = Sampler;
using ShaderModuleType = ShaderModule;
using TextureType = Texture;
using TextureViewType = TextureView;
};
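// ToBackend() casts frontend objects (pointers or Refs) to their OpenGL backend
// equivalents using the traits above.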
template<typename T>
auto ToBackend(T&& common) -> decltype(ToBackendBase<OpenGLBackendTraits>(common)) {
return ToBackendBase<OpenGLBackendTraits>(common);
}
// Definition of backend types
class Device : public DeviceBase {
public:
BindGroupBase* CreateBindGroup(BindGroupBuilder* builder) override;
BindGroupLayoutBase* CreateBindGroupLayout(BindGroupLayoutBuilder* builder) override;
BufferBase* CreateBuffer(BufferBuilder* builder) override;
BufferViewBase* CreateBufferView(BufferViewBuilder* builder) override;
CommandBufferBase* CreateCommandBuffer(CommandBufferBuilder* builder) override;
InputStateBase* CreateInputState(InputStateBuilder* builder) override;
PipelineBase* CreatePipeline(PipelineBuilder* builder) override;
PipelineLayoutBase* CreatePipelineLayout(PipelineLayoutBuilder* builder) override;
QueueBase* CreateQueue(QueueBuilder* builder) override;
SamplerBase* CreateSampler(SamplerBuilder* builder) override;
ShaderModuleBase* CreateShaderModule(ShaderModuleBuilder* builder) override;
TextureBase* CreateTexture(TextureBuilder* builder) override;
TextureViewBase* CreateTextureView(TextureViewBuilder* builder) override;
// NXT API
void Reference();
void Release();
};
class BindGroup : public BindGroupBase {
public:
BindGroup(Device* device, BindGroupBuilder* builder);
private:
Device* device;
};
class BindGroupLayout : public BindGroupLayoutBase {
public:
BindGroupLayout(Device* device, BindGroupLayoutBuilder* builder);
private:
Device* device;
};
class Buffer : public BufferBase {
public:
Buffer(Device* device, BufferBuilder* builder);
GLuint GetHandle() const;
private:
void SetSubDataImpl(uint32_t start, uint32_t count, const uint32_t* data) override;
Device* device;
GLuint buffer = 0;
};
class BufferView : public BufferViewBase {
public:
BufferView(Device* device, BufferViewBuilder* builder);
private:
Device* device;
};
class InputState : public InputStateBase {
public:
InputState(Device* device, InputStateBuilder* builder);
GLuint GetVAO();
private:
Device* device;
GLuint vertexArrayObject;
};
class Queue : public QueueBase {
public:
Queue(Device* device, QueueBuilder* builder);
// NXT API
void Submit(uint32_t numCommands, CommandBuffer* const * commands);
private:
Device* device;
};
}
}
#endif // BACKEND_OPENGL_OPENGLBACKEND_H_

@ -0,0 +1,213 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "PipelineGL.h"
#include "OpenGLBackend.h"
#include "PipelineLayoutGL.h"
#include "ShaderModuleGL.h"
#include <iostream>
#include <set>
namespace backend {
namespace opengl {
namespace {
GLenum GLShaderType(nxt::ShaderStage stage) {
switch (stage) {
case nxt::ShaderStage::Vertex:
return GL_VERTEX_SHADER;
case nxt::ShaderStage::Fragment:
return GL_FRAGMENT_SHADER;
case nxt::ShaderStage::Compute:
return GL_COMPUTE_SHADER;
}
}
}
Pipeline::Pipeline(Device* device, PipelineBuilder* builder) : PipelineBase(builder), device(device) {
auto CreateShader = [](GLenum type, const char* source) -> GLuint {
GLuint shader = glCreateShader(type);
glShaderSource(shader, 1, &source, nullptr);
glCompileShader(shader);
GLint compileStatus = GL_FALSE;
glGetShaderiv(shader, GL_COMPILE_STATUS, &compileStatus);
if (compileStatus == GL_FALSE) {
GLint infoLogLength = 0;
glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &infoLogLength);
if (infoLogLength > 1) {
std::vector<char> buffer(infoLogLength);
glGetShaderInfoLog(shader, infoLogLength, nullptr, &buffer[0]);
std::cout << source << std::endl;
std::cout << "Program compilation failed:\n";
std::cout << buffer.data() << std::endl;
}
}
return shader;
};
auto FillPushConstants = [](const ShaderModule* module, GLPushConstantInfo* info, GLuint program) {
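// Push constants that span several 32-bit slots (e.g. arrays) are assumed to occupy
// consecutive uniform locations, so record one location per slot and then skip past
// the whole range.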
const auto& moduleInfo = module->GetPushConstants();
for (uint32_t i = 0; i < moduleInfo.names.size(); i++) {
(*info)[i] = -1;
unsigned int size = moduleInfo.sizes[i];
if (size == 0) {
continue;
}
GLint location = glGetUniformLocation(program, moduleInfo.names[i].c_str());
if (location == -1) {
continue;
}
for (uint32_t offset = 0; offset < size; offset++) {
(*info)[i + offset] = location + offset;
}
i += size - 1;
}
};
program = glCreateProgram();
for (auto stage : IterateStages(GetStageMask())) {
const ShaderModule* module = ToBackend(builder->GetStageInfo(stage).module.Get());
GLuint shader = CreateShader(GLShaderType(stage), module->GetSource());
glAttachShader(program, shader);
}
glLinkProgram(program);
GLint linkStatus = GL_FALSE;
glGetProgramiv(program, GL_LINK_STATUS, &linkStatus);
if (linkStatus == GL_FALSE) {
GLint infoLogLength = 0;
glGetProgramiv(program, GL_INFO_LOG_LENGTH, &infoLogLength);
if (infoLogLength > 1) {
std::vector<char> buffer(infoLogLength);
glGetProgramInfoLog(program, infoLogLength, nullptr, &buffer[0]);
std::cout << "Program link failed:\n";
std::cout << buffer.data() << std::endl;
}
}
for (auto stage : IterateStages(GetStageMask())) {
const ShaderModule* module = ToBackend(builder->GetStageInfo(stage).module.Get());
FillPushConstants(module, &glPushConstants[stage], program);
}
glUseProgram(program);
// The uniforms are part of the program state so we can pre-bind buffer units, texture units etc.
const auto& layout = ToBackend(GetLayout());
const auto& indices = layout->GetBindingIndexInfo();
for (uint32_t group = 0; group < kMaxBindGroups; ++group) {
const auto& groupInfo = layout->GetBindGroupLayout(group)->GetBindingInfo();
for (uint32_t binding = 0; binding < kMaxBindingsPerGroup; ++binding) {
if (!groupInfo.mask[binding]) {
continue;
}
std::string name = GetBindingName(group, binding);
switch (groupInfo.types[binding]) {
case nxt::BindingType::UniformBuffer:
{
GLint location = glGetUniformBlockIndex(program, name.c_str());
glUniformBlockBinding(program, location, indices[group][binding]);
}
break;
case nxt::BindingType::StorageBuffer:
{
GLuint location = glGetProgramResourceIndex(program, GL_SHADER_STORAGE_BLOCK, name.c_str());
glShaderStorageBlockBinding(program, location, indices[group][binding]);
}
break;
case nxt::BindingType::Sampler:
case nxt::BindingType::SampledTexture:
// These binding types are handled in the separate sampler and texture emulation
break;
}
}
}
// Compute links between stages for combined samplers, then bind them to texture units
{
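// SPIRV-Cross lowers every (sampler, texture) pair used together into a single
// combined sampler uniform in the GLSL output; give each combined sampler its own
// texture unit and remember which units belong to which NXT sampler/texture binding.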
std::set<CombinedSampler> combinedSamplersSet;
for (auto stage : IterateStages(GetStageMask())) {
const auto& module = ToBackend(builder->GetStageInfo(stage).module);
for (const auto& combined : module->GetCombinedSamplerInfo()) {
combinedSamplersSet.insert(combined);
}
}
unitsForSamplers.resize(layout->GetNumSamplers());
unitsForTextures.resize(layout->GetNumSampledTextures());
GLuint textureUnit = layout->GetTextureUnitsUsed();
for (const auto& combined : combinedSamplersSet) {
std::string name = combined.GetName();
GLint location = glGetUniformLocation(program, name.c_str());
glUniform1i(location, textureUnit);
GLuint samplerIndex = indices[combined.samplerLocation.group][combined.samplerLocation.binding];
unitsForSamplers[samplerIndex].push_back(textureUnit);
GLuint textureIndex = indices[combined.textureLocation.group][combined.textureLocation.binding];
unitsForTextures[textureIndex].push_back(textureUnit);
textureUnit ++;
}
}
}
const Pipeline::GLPushConstantInfo& Pipeline::GetGLPushConstants(nxt::ShaderStage stage) const {
return glPushConstants[stage];
}
const std::vector<GLuint>& Pipeline::GetTextureUnitsForSampler(GLuint index) const {
ASSERT(index >= 0 && index < unitsForSamplers.size());
return unitsForSamplers[index];
}
const std::vector<GLuint>& Pipeline::GetTextureUnitsForTexture(GLuint index) const {
ASSERT(index >= 0 && index < unitsForTextures.size());
return unitsForTextures[index];
}
GLuint Pipeline::GetProgramHandle() const {
return program;
}
void Pipeline::ApplyNow() {
glUseProgram(program);
auto inputState = ToBackend(GetInputState());
glBindVertexArray(inputState->GetVAO());
}
}
}

@ -0,0 +1,55 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_OPENGL_PIPELINEGL_H_
#define BACKEND_OPENGL_PIPELINEGL_H_
#include "common/Pipeline.h"
#include "glad/glad.h"
#include <vector>
namespace backend {
namespace opengl {
class Device;
class ShaderModule;
class Pipeline : public PipelineBase {
public:
Pipeline(Device* device, PipelineBuilder* builder);
using GLPushConstantInfo = std::array<GLint, kMaxPushConstants>;
using BindingLocations = std::array<std::array<GLint, kMaxBindingsPerGroup>, kMaxBindGroups>;
const GLPushConstantInfo& GetGLPushConstants(nxt::ShaderStage stage) const;
const std::vector<GLuint>& GetTextureUnitsForSampler(GLuint index) const;
const std::vector<GLuint>& GetTextureUnitsForTexture(GLuint index) const;
GLuint GetProgramHandle() const;
void ApplyNow();
private:
GLuint program;
PerStage<GLPushConstantInfo> glPushConstants;
std::vector<std::vector<GLuint>> unitsForSamplers;
std::vector<std::vector<GLuint>> unitsForTextures;
Device* device;
};
}
}
#endif // BACKEND_OPENGL_PIPELINEGL_H_

@ -0,0 +1,80 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "PipelineLayoutGL.h"
#include "OpenGLBackend.h"
namespace backend {
namespace opengl {
PipelineLayout::PipelineLayout(Device* device, PipelineLayoutBuilder* builder)
: PipelineLayoutBase(builder), device(device) {
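// OpenGL numbers uniform buffer, sampler, texture and SSBO bindings in separate
// namespaces, so keep an independent counter per binding type across all bind groups.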
GLuint uboIndex = 0;
GLuint samplerIndex = 0;
GLuint sampledTextureIndex = 0;
GLuint ssboIndex = 0;
for (size_t group = 0; group < kMaxBindGroups; ++group) {
const auto& groupInfo = GetBindGroupLayout(group)->GetBindingInfo();
for (size_t binding = 0; binding < kMaxBindingsPerGroup; ++binding) {
if (!groupInfo.mask[binding]) {
continue;
}
switch (groupInfo.types[binding]) {
case nxt::BindingType::UniformBuffer:
indexInfo[group][binding] = uboIndex;
uboIndex ++;
break;
case nxt::BindingType::Sampler:
indexInfo[group][binding] = samplerIndex;
samplerIndex ++;
break;
case nxt::BindingType::SampledTexture:
indexInfo[group][binding] = sampledTextureIndex;
sampledTextureIndex ++;
break;
case nxt::BindingType::StorageBuffer:
indexInfo[group][binding] = ssboIndex;
ssboIndex ++;
break;
}
}
}
numSamplers = samplerIndex;
numSampledTextures = sampledTextureIndex;
}
const PipelineLayout::BindingIndexInfo& PipelineLayout::GetBindingIndexInfo() const {
return indexInfo;
}
GLuint PipelineLayout::GetTextureUnitsUsed() const {
return 0;
}
size_t PipelineLayout::GetNumSamplers() const {
return numSamplers;
}
size_t PipelineLayout::GetNumSampledTextures() const {
return numSampledTextures;
}
}
}

@ -0,0 +1,48 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_OPENGL_PIPELINELAYOUTGL_H_
#define BACKEND_OPENGL_PIPELINELAYOUTGL_H_
#include "common/PipelineLayout.h"
#include "glad/glad.h"
namespace backend {
namespace opengl {
class Device;
class PipelineLayout : public PipelineLayoutBase {
public:
PipelineLayout(Device* device, PipelineLayoutBuilder* builder);
using BindingIndexInfo = std::array<std::array<GLuint, kMaxBindingsPerGroup>, kMaxBindGroups>;
const BindingIndexInfo& GetBindingIndexInfo() const;
GLuint GetTextureUnitsUsed() const;
size_t GetNumSamplers() const;
size_t GetNumSampledTextures() const;
private:
Device* device;
BindingIndexInfo indexInfo;
size_t numSamplers;
size_t numSampledTextures;
};
}
}
#endif // BACKEND_OPENGL_PIPELINELAYOUTGL_H_

@ -0,0 +1,62 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "SamplerGL.h"
namespace backend {
namespace opengl {
namespace {
GLenum MagFilterMode(nxt::FilterMode filter) {
switch (filter) {
case nxt::FilterMode::Nearest:
return GL_NEAREST;
case nxt::FilterMode::Linear:
return GL_LINEAR;
}
}
GLenum MinFilterMode(nxt::FilterMode minFilter, nxt::FilterMode mipMapFilter) {
switch (minFilter) {
case nxt::FilterMode::Nearest:
switch (mipMapFilter) {
case nxt::FilterMode::Nearest:
return GL_NEAREST_MIPMAP_NEAREST;
case nxt::FilterMode::Linear:
return GL_NEAREST_MIPMAP_LINEAR;
}
case nxt::FilterMode::Linear:
switch (mipMapFilter) {
case nxt::FilterMode::Nearest:
return GL_LINEAR_MIPMAP_NEAREST;
case nxt::FilterMode::Linear:
return GL_LINEAR_MIPMAP_LINEAR;
}
}
}
}
Sampler::Sampler(Device* device, SamplerBuilder* builder)
: SamplerBase(builder), device(device) {
glGenSamplers(1, &handle);
glSamplerParameteri(handle, GL_TEXTURE_MAG_FILTER, MagFilterMode(builder->GetMagFilter()));
glSamplerParameteri(handle, GL_TEXTURE_MIN_FILTER, MinFilterMode(builder->GetMinFilter(), builder->GetMipMapFilter()));
}
GLuint Sampler::GetHandle() const {
return handle;
}
}
}

@ -0,0 +1,41 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_OPENGL_SAMPLERGL_H_
#define BACKEND_OPENGL_SAMPLERGL_H_
#include "common/Sampler.h"
#include "glad/glad.h"
namespace backend {
namespace opengl {
class Device;
class Sampler : public SamplerBase {
public:
Sampler(Device* device, SamplerBuilder* builder);
GLuint GetHandle() const;
private:
Device* device;
GLuint handle;
};
}
}
#endif // BACKEND_OPENGL_SAMPLERGL_H_

@ -0,0 +1,105 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ShaderModuleGL.h"
#include <spirv-cross/spirv_glsl.hpp>
#include <sstream>
namespace backend {
namespace opengl {
std::string GetBindingName(uint32_t group, uint32_t binding) {
std::ostringstream o;
o << "nxt_binding_" << group << "_" << binding;
return o.str();
}
bool operator < (const BindingLocation& a, const BindingLocation& b) {
return std::tie(a.group, a.binding) < std::tie(b.group, b.binding);
}
bool operator < (const CombinedSampler& a, const CombinedSampler& b) {
return std::tie(a.samplerLocation, a.textureLocation) < std::tie(b.samplerLocation, b.textureLocation);
}
std::string CombinedSampler::GetName() const {
std::ostringstream o;
o << "nxt_combined";
o << "_" << samplerLocation.group << "_" << samplerLocation.binding;
o << "_with_" << textureLocation.group << "_" << textureLocation.binding;
return o.str();
}
ShaderModule::ShaderModule(Device* device, ShaderModuleBuilder* builder)
: ShaderModuleBase(builder), device(device) {
spirv_cross::CompilerGLSL compiler(builder->AcquireSpirv());
spirv_cross::CompilerGLSL::Options options;
// TODO(cwallez@chromium.org): discover the backing context version and use that.
#if defined(__APPLE__)
options.version = 410;
#else
options.version = 450;
#endif
compiler.set_options(options);
ExtractSpirvInfo(compiler);
const auto& bindingInfo = GetBindingInfo();
// Extract binding names so that they can be used to look up locations in the program.
// Now translate the separate sampler / textures into combined ones and store their info.
// We need to do this before removing the set and binding decorations.
compiler.build_combined_image_samplers();
for (const auto& combined : compiler.get_combined_image_samplers()) {
combinedInfo.emplace_back();
auto& info = combinedInfo.back();
info.samplerLocation.group = compiler.get_decoration(combined.sampler_id, spv::DecorationDescriptorSet);
info.samplerLocation.binding = compiler.get_decoration(combined.sampler_id, spv::DecorationBinding);
info.textureLocation.group = compiler.get_decoration(combined.image_id, spv::DecorationDescriptorSet);
info.textureLocation.binding = compiler.get_decoration(combined.image_id, spv::DecorationBinding);
compiler.set_name(combined.combined_id, info.GetName());
}
// Change binding names to be "nxt_binding_<group>_<binding>".
// Also unsets the SPIRV "Binding" decoration as it outputs "layout(binding=)" which
// isn't supported on OSX's OpenGL.
for (uint32_t group = 0; group < kMaxBindGroups; ++group) {
for (uint32_t binding = 0; binding < kMaxBindingsPerGroup; ++binding) {
const auto& info = bindingInfo[group][binding];
if (info.used) {
compiler.set_name(info.base_type_id, GetBindingName(group, binding));
compiler.unset_decoration(info.id, spv::DecorationBinding);
compiler.unset_decoration(info.id, spv::DecorationDescriptorSet);
}
}
}
glslSource = compiler.compile();
}
const char* ShaderModule::GetSource() const {
return glslSource.c_str();
}
const ShaderModule::CombinedSamplerInfo& ShaderModule::GetCombinedSamplerInfo() const {
return combinedInfo;
}
}
}

@ -0,0 +1,60 @@
// Copyright 2017 The NXT Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BACKEND_OPENGL_SHADERMODULEGL_H_
#define BACKEND_OPENGL_SHADERMODULEGL_H_
#include "common/ShaderModule.h"
#include "glad/glad.h"
namespace backend {
namespace opengl {
class Device;
std::string GetBindingName(uint32_t group, uint32_t binding);
struct BindingLocation {
uint32_t group;
uint32_t binding;
};
bool operator < (const BindingLocation& a, const BindingLocation& b);
struct CombinedSampler {
BindingLocation samplerLocation;
BindingLocation textureLocation;
std::string GetName() const;
};
bool operator < (const CombinedSampler& a, const CombinedSampler& b);
class ShaderModule : public ShaderModuleBase {
public:
ShaderModule(Device* device, ShaderModuleBuilder* builder);
using CombinedSamplerInfo = std::vector<CombinedSampler>;
const char* GetSource() const;
const CombinedSamplerInfo& GetCombinedSamplerInfo() const;
private:
Device* device;
CombinedSamplerInfo combinedInfo;
std::string glslSource;
};
}
}
#endif // BACKEND_OPENGL_SHADERMODULEGL_H_
