Mirror of https://github.com/encounter/dawn-cmake.git (synced 2025-12-18 17:35:30 +00:00)
Consistent formatting for Dawn/Tint.
This CL updates the clang-format files to have a single shared format between Dawn and Tint. The major changes: indentation is 4 spaces, lines are 100 columns, and namespaces are not indented.

Bug: dawn:1339
Change-Id: I4208742c95643998d9fd14e77a9cc558071ded39
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/87603
Commit-Queue: Dan Sinclair <dsinclair@chromium.org>
Reviewed-by: Corentin Wallez <cwallez@chromium.org>
Kokoro: Kokoro <noreply+kokoro@google.com>
Committed by: Dawn LUCI CQ
Parent: 73b1d1dafa
Commit: 41e4d9a34c
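For reference, the shared style described in the commit message above corresponds roughly to the following clang-format options. This is an illustrative sketch of the relevant settings only, not the exact .clang-format file landed by this CL:

# Illustrative .clang-format sketch (assumed option values, not the file from this CL)
BasedOnStyle: Chromium
IndentWidth: 4
UseTab: Never
ColumnLimit: 100
NamespaceIndentation: None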
@@ -26,283 +26,278 @@

namespace dawn::native::opengl {

namespace {

struct Vendor {
    const char* vendorName;
    uint32_t vendorId;
};

const Vendor kVendors[] = {{"ATI", gpu_info::kVendorID_AMD},
                           {"ARM", gpu_info::kVendorID_ARM},
                           {"Imagination", gpu_info::kVendorID_ImgTec},
                           {"Intel", gpu_info::kVendorID_Intel},
                           {"NVIDIA", gpu_info::kVendorID_Nvidia},
                           {"Qualcomm", gpu_info::kVendorID_Qualcomm}};

uint32_t GetVendorIdFromVendors(const char* vendor) {
    uint32_t vendorId = 0;
    for (const auto& it : kVendors) {
        // Matching vendor name with vendor string
        if (strstr(vendor, it.vendorName) != nullptr) {
            vendorId = it.vendorId;
            break;
        }
    }
    return vendorId;
}

void KHRONOS_APIENTRY OnGLDebugMessage(GLenum source,
                                       GLenum type,
                                       GLuint id,
                                       GLenum severity,
                                       GLsizei length,
                                       const GLchar* message,
                                       const void* userParam) {
    const char* sourceText;
    switch (source) {
        case GL_DEBUG_SOURCE_API:
            sourceText = "OpenGL";
            break;
        case GL_DEBUG_SOURCE_WINDOW_SYSTEM:
            sourceText = "Window System";
            break;
        case GL_DEBUG_SOURCE_SHADER_COMPILER:
            sourceText = "Shader Compiler";
            break;
        case GL_DEBUG_SOURCE_THIRD_PARTY:
            sourceText = "Third Party";
            break;
        case GL_DEBUG_SOURCE_APPLICATION:
            sourceText = "Application";
            break;
        case GL_DEBUG_SOURCE_OTHER:
            sourceText = "Other";
            break;
        default:
            sourceText = "UNKNOWN";
            break;
    }

    const char* severityText;
    switch (severity) {
        case GL_DEBUG_SEVERITY_HIGH:
            severityText = "High";
            break;
        case GL_DEBUG_SEVERITY_MEDIUM:
            severityText = "Medium";
            break;
        case GL_DEBUG_SEVERITY_LOW:
            severityText = "Low";
            break;
        case GL_DEBUG_SEVERITY_NOTIFICATION:
            severityText = "Notification";
            break;
        default:
            severityText = "UNKNOWN";
            break;
    }

    if (type == GL_DEBUG_TYPE_ERROR) {
        dawn::WarningLog() << "OpenGL error:"
                           << "\n    Source: " << sourceText      //
                           << "\n    ID: " << id                  //
                           << "\n    Severity: " << severityText  //
                           << "\n    Message: " << message;

        // Abort on an error when in Debug mode.
        UNREACHABLE();
    }
}

}  // anonymous namespace

// The OpenGL backend's Adapter.

class Adapter : public AdapterBase {
  public:
    Adapter(InstanceBase* instance, wgpu::BackendType backendType)
        : AdapterBase(instance, backendType) {}

    MaybeError InitializeGLFunctions(void* (*getProc)(const char*)) {
        // Use getProc to populate the dispatch table
        return mFunctions.Initialize(getProc);
    }

    ~Adapter() override = default;

    // AdapterBase Implementation
    bool SupportsExternalImages() const override {
        // Via dawn::native::opengl::WrapExternalEGLImage
        return GetBackendType() == wgpu::BackendType::OpenGLES;
    }

  private:
    MaybeError InitializeImpl() override {
        if (mFunctions.GetVersion().IsES()) {
            ASSERT(GetBackendType() == wgpu::BackendType::OpenGLES);
        } else {
            ASSERT(GetBackendType() == wgpu::BackendType::OpenGL);
        }

        // Use the debug output functionality to get notified about GL errors
        // TODO(cwallez@chromium.org): add support for the KHR_debug and ARB_debug_output
        // extensions
        bool hasDebugOutput = mFunctions.IsAtLeastGL(4, 3) || mFunctions.IsAtLeastGLES(3, 2);

        if (GetInstance()->IsBackendValidationEnabled() && hasDebugOutput) {
            mFunctions.Enable(GL_DEBUG_OUTPUT);
            mFunctions.Enable(GL_DEBUG_OUTPUT_SYNCHRONOUS);

            // Any GL error; dangerous undefined behavior; any shader compiler and linker errors
            mFunctions.DebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DEBUG_SEVERITY_HIGH, 0,
                                           nullptr, GL_TRUE);

            // Severe performance warnings; GLSL or other shader compiler and linker warnings;
            // use of currently deprecated behavior
            mFunctions.DebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DEBUG_SEVERITY_MEDIUM, 0,
                                           nullptr, GL_TRUE);

            // Performance warnings from redundant state changes; trivial undefined behavior
            // This is disabled because we do an incredible amount of redundant state changes.
            mFunctions.DebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DEBUG_SEVERITY_LOW, 0,
                                           nullptr, GL_FALSE);

            // Any message which is not an error or performance concern
            mFunctions.DebugMessageControl(GL_DONT_CARE, GL_DONT_CARE,
                                           GL_DEBUG_SEVERITY_NOTIFICATION, 0, nullptr, GL_FALSE);
            mFunctions.DebugMessageCallback(&OnGLDebugMessage, nullptr);
        }

        // Set state that never changes between devices.
        mFunctions.Enable(GL_DEPTH_TEST);
        mFunctions.Enable(GL_SCISSOR_TEST);
        mFunctions.Enable(GL_PRIMITIVE_RESTART_FIXED_INDEX);
        if (mFunctions.GetVersion().IsDesktop()) {
            // These are not necessary on GLES. The functionality is enabled by default, and
            // works by specifying sample counts and SRGB textures, respectively.
            mFunctions.Enable(GL_MULTISAMPLE);
            mFunctions.Enable(GL_FRAMEBUFFER_SRGB);
        }
        mFunctions.Enable(GL_SAMPLE_MASK);

        mName = reinterpret_cast<const char*>(mFunctions.GetString(GL_RENDERER));

        // Workaroud to find vendor id from vendor name
        const char* vendor = reinterpret_cast<const char*>(mFunctions.GetString(GL_VENDOR));
        mVendorId = GetVendorIdFromVendors(vendor);

        mDriverDescription = std::string("OpenGL version ") +
                             reinterpret_cast<const char*>(mFunctions.GetString(GL_VERSION));

        if (mName.find("SwiftShader") != std::string::npos) {
            mAdapterType = wgpu::AdapterType::CPU;
        }

        return {};
    }

    MaybeError InitializeSupportedFeaturesImpl() override {
        // TextureCompressionBC
        {
            // BC1, BC2 and BC3 are not supported in OpenGL or OpenGL ES core features.
            bool supportsS3TC =
                mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_s3tc") ||
                (mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_dxt1") &&
                 mFunctions.IsGLExtensionSupported("GL_ANGLE_texture_compression_dxt3") &&
                 mFunctions.IsGLExtensionSupported("GL_ANGLE_texture_compression_dxt5"));

            // COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT, COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT and
            // COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT requires both GL_EXT_texture_sRGB and
            // GL_EXT_texture_compression_s3tc on desktop OpenGL drivers.
            // (https://www.khronos.org/registry/OpenGL/extensions/EXT/EXT_texture_sRGB.txt)
            bool supportsTextureSRGB = mFunctions.IsGLExtensionSupported("GL_EXT_texture_sRGB");

            // GL_EXT_texture_compression_s3tc_srgb is an extension in OpenGL ES.
            // NVidia GLES drivers don't support this extension, but they do support
            // GL_NV_sRGB_formats. (Note that GL_EXT_texture_sRGB does not exist on ES.
            // GL_EXT_sRGB does (core in ES 3.0), but it does not automatically provide S3TC
            // SRGB support even if S3TC is supported; see
            // https://www.khronos.org/registry/OpenGL/extensions/EXT/EXT_sRGB.txt.)
            bool supportsS3TCSRGB =
                mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_s3tc_srgb") ||
                mFunctions.IsGLExtensionSupported("GL_NV_sRGB_formats");

            // BC4 and BC5
            bool supportsRGTC =
                mFunctions.IsAtLeastGL(3, 0) ||
                mFunctions.IsGLExtensionSupported("GL_ARB_texture_compression_rgtc") ||
                mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_rgtc");

            // BC6 and BC7
            bool supportsBPTC =
                mFunctions.IsAtLeastGL(4, 2) ||
                mFunctions.IsGLExtensionSupported("GL_ARB_texture_compression_bptc") ||
                mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_bptc");

            if (supportsS3TC && (supportsTextureSRGB || supportsS3TCSRGB) && supportsRGTC &&
                supportsBPTC) {
                mSupportedFeatures.EnableFeature(dawn::native::Feature::TextureCompressionBC);
            }
            mSupportedFeatures.EnableFeature(Feature::Depth24UnormStencil8);
        }

        return {};
    }

    MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override {
        GetDefaultLimits(&limits->v1);
        return {};
    }

    ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(const DeviceDescriptor* descriptor) override {
        // There is no limit on the number of devices created from this adapter because they can
        // all share the same backing OpenGL context.
        return Device::Create(this, descriptor, mFunctions);
    }

    OpenGLFunctions mFunctions;
};

// Implementation of the OpenGL backend's BackendConnection

Backend::Backend(InstanceBase* instance, wgpu::BackendType backendType)
    : BackendConnection(instance, backendType) {}

std::vector<Ref<AdapterBase>> Backend::DiscoverDefaultAdapters() {
    // The OpenGL backend needs at least "getProcAddress" to discover an adapter.
    return {};
}

ResultOrError<std::vector<Ref<AdapterBase>>> Backend::DiscoverAdapters(
    const AdapterDiscoveryOptionsBase* optionsBase) {
    // TODO(cwallez@chromium.org): For now only create a single OpenGL adapter because don't
    // know how to handle MakeCurrent.
    DAWN_INVALID_IF(mCreatedAdapter, "The OpenGL backend can only create a single adapter.");

    ASSERT(static_cast<wgpu::BackendType>(optionsBase->backendType) == GetType());
    const AdapterDiscoveryOptions* options =
        static_cast<const AdapterDiscoveryOptions*>(optionsBase);

    DAWN_INVALID_IF(options->getProc == nullptr, "AdapterDiscoveryOptions::getProc must be set");

    Ref<Adapter> adapter = AcquireRef(
        new Adapter(GetInstance(), static_cast<wgpu::BackendType>(optionsBase->backendType)));
    DAWN_TRY(adapter->InitializeGLFunctions(options->getProc));
    DAWN_TRY(adapter->Initialize());

    mCreatedAdapter = true;
    std::vector<Ref<AdapterBase>> adapters{std::move(adapter)};
    return std::move(adapters);
}

BackendConnection* Connect(InstanceBase* instance, wgpu::BackendType backendType) {
    return new Backend(instance, backendType);
}

}  // namespace dawn::native::opengl
@@ -21,17 +21,17 @@

namespace dawn::native::opengl {

class Backend : public BackendConnection {
  public:
    Backend(InstanceBase* instance, wgpu::BackendType backendType);

    std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() override;
    ResultOrError<std::vector<Ref<AdapterBase>>> DiscoverAdapters(
        const AdapterDiscoveryOptionsBase* options) override;

  private:
    bool mCreatedAdapter = false;
};

}  // namespace dawn::native::opengl
@@ -20,46 +20,45 @@

namespace dawn::native::opengl {

MaybeError ValidateGLBindGroupDescriptor(const BindGroupDescriptor* descriptor) {
    const BindGroupLayoutBase::BindingMap& bindingMap = descriptor->layout->GetBindingMap();
    for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
        const BindGroupEntry& entry = descriptor->entries[i];

        const auto& it = bindingMap.find(BindingNumber(entry.binding));
        BindingIndex bindingIndex = it->second;
        ASSERT(bindingIndex < descriptor->layout->GetBindingCount());

        const BindingInfo& bindingInfo = descriptor->layout->GetBindingInfo(bindingIndex);
        if (bindingInfo.bindingType == BindingInfoType::StorageTexture) {
            ASSERT(entry.textureView != nullptr);
            const uint32_t textureViewLayerCount = entry.textureView->GetLayerCount();
            DAWN_INVALID_IF(
                textureViewLayerCount != 1 &&
                    textureViewLayerCount != entry.textureView->GetTexture()->GetArrayLayers(),
                "%s binds %u layers. Currently the OpenGL backend only supports either binding "
                "1 layer or the all layers (%u) for storage texture.",
                entry.textureView, textureViewLayerCount,
                entry.textureView->GetTexture()->GetArrayLayers());
        }
    }

    return {};
}

BindGroup::BindGroup(Device* device, const BindGroupDescriptor* descriptor)
    : BindGroupBase(this, device, descriptor) {}

BindGroup::~BindGroup() = default;

void BindGroup::DestroyImpl() {
    BindGroupBase::DestroyImpl();
    ToBackend(GetLayout())->DeallocateBindGroup(this);
}

// static
Ref<BindGroup> BindGroup::Create(Device* device, const BindGroupDescriptor* descriptor) {
    return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
}

}  // namespace dawn::native::opengl
@@ -20,21 +20,21 @@

namespace dawn::native::opengl {

class Device;

MaybeError ValidateGLBindGroupDescriptor(const BindGroupDescriptor* descriptor);

class BindGroup final : public BindGroupBase, public PlacementAllocated {
  public:
    static Ref<BindGroup> Create(Device* device, const BindGroupDescriptor* descriptor);

    BindGroup(Device* device, const BindGroupDescriptor* descriptor);

  private:
    ~BindGroup() override;

    void DestroyImpl() override;
};

}  // namespace dawn::native::opengl
@@ -16,20 +16,19 @@

namespace dawn::native::opengl {

BindGroupLayout::BindGroupLayout(DeviceBase* device,
                                 const BindGroupLayoutDescriptor* descriptor,
                                 PipelineCompatibilityToken pipelineCompatibilityToken)
    : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken),
      mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {}

Ref<BindGroup> BindGroupLayout::AllocateBindGroup(Device* device,
                                                  const BindGroupDescriptor* descriptor) {
    return AcquireRef(mBindGroupAllocator.Allocate(device, descriptor));
}

void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup) {
    mBindGroupAllocator.Deallocate(bindGroup);
}

}  // namespace dawn::native::opengl
@@ -21,21 +21,21 @@

namespace dawn::native::opengl {

class Device;

class BindGroupLayout final : public BindGroupLayoutBase {
  public:
    BindGroupLayout(DeviceBase* device,
                    const BindGroupLayoutDescriptor* descriptor,
                    PipelineCompatibilityToken pipelineCompatibilityToken);

    Ref<BindGroup> AllocateBindGroup(Device* device, const BindGroupDescriptor* descriptor);
    void DeallocateBindGroup(BindGroup* bindGroup);

  private:
    ~BindGroupLayout() override = default;
    SlabAllocator<BindGroup> mBindGroupAllocator;
};

}  // namespace dawn::native::opengl
@@ -23,166 +23,165 @@

namespace dawn::native::opengl {

// Buffer

// static
ResultOrError<Ref<Buffer>> Buffer::CreateInternalBuffer(Device* device,
                                                        const BufferDescriptor* descriptor,
                                                        bool shouldLazyClear) {
    Ref<Buffer> buffer = AcquireRef(new Buffer(device, descriptor, shouldLazyClear));
    if (descriptor->mappedAtCreation) {
        DAWN_TRY(buffer->MapAtCreationInternal());
    }

    return std::move(buffer);
}

Buffer::Buffer(Device* device, const BufferDescriptor* descriptor)
    : BufferBase(device, descriptor) {
    // Allocate at least 4 bytes so clamped accesses are always in bounds.
    mAllocatedSize = std::max(GetSize(), uint64_t(4u));

    device->gl.GenBuffers(1, &mBuffer);
    device->gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);

    // The buffers with mappedAtCreation == true will be initialized in
    // BufferBase::MapAtCreation().
    if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting) &&
        !descriptor->mappedAtCreation) {
        std::vector<uint8_t> clearValues(mAllocatedSize, 1u);
        device->gl.BufferData(GL_ARRAY_BUFFER, mAllocatedSize, clearValues.data(), GL_STATIC_DRAW);
    } else {
        // Buffers start zeroed if you pass nullptr to glBufferData.
        device->gl.BufferData(GL_ARRAY_BUFFER, mAllocatedSize, nullptr, GL_STATIC_DRAW);
    }
}

Buffer::Buffer(Device* device, const BufferDescriptor* descriptor, bool shouldLazyClear)
    : Buffer(device, descriptor) {
    if (!shouldLazyClear) {
        SetIsDataInitialized();
    }
}

Buffer::~Buffer() = default;

GLuint Buffer::GetHandle() const {
    return mBuffer;
}

bool Buffer::EnsureDataInitialized() {
    if (!NeedsInitialization()) {
        return false;
    }

    InitializeToZero();
    return true;
}

bool Buffer::EnsureDataInitializedAsDestination(uint64_t offset, uint64_t size) {
    if (!NeedsInitialization()) {
        return false;
    }

    if (IsFullBufferRange(offset, size)) {
        SetIsDataInitialized();
        return false;
    }

    InitializeToZero();
    return true;
}

bool Buffer::EnsureDataInitializedAsDestination(const CopyTextureToBufferCmd* copy) {
    if (!NeedsInitialization()) {
        return false;
    }

    if (IsFullBufferOverwrittenInTextureToBufferCopy(copy)) {
        SetIsDataInitialized();
        return false;
    }

    InitializeToZero();
    return true;
}

void Buffer::InitializeToZero() {
    ASSERT(NeedsInitialization());

    const uint64_t size = GetAllocatedSize();
    Device* device = ToBackend(GetDevice());

    const std::vector<uint8_t> clearValues(size, 0u);
    device->gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
    device->gl.BufferSubData(GL_ARRAY_BUFFER, 0, size, clearValues.data());
    device->IncrementLazyClearCountForTesting();

    SetIsDataInitialized();
}

bool Buffer::IsCPUWritableAtCreation() const {
    // TODO(enga): All buffers in GL can be mapped. Investigate if mapping them will cause the
    // driver to migrate it to shared memory.
    return true;
}

MaybeError Buffer::MapAtCreationImpl() {
    const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
    gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
    mMappedData = gl.MapBufferRange(GL_ARRAY_BUFFER, 0, GetSize(), GL_MAP_WRITE_BIT);
    return {};
}

MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
    const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;

    // It is an error to map an empty range in OpenGL. We always have at least a 4-byte buffer
    // so we extend the range to be 4 bytes.
    if (size == 0) {
        if (offset != 0) {
            offset -= 4;
        }
        size = 4;
    }

    EnsureDataInitialized();

    // This does GPU->CPU synchronization, we could require a high
    // version of OpenGL that would let us map the buffer unsynchronized.
    gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
    void* mappedData = nullptr;
    if (mode & wgpu::MapMode::Read) {
        mappedData = gl.MapBufferRange(GL_ARRAY_BUFFER, offset, size, GL_MAP_READ_BIT);
    } else {
        ASSERT(mode & wgpu::MapMode::Write);
        mappedData = gl.MapBufferRange(GL_ARRAY_BUFFER, offset, size, GL_MAP_WRITE_BIT);
    }

    // The frontend asks that the pointer returned by GetMappedPointerImpl is from the start of
    // the resource but OpenGL gives us the pointer at offset. Remove the offset.
    mMappedData = static_cast<uint8_t*>(mappedData) - offset;
    return {};
}

void* Buffer::GetMappedPointerImpl() {
    // The mapping offset has already been removed.
    return mMappedData;
}

void Buffer::UnmapImpl() {
    const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;

    gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
    gl.UnmapBuffer(GL_ARRAY_BUFFER);
    mMappedData = nullptr;
}

void Buffer::DestroyImpl() {
    BufferBase::DestroyImpl();
    ToBackend(GetDevice())->gl.DeleteBuffers(1, &mBuffer);
    mBuffer = 0;
}

}  // namespace dawn::native::opengl
@@ -21,37 +21,37 @@

namespace dawn::native::opengl {

class Device;

class Buffer final : public BufferBase {
  public:
    static ResultOrError<Ref<Buffer>> CreateInternalBuffer(Device* device,
                                                           const BufferDescriptor* descriptor,
                                                           bool shouldLazyClear);

    Buffer(Device* device, const BufferDescriptor* descriptor);

    GLuint GetHandle() const;

    bool EnsureDataInitialized();
    bool EnsureDataInitializedAsDestination(uint64_t offset, uint64_t size);
    bool EnsureDataInitializedAsDestination(const CopyTextureToBufferCmd* copy);

  private:
    Buffer(Device* device, const BufferDescriptor* descriptor, bool shouldLazyClear);
    ~Buffer() override;
    MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
    void UnmapImpl() override;
    void DestroyImpl() override;
    bool IsCPUWritableAtCreation() const override;
    MaybeError MapAtCreationImpl() override;
    void* GetMappedPointerImpl() override;

    void InitializeToZero();

    GLuint mBuffer = 0;
    void* mMappedData = nullptr;
};

}  // namespace dawn::native::opengl
File diff suppressed because it is too large
@@ -18,32 +18,32 @@

#include "dawn/native/CommandBuffer.h"

namespace dawn::native {
struct BeginRenderPassCmd;
}  // namespace dawn::native

namespace dawn::native::opengl {

class Device;
struct OpenGLFunctions;

class CommandBuffer final : public CommandBufferBase {
  public:
    CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);

    MaybeError Execute();

  private:
    MaybeError ExecuteComputePass();
    MaybeError ExecuteRenderPass(BeginRenderPassCmd* renderPass);
};

// Like glTexSubImage*, the "data" argument is either a pointer to image data or
// an offset if a PBO is bound.
void DoTexSubImage(const OpenGLFunctions& gl,
                   const TextureCopy& destination,
                   const void* data,
                   const TextureDataLayout& dataLayout,
                   const Extent3D& copySize);

}  // namespace dawn::native::opengl

#endif  // SRC_DAWN_NATIVE_OPENGL_COMMANDBUFFERGL_H_
@@ -18,28 +18,27 @@

namespace dawn::native::opengl {

// static
Ref<ComputePipeline> ComputePipeline::CreateUninitialized(
    Device* device,
    const ComputePipelineDescriptor* descriptor) {
    return AcquireRef(new ComputePipeline(device, descriptor));
}

ComputePipeline::~ComputePipeline() = default;

void ComputePipeline::DestroyImpl() {
    ComputePipelineBase::DestroyImpl();
    DeleteProgram(ToBackend(GetDevice())->gl);
}

MaybeError ComputePipeline::Initialize() {
    DAWN_TRY(InitializeBase(ToBackend(GetDevice())->gl, ToBackend(GetLayout()), GetAllStages()));
    return {};
}

void ComputePipeline::ApplyNow() {
    PipelineGL::ApplyNow(ToBackend(GetDevice())->gl);
}

}  // namespace dawn::native::opengl
@@ -23,23 +23,22 @@

namespace dawn::native::opengl {

class Device;

class ComputePipeline final : public ComputePipelineBase, public PipelineGL {
  public:
    static Ref<ComputePipeline> CreateUninitialized(Device* device,
                                                    const ComputePipelineDescriptor* descriptor);

    void ApplyNow();

    MaybeError Initialize() override;

  private:
    using ComputePipelineBase::ComputePipelineBase;
    ~ComputePipeline() override;
    void DestroyImpl() override;
};

}  // namespace dawn::native::opengl
@@ -34,305 +34,302 @@

namespace dawn::native::opengl {

// static
ResultOrError<Ref<Device>> Device::Create(AdapterBase* adapter,
                                          const DeviceDescriptor* descriptor,
                                          const OpenGLFunctions& functions) {
    Ref<Device> device = AcquireRef(new Device(adapter, descriptor, functions));
    DAWN_TRY(device->Initialize(descriptor));
    return device;
}

Device::Device(AdapterBase* adapter,
               const DeviceDescriptor* descriptor,
               const OpenGLFunctions& functions)
    : DeviceBase(adapter, descriptor), gl(functions) {}

Device::~Device() {
    Destroy();
}

MaybeError Device::Initialize(const DeviceDescriptor* descriptor) {
    InitTogglesFromDriver();
    mFormatTable = BuildGLFormatTable(GetBGRAInternalFormat());

    return DeviceBase::Initialize(AcquireRef(new Queue(this, &descriptor->defaultQueue)));
}

void Device::InitTogglesFromDriver() {
    bool supportsBaseVertex = gl.IsAtLeastGLES(3, 2) || gl.IsAtLeastGL(3, 2);

    bool supportsBaseInstance = gl.IsAtLeastGLES(3, 2) || gl.IsAtLeastGL(4, 2);

    // TODO(crbug.com/dawn/582): Use OES_draw_buffers_indexed where available.
    bool supportsIndexedDrawBuffers = gl.IsAtLeastGLES(3, 2) || gl.IsAtLeastGL(3, 0);

    bool supportsSnormRead =
        gl.IsAtLeastGL(4, 4) || gl.IsGLExtensionSupported("GL_EXT_render_snorm");

    bool supportsDepthRead = gl.IsAtLeastGL(3, 0) || gl.IsGLExtensionSupported("GL_NV_read_depth");

    bool supportsStencilRead =
        gl.IsAtLeastGL(3, 0) || gl.IsGLExtensionSupported("GL_NV_read_stencil");

    bool supportsDepthStencilRead =
        gl.IsAtLeastGL(3, 0) || gl.IsGLExtensionSupported("GL_NV_read_depth_stencil");

    // Desktop GL supports BGRA textures via swizzling in the driver; ES requires an extension.
    bool supportsBGRARead =
        gl.GetVersion().IsDesktop() || gl.IsGLExtensionSupported("GL_EXT_read_format_bgra");

    bool supportsSampleVariables = gl.IsAtLeastGL(4, 0) || gl.IsAtLeastGLES(3, 2) ||
                                   gl.IsGLExtensionSupported("GL_OES_sample_variables");

    // TODO(crbug.com/dawn/343): We can support the extension variants, but need to load the EXT
    // procs without the extension suffix.
    // We'll also need emulation of shader builtins gl_BaseVertex and gl_BaseInstance.

    // supportsBaseVertex |=
    //     (gl.IsAtLeastGLES(2, 0) &&
    //      (gl.IsGLExtensionSupported("OES_draw_elements_base_vertex") ||
    //       gl.IsGLExtensionSupported("EXT_draw_elements_base_vertex"))) ||
    //     (gl.IsAtLeastGL(3, 1) && gl.IsGLExtensionSupported("ARB_draw_elements_base_vertex"));

    // supportsBaseInstance |=
    //     (gl.IsAtLeastGLES(3, 1) && gl.IsGLExtensionSupported("EXT_base_instance")) ||
    //     (gl.IsAtLeastGL(3, 1) && gl.IsGLExtensionSupported("ARB_base_instance"));

    // TODO(crbug.com/dawn/343): Investigate emulation.
    SetToggle(Toggle::DisableBaseVertex, !supportsBaseVertex);
    SetToggle(Toggle::DisableBaseInstance, !supportsBaseInstance);
    SetToggle(Toggle::DisableIndexedDrawBuffers, !supportsIndexedDrawBuffers);
    SetToggle(Toggle::DisableSnormRead, !supportsSnormRead);
    SetToggle(Toggle::DisableDepthRead, !supportsDepthRead);
    SetToggle(Toggle::DisableStencilRead, !supportsStencilRead);
    SetToggle(Toggle::DisableDepthStencilRead, !supportsDepthStencilRead);
    SetToggle(Toggle::DisableBGRARead, !supportsBGRARead);
    SetToggle(Toggle::DisableSampleVariables, !supportsSampleVariables);
    SetToggle(Toggle::FlushBeforeClientWaitSync, gl.GetVersion().IsES());
    // For OpenGL ES, we must use a placeholder fragment shader for vertex-only render pipeline.
    SetToggle(Toggle::UsePlaceholderFragmentInVertexOnlyPipeline, gl.GetVersion().IsES());
}

const GLFormat& Device::GetGLFormat(const Format& format) {
    ASSERT(format.isSupported);
    ASSERT(format.GetIndex() < mFormatTable.size());

    const GLFormat& result = mFormatTable[format.GetIndex()];
    ASSERT(result.isSupportedOnBackend);
    return result;
}

GLenum Device::GetBGRAInternalFormat() const {
    if (gl.IsGLExtensionSupported("GL_EXT_texture_format_BGRA8888") ||
        gl.IsGLExtensionSupported("GL_APPLE_texture_format_BGRA8888")) {
        return GL_BGRA8_EXT;
    } else {
        // Desktop GL will swizzle to/from RGBA8 for BGRA formats.
        return GL_RGBA8;
    }
}

ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
    const BindGroupDescriptor* descriptor) {
    DAWN_TRY(ValidateGLBindGroupDescriptor(descriptor));
    return BindGroup::Create(this, descriptor);
}
ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
    const BindGroupLayoutDescriptor* descriptor,
    PipelineCompatibilityToken pipelineCompatibilityToken) {
    return AcquireRef(new BindGroupLayout(this, descriptor, pipelineCompatibilityToken));
}
ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
    return AcquireRef(new Buffer(this, descriptor));
}
ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
    CommandEncoder* encoder,
    const CommandBufferDescriptor* descriptor) {
    return AcquireRef(new CommandBuffer(encoder, descriptor));
}
Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
    const ComputePipelineDescriptor* descriptor) {
    return ComputePipeline::CreateUninitialized(this, descriptor);
}
ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
    const PipelineLayoutDescriptor* descriptor) {
    return AcquireRef(new PipelineLayout(this, descriptor));
}
ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(const QuerySetDescriptor* descriptor) {
    return AcquireRef(new QuerySet(this, descriptor));
}
Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
    const RenderPipelineDescriptor* descriptor) {
    return RenderPipeline::CreateUninitialized(this, descriptor);
}
ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
    return AcquireRef(new Sampler(this, descriptor));
}
ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
    const ShaderModuleDescriptor* descriptor,
    ShaderModuleParseResult* parseResult) {
    return ShaderModule::Create(this, descriptor, parseResult);
}
ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
    const SwapChainDescriptor* descriptor) {
    return AcquireRef(new SwapChain(this, descriptor));
}
ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
    Surface* surface,
    NewSwapChainBase* previousSwapChain,
    const SwapChainDescriptor* descriptor) {
    return DAWN_FORMAT_VALIDATION_ERROR("New swapchains not implemented.");
}
ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
    return AcquireRef(new Texture(this, descriptor));
}
ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
    TextureBase* texture,
    const TextureViewDescriptor* descriptor) {
    return AcquireRef(new TextureView(texture, descriptor));
}

void Device::SubmitFenceSync() {
    GLsync sync = gl.FenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
    IncrementLastSubmittedCommandSerial();
    mFencesInFlight.emplace(sync, GetLastSubmittedCommandSerial());
}

MaybeError Device::ValidateEGLImageCanBeWrapped(const TextureDescriptor* descriptor,
                                                ::EGLImage image) {
    DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
                    "Texture dimension (%s) is not %s.", descriptor->dimension,
                    wgpu::TextureDimension::e2D);

    DAWN_INVALID_IF(descriptor->mipLevelCount != 1, "Mip level count (%u) is not 1.",
                    descriptor->mipLevelCount);

    DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers != 1, "Array layer count (%u) is not 1.",
                    descriptor->size.depthOrArrayLayers);

    DAWN_INVALID_IF(descriptor->sampleCount != 1, "Sample count (%u) is not 1.",
                    descriptor->sampleCount);

    DAWN_INVALID_IF(descriptor->usage &
                        (wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding),
                    "Texture usage (%s) cannot have %s or %s.", descriptor->usage,
                    wgpu::TextureUsage::TextureBinding, wgpu::TextureUsage::StorageBinding);

    return {};
}
TextureBase* Device::CreateTextureWrappingEGLImage(const ExternalImageDescriptor* descriptor,
                                                   ::EGLImage image) {
    const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);

    if (ConsumedError(ValidateTextureDescriptor(this, textureDescriptor))) {
        return nullptr;
    }
    if (ConsumedError(ValidateEGLImageCanBeWrapped(textureDescriptor, image))) {
        return nullptr;
    }

    GLuint tex;
    gl.GenTextures(1, &tex);
    gl.BindTexture(GL_TEXTURE_2D, tex);
    gl.EGLImageTargetTexture2DOES(GL_TEXTURE_2D, image);

    GLint width, height, internalFormat;
    gl.GetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &width);
    gl.GetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &height);
    gl.GetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_INTERNAL_FORMAT, &internalFormat);

    if (textureDescriptor->size.width != static_cast<uint32_t>(width) ||
        textureDescriptor->size.height != static_cast<uint32_t>(height) ||
        textureDescriptor->size.depthOrArrayLayers != 1) {
        ConsumedError(DAWN_FORMAT_VALIDATION_ERROR(
            "EGLImage size (width: %u, height: %u, depth: 1) doesn't match descriptor size %s.",
            width, height, &textureDescriptor->size));
        gl.DeleteTextures(1, &tex);
        return nullptr;
    }

    // TODO(dawn:803): Validate the OpenGL texture format from the EGLImage against the format
    // in the passed-in TextureDescriptor.
    return new Texture(this, textureDescriptor, tex, TextureBase::TextureState::OwnedInternal);
}

MaybeError Device::TickImpl() {
    return {};
}

ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
    ExecutionSerial fenceSerial{0};
    while (!mFencesInFlight.empty()) {
        auto [sync, tentativeSerial] = mFencesInFlight.front();

        // Fence are added in order, so we can stop searching as soon
|
||||
// as we see one that's not ready.
|
||||
|
||||
bool supportsBaseInstance = gl.IsAtLeastGLES(3, 2) || gl.IsAtLeastGL(4, 2);
|
||||
|
||||
// TODO(crbug.com/dawn/582): Use OES_draw_buffers_indexed where available.
|
||||
bool supportsIndexedDrawBuffers = gl.IsAtLeastGLES(3, 2) || gl.IsAtLeastGL(3, 0);
|
||||
|
||||
bool supportsSnormRead =
|
||||
gl.IsAtLeastGL(4, 4) || gl.IsGLExtensionSupported("GL_EXT_render_snorm");
|
||||
|
||||
bool supportsDepthRead =
|
||||
gl.IsAtLeastGL(3, 0) || gl.IsGLExtensionSupported("GL_NV_read_depth");
|
||||
|
||||
bool supportsStencilRead =
|
||||
gl.IsAtLeastGL(3, 0) || gl.IsGLExtensionSupported("GL_NV_read_stencil");
|
||||
|
||||
bool supportsDepthStencilRead =
|
||||
gl.IsAtLeastGL(3, 0) || gl.IsGLExtensionSupported("GL_NV_read_depth_stencil");
|
||||
|
||||
// Desktop GL supports BGRA textures via swizzling in the driver; ES requires an extension.
|
||||
bool supportsBGRARead =
|
||||
gl.GetVersion().IsDesktop() || gl.IsGLExtensionSupported("GL_EXT_read_format_bgra");
|
||||
|
||||
bool supportsSampleVariables = gl.IsAtLeastGL(4, 0) || gl.IsAtLeastGLES(3, 2) ||
|
||||
gl.IsGLExtensionSupported("GL_OES_sample_variables");
|
||||
|
||||
// TODO(crbug.com/dawn/343): We can support the extension variants, but need to load the EXT
|
||||
// procs without the extension suffix.
|
||||
// We'll also need emulation of shader builtins gl_BaseVertex and gl_BaseInstance.
|
||||
|
||||
// supportsBaseVertex |=
|
||||
// (gl.IsAtLeastGLES(2, 0) &&
|
||||
// (gl.IsGLExtensionSupported("OES_draw_elements_base_vertex") ||
|
||||
// gl.IsGLExtensionSupported("EXT_draw_elements_base_vertex"))) ||
|
||||
// (gl.IsAtLeastGL(3, 1) && gl.IsGLExtensionSupported("ARB_draw_elements_base_vertex"));
|
||||
|
||||
// supportsBaseInstance |=
|
||||
// (gl.IsAtLeastGLES(3, 1) && gl.IsGLExtensionSupported("EXT_base_instance")) ||
|
||||
// (gl.IsAtLeastGL(3, 1) && gl.IsGLExtensionSupported("ARB_base_instance"));
|
||||
|
||||
// TODO(crbug.com/dawn/343): Investigate emulation.
|
||||
SetToggle(Toggle::DisableBaseVertex, !supportsBaseVertex);
|
||||
SetToggle(Toggle::DisableBaseInstance, !supportsBaseInstance);
|
||||
SetToggle(Toggle::DisableIndexedDrawBuffers, !supportsIndexedDrawBuffers);
|
||||
SetToggle(Toggle::DisableSnormRead, !supportsSnormRead);
|
||||
SetToggle(Toggle::DisableDepthRead, !supportsDepthRead);
|
||||
SetToggle(Toggle::DisableStencilRead, !supportsStencilRead);
|
||||
SetToggle(Toggle::DisableDepthStencilRead, !supportsDepthStencilRead);
|
||||
SetToggle(Toggle::DisableBGRARead, !supportsBGRARead);
|
||||
SetToggle(Toggle::DisableSampleVariables, !supportsSampleVariables);
|
||||
SetToggle(Toggle::FlushBeforeClientWaitSync, gl.GetVersion().IsES());
|
||||
// For OpenGL ES, we must use a placeholder fragment shader for vertex-only render pipeline.
|
||||
SetToggle(Toggle::UsePlaceholderFragmentInVertexOnlyPipeline, gl.GetVersion().IsES());
|
||||
}
|
||||
|
||||
const GLFormat& Device::GetGLFormat(const Format& format) {
|
||||
ASSERT(format.isSupported);
|
||||
ASSERT(format.GetIndex() < mFormatTable.size());
|
||||
|
||||
const GLFormat& result = mFormatTable[format.GetIndex()];
|
||||
ASSERT(result.isSupportedOnBackend);
|
||||
return result;
|
||||
}
|
||||
|
||||
GLenum Device::GetBGRAInternalFormat() const {
|
||||
if (gl.IsGLExtensionSupported("GL_EXT_texture_format_BGRA8888") ||
|
||||
gl.IsGLExtensionSupported("GL_APPLE_texture_format_BGRA8888")) {
|
||||
return GL_BGRA8_EXT;
|
||||
} else {
|
||||
// Desktop GL will swizzle to/from RGBA8 for BGRA formats.
|
||||
return GL_RGBA8;
|
||||
// TODO(crbug.com/dawn/633): Remove this workaround after the deadlock issue is fixed.
|
||||
if (IsToggleEnabled(Toggle::FlushBeforeClientWaitSync)) {
|
||||
gl.Flush();
|
||||
}
|
||||
}
|
||||
|
||||
ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
|
||||
const BindGroupDescriptor* descriptor) {
|
||||
DAWN_TRY(ValidateGLBindGroupDescriptor(descriptor));
|
||||
return BindGroup::Create(this, descriptor);
|
||||
}
|
||||
ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
|
||||
const BindGroupLayoutDescriptor* descriptor,
|
||||
PipelineCompatibilityToken pipelineCompatibilityToken) {
|
||||
return AcquireRef(new BindGroupLayout(this, descriptor, pipelineCompatibilityToken));
|
||||
}
|
||||
ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
|
||||
return AcquireRef(new Buffer(this, descriptor));
|
||||
}
|
||||
ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
|
||||
CommandEncoder* encoder,
|
||||
const CommandBufferDescriptor* descriptor) {
|
||||
return AcquireRef(new CommandBuffer(encoder, descriptor));
|
||||
}
|
||||
Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
|
||||
const ComputePipelineDescriptor* descriptor) {
|
||||
return ComputePipeline::CreateUninitialized(this, descriptor);
|
||||
}
|
||||
ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
|
||||
const PipelineLayoutDescriptor* descriptor) {
|
||||
return AcquireRef(new PipelineLayout(this, descriptor));
|
||||
}
|
||||
ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(
|
||||
const QuerySetDescriptor* descriptor) {
|
||||
return AcquireRef(new QuerySet(this, descriptor));
|
||||
}
|
||||
Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
|
||||
const RenderPipelineDescriptor* descriptor) {
|
||||
return RenderPipeline::CreateUninitialized(this, descriptor);
|
||||
}
|
||||
ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
|
||||
return AcquireRef(new Sampler(this, descriptor));
|
||||
}
|
||||
ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
|
||||
const ShaderModuleDescriptor* descriptor,
|
||||
ShaderModuleParseResult* parseResult) {
|
||||
return ShaderModule::Create(this, descriptor, parseResult);
|
||||
}
|
||||
ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
|
||||
const SwapChainDescriptor* descriptor) {
|
||||
return AcquireRef(new SwapChain(this, descriptor));
|
||||
}
|
||||
ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
|
||||
Surface* surface,
|
||||
NewSwapChainBase* previousSwapChain,
|
||||
const SwapChainDescriptor* descriptor) {
|
||||
return DAWN_FORMAT_VALIDATION_ERROR("New swapchains not implemented.");
|
||||
}
|
||||
ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
|
||||
return AcquireRef(new Texture(this, descriptor));
|
||||
}
|
||||
ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
|
||||
TextureBase* texture,
|
||||
const TextureViewDescriptor* descriptor) {
|
||||
return AcquireRef(new TextureView(texture, descriptor));
|
||||
}
|
||||
|
||||
void Device::SubmitFenceSync() {
|
||||
GLsync sync = gl.FenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
|
||||
IncrementLastSubmittedCommandSerial();
|
||||
mFencesInFlight.emplace(sync, GetLastSubmittedCommandSerial());
|
||||
}
|
||||
|
||||
MaybeError Device::ValidateEGLImageCanBeWrapped(const TextureDescriptor* descriptor,
|
||||
::EGLImage image) {
|
||||
DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
|
||||
"Texture dimension (%s) is not %s.", descriptor->dimension,
|
||||
wgpu::TextureDimension::e2D);
|
||||
|
||||
DAWN_INVALID_IF(descriptor->mipLevelCount != 1, "Mip level count (%u) is not 1.",
|
||||
descriptor->mipLevelCount);
|
||||
|
||||
DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers != 1,
|
||||
"Array layer count (%u) is not 1.", descriptor->size.depthOrArrayLayers);
|
||||
|
||||
DAWN_INVALID_IF(descriptor->sampleCount != 1, "Sample count (%u) is not 1.",
|
||||
descriptor->sampleCount);
|
||||
|
||||
DAWN_INVALID_IF(descriptor->usage & (wgpu::TextureUsage::TextureBinding |
|
||||
wgpu::TextureUsage::StorageBinding),
|
||||
"Texture usage (%s) cannot have %s or %s.", descriptor->usage,
|
||||
wgpu::TextureUsage::TextureBinding, wgpu::TextureUsage::StorageBinding);
|
||||
|
||||
return {};
|
||||
}
|
||||
TextureBase* Device::CreateTextureWrappingEGLImage(const ExternalImageDescriptor* descriptor,
|
||||
::EGLImage image) {
|
||||
const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
|
||||
|
||||
if (ConsumedError(ValidateTextureDescriptor(this, textureDescriptor))) {
|
||||
return nullptr;
|
||||
}
|
||||
if (ConsumedError(ValidateEGLImageCanBeWrapped(textureDescriptor, image))) {
|
||||
return nullptr;
|
||||
GLenum result = gl.ClientWaitSync(sync, GL_SYNC_FLUSH_COMMANDS_BIT, 0);
|
||||
if (result == GL_TIMEOUT_EXPIRED) {
|
||||
return fenceSerial;
|
||||
}
|
||||
// Update fenceSerial since fence is ready.
|
||||
fenceSerial = tentativeSerial;
|
||||
|
||||
GLuint tex;
|
||||
gl.GenTextures(1, &tex);
|
||||
gl.BindTexture(GL_TEXTURE_2D, tex);
|
||||
gl.EGLImageTargetTexture2DOES(GL_TEXTURE_2D, image);
|
||||
gl.DeleteSync(sync);
|
||||
|
||||
GLint width, height, internalFormat;
|
||||
gl.GetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &width);
|
||||
gl.GetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &height);
|
||||
gl.GetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_INTERNAL_FORMAT, &internalFormat);
|
||||
mFencesInFlight.pop();
|
||||
|
||||
if (textureDescriptor->size.width != static_cast<uint32_t>(width) ||
|
||||
textureDescriptor->size.height != static_cast<uint32_t>(height) ||
|
||||
textureDescriptor->size.depthOrArrayLayers != 1) {
|
||||
ConsumedError(DAWN_FORMAT_VALIDATION_ERROR(
|
||||
"EGLImage size (width: %u, height: %u, depth: 1) doesn't match descriptor size %s.",
|
||||
width, height, &textureDescriptor->size));
|
||||
gl.DeleteTextures(1, &tex);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// TODO(dawn:803): Validate the OpenGL texture format from the EGLImage against the format
|
||||
// in the passed-in TextureDescriptor.
|
||||
return new Texture(this, textureDescriptor, tex, TextureBase::TextureState::OwnedInternal);
|
||||
ASSERT(fenceSerial > GetCompletedCommandSerial());
|
||||
}
|
||||
return fenceSerial;
|
||||
}
|
||||
|
||||
MaybeError Device::TickImpl() {
|
||||
return {};
|
||||
}
|
||||
ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
|
||||
return DAWN_UNIMPLEMENTED_ERROR("Device unable to create staging buffer.");
|
||||
}
|
||||
|
||||
ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
|
||||
ExecutionSerial fenceSerial{0};
|
||||
while (!mFencesInFlight.empty()) {
|
||||
auto [sync, tentativeSerial] = mFencesInFlight.front();
|
||||
MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
|
||||
uint64_t sourceOffset,
|
||||
BufferBase* destination,
|
||||
uint64_t destinationOffset,
|
||||
uint64_t size) {
|
||||
return DAWN_UNIMPLEMENTED_ERROR("Device unable to copy from staging buffer.");
|
||||
}
|
||||
|
||||
// Fence are added in order, so we can stop searching as soon
|
||||
// as we see one that's not ready.
|
||||
MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
|
||||
const TextureDataLayout& src,
|
||||
TextureCopy* dst,
|
||||
const Extent3D& copySizePixels) {
|
||||
return DAWN_UNIMPLEMENTED_ERROR("Device unable to copy from staging buffer to texture.");
|
||||
}
|
||||
|
||||
// TODO(crbug.com/dawn/633): Remove this workaround after the deadlock issue is fixed.
|
||||
if (IsToggleEnabled(Toggle::FlushBeforeClientWaitSync)) {
|
||||
gl.Flush();
|
||||
}
|
||||
GLenum result = gl.ClientWaitSync(sync, GL_SYNC_FLUSH_COMMANDS_BIT, 0);
|
||||
if (result == GL_TIMEOUT_EXPIRED) {
|
||||
return fenceSerial;
|
||||
}
|
||||
// Update fenceSerial since fence is ready.
|
||||
fenceSerial = tentativeSerial;
|
||||
void Device::DestroyImpl() {
|
||||
ASSERT(GetState() == State::Disconnected);
|
||||
}
|
||||
|
||||
gl.DeleteSync(sync);
|
||||
MaybeError Device::WaitForIdleForDestruction() {
|
||||
gl.Finish();
|
||||
DAWN_TRY(CheckPassedSerials());
|
||||
ASSERT(mFencesInFlight.empty());
|
||||
|
||||
mFencesInFlight.pop();
|
||||
return {};
|
||||
}
|
||||
|
||||
ASSERT(fenceSerial > GetCompletedCommandSerial());
|
||||
}
|
||||
return fenceSerial;
|
||||
}
|
||||
uint32_t Device::GetOptimalBytesPerRowAlignment() const {
|
||||
return 1;
|
||||
}
|
||||
|
||||
ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
|
||||
return DAWN_UNIMPLEMENTED_ERROR("Device unable to create staging buffer.");
|
||||
}
|
||||
uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
|
||||
return 1;
|
||||
}
|
||||
|
||||
MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
|
||||
uint64_t sourceOffset,
|
||||
BufferBase* destination,
|
||||
uint64_t destinationOffset,
|
||||
uint64_t size) {
|
||||
return DAWN_UNIMPLEMENTED_ERROR("Device unable to copy from staging buffer.");
|
||||
}
|
||||
|
||||
MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
|
||||
const TextureDataLayout& src,
|
||||
TextureCopy* dst,
|
||||
const Extent3D& copySizePixels) {
|
||||
return DAWN_UNIMPLEMENTED_ERROR("Device unable to copy from staging buffer to texture.");
|
||||
}
|
||||
|
||||
void Device::DestroyImpl() {
|
||||
ASSERT(GetState() == State::Disconnected);
|
||||
}
|
||||
|
||||
MaybeError Device::WaitForIdleForDestruction() {
|
||||
gl.Finish();
|
||||
DAWN_TRY(CheckPassedSerials());
|
||||
ASSERT(mFencesInFlight.empty());
|
||||
|
||||
return {};
|
||||
}
|
||||
|
||||
uint32_t Device::GetOptimalBytesPerRowAlignment() const {
|
||||
return 1;
|
||||
}
|
||||
|
||||
uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
|
||||
return 1;
|
||||
}
|
||||
|
||||
float Device::GetTimestampPeriodInNS() const {
|
||||
return 1.0f;
|
||||
}
|
||||
float Device::GetTimestampPeriodInNS() const {
|
||||
return 1.0f;
|
||||
}
|
||||
|
||||
} // namespace dawn::native::opengl
|
||||
|
||||
@@ -30,104 +30,100 @@

// Remove windows.h macros after glad's include of windows.h
#if defined(DAWN_PLATFORM_WINDOWS)
#include "dawn/common/windows_with_undefs.h"
#endif

typedef void* EGLImage;

namespace dawn::native::opengl {

class Device final : public DeviceBase {
  public:
    static ResultOrError<Ref<Device>> Create(AdapterBase* adapter,
                                             const DeviceDescriptor* descriptor,
                                             const OpenGLFunctions& functions);
    ~Device() override;

    MaybeError Initialize(const DeviceDescriptor* descriptor);

    // Contains all the OpenGL entry points, glDoFoo is called via device->gl.DoFoo.
    const OpenGLFunctions gl;

    const GLFormat& GetGLFormat(const Format& format);

    void SubmitFenceSync();

    MaybeError ValidateEGLImageCanBeWrapped(const TextureDescriptor* descriptor, ::EGLImage image);
    TextureBase* CreateTextureWrappingEGLImage(const ExternalImageDescriptor* descriptor,
                                               ::EGLImage image);

    ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
        CommandEncoder* encoder,
        const CommandBufferDescriptor* descriptor) override;

    MaybeError TickImpl() override;

    ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
    MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
                                       uint64_t sourceOffset,
                                       BufferBase* destination,
                                       uint64_t destinationOffset,
                                       uint64_t size) override;

    MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
                                        const TextureDataLayout& src,
                                        TextureCopy* dst,
                                        const Extent3D& copySizePixels) override;

    uint32_t GetOptimalBytesPerRowAlignment() const override;
    uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;

    float GetTimestampPeriodInNS() const override;

  private:
    Device(AdapterBase* adapter,
           const DeviceDescriptor* descriptor,
           const OpenGLFunctions& functions);

    ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
        const BindGroupDescriptor* descriptor) override;
    ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
        const BindGroupLayoutDescriptor* descriptor,
        PipelineCompatibilityToken pipelineCompatibilityToken) override;
    ResultOrError<Ref<BufferBase>> CreateBufferImpl(const BufferDescriptor* descriptor) override;
    ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
        const PipelineLayoutDescriptor* descriptor) override;
    ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
        const QuerySetDescriptor* descriptor) override;
    ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(const SamplerDescriptor* descriptor) override;
    ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
        const ShaderModuleDescriptor* descriptor,
        ShaderModuleParseResult* parseResult) override;
    ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
        const SwapChainDescriptor* descriptor) override;
    ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
        Surface* surface,
        NewSwapChainBase* previousSwapChain,
        const SwapChainDescriptor* descriptor) override;
    ResultOrError<Ref<TextureBase>> CreateTextureImpl(const TextureDescriptor* descriptor) override;
    ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
        TextureBase* texture,
        const TextureViewDescriptor* descriptor) override;
    Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
        const ComputePipelineDescriptor* descriptor) override;
    Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
        const RenderPipelineDescriptor* descriptor) override;

    void InitTogglesFromDriver();
    GLenum GetBGRAInternalFormat() const;
    ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
    void DestroyImpl() override;
    MaybeError WaitForIdleForDestruction() override;

    std::queue<std::pair<GLsync, ExecutionSerial>> mFencesInFlight;

    GLFormatTable mFormatTable;
};

}  // namespace dawn::native::opengl
@@ -19,47 +19,47 @@

namespace dawn::native::opengl {

class Adapter;
class BindGroup;
class BindGroupLayout;
class Buffer;
class CommandBuffer;
class ComputePipeline;
class Device;
class PersistentPipelineState;
class PipelineLayout;
class QuerySet;
class Queue;
class RenderPipeline;
class Sampler;
class ShaderModule;
class SwapChain;
class Texture;
class TextureView;

struct OpenGLBackendTraits {
    using AdapterType = Adapter;
    using BindGroupType = BindGroup;
    using BindGroupLayoutType = BindGroupLayout;
    using BufferType = Buffer;
    using CommandBufferType = CommandBuffer;
    using ComputePipelineType = ComputePipeline;
    using DeviceType = Device;
    using PipelineLayoutType = PipelineLayout;
    using QuerySetType = QuerySet;
    using QueueType = Queue;
    using RenderPipelineType = RenderPipeline;
    using SamplerType = Sampler;
    using ShaderModuleType = ShaderModule;
    using SwapChainType = SwapChain;
    using TextureType = Texture;
    using TextureViewType = TextureView;
};

template <typename T>
auto ToBackend(T&& common) -> decltype(ToBackendBase<OpenGLBackendTraits>(common)) {
    return ToBackendBase<OpenGLBackendTraits>(common);
}

}  // namespace dawn::native::opengl
@@ -16,32 +16,32 @@

namespace dawn::native::opengl {

GLFormatTable BuildGLFormatTable(GLenum internalFormatForBGRA) {
    GLFormatTable table;

    using Type = GLFormat::ComponentType;

    auto AddFormat = [&table](wgpu::TextureFormat dawnFormat, GLenum internalFormat, GLenum format,
                              GLenum type, Type componentType) {
        FormatIndex index = ComputeFormatIndex(dawnFormat);
        ASSERT(index < table.size());

        table[index].internalFormat = internalFormat;
        table[index].format = format;
        table[index].type = type;
        table[index].componentType = componentType;
        table[index].isSupportedOnBackend = true;
    };

    // It's dangerous to go alone, take this:
    //
    //     [ANGLE's formatutils.cpp]
    //     [ANGLE's formatutilsgl.cpp]
    //
    // The format tables in these files are extremely complete and the best reference on GL
    // format support, enums, etc.

    // clang-format off

    // 1 byte color formats
    AddFormat(wgpu::TextureFormat::R8Unorm, GL_R8, GL_RED, GL_UNSIGNED_BYTE, Type::Float);
@@ -113,9 +113,9 @@ namespace dawn::native::opengl {
    AddFormat(wgpu::TextureFormat::BC7RGBAUnorm, GL_COMPRESSED_RGBA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
    AddFormat(wgpu::TextureFormat::BC7RGBAUnormSrgb, GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);

    // clang-format on

    return table;
}

}  // namespace dawn::native::opengl
@@ -20,22 +20,22 @@

namespace dawn::native::opengl {

class Device;

struct GLFormat {
    GLenum internalFormat = 0;
    GLenum format = 0;
    GLenum type = 0;
    bool isSupportedOnBackend = false;

    // OpenGL has different functions depending on the format component type, for example
    // glClearBufferfv is only valid on formats with the Float ComponentType
    enum ComponentType { Float, Int, Uint, DepthStencil };
    ComponentType componentType;
};

using GLFormatTable = ityp::array<FormatIndex, GLFormat, kKnownFormatCount>;
GLFormatTable BuildGLFormatTable(GLenum internalFormatForBGRA);

}  // namespace dawn::native::opengl
@@ -18,71 +18,69 @@

namespace dawn::native::opengl {

NativeSwapChainImpl::NativeSwapChainImpl(Device* device,
                                         PresentCallback present,
                                         void* presentUserdata)
    : mPresentCallback(present), mPresentUserdata(presentUserdata), mDevice(device) {}

NativeSwapChainImpl::~NativeSwapChainImpl() {
    const OpenGLFunctions& gl = mDevice->gl;
    gl.DeleteTextures(1, &mBackTexture);
    gl.DeleteFramebuffers(1, &mBackFBO);
}

void NativeSwapChainImpl::Init(DawnWSIContextGL* /*context*/) {
    const OpenGLFunctions& gl = mDevice->gl;
    gl.GenTextures(1, &mBackTexture);
    gl.BindTexture(GL_TEXTURE_2D, mBackTexture);
    gl.TexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 0, 0, 0, GL_RGBA, GL_UNSIGNED_BYTE, nullptr);

    gl.GenFramebuffers(1, &mBackFBO);
    gl.BindFramebuffer(GL_READ_FRAMEBUFFER, mBackFBO);
    gl.FramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, mBackTexture,
                            0);
}

DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
                                                  WGPUTextureUsage usage,
                                                  uint32_t width,
                                                  uint32_t height) {
    if (format != WGPUTextureFormat_RGBA8Unorm) {
        return "unsupported format";
    }
    ASSERT(width > 0);
    ASSERT(height > 0);
    mWidth = width;
    mHeight = height;

    const OpenGLFunctions& gl = mDevice->gl;
    gl.BindTexture(GL_TEXTURE_2D, mBackTexture);
    // Reallocate the texture
    gl.TexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, nullptr);

    return DAWN_SWAP_CHAIN_NO_ERROR;
}

DawnSwapChainError NativeSwapChainImpl::GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
    nextTexture->texture.u32 = mBackTexture;
    return DAWN_SWAP_CHAIN_NO_ERROR;
}

DawnSwapChainError NativeSwapChainImpl::Present() {
    const OpenGLFunctions& gl = mDevice->gl;
    gl.BindFramebuffer(GL_READ_FRAMEBUFFER, mBackFBO);
    gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
    gl.Scissor(0, 0, mWidth, mHeight);
    gl.BlitFramebuffer(0, 0, mWidth, mHeight, 0, mHeight, mWidth, 0, GL_COLOR_BUFFER_BIT,
                       GL_NEAREST);

    mPresentCallback(mPresentUserdata);

    return DAWN_SWAP_CHAIN_NO_ERROR;
}

wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
    return wgpu::TextureFormat::RGBA8Unorm;
}

}  // namespace dawn::native::opengl
@@ -22,36 +22,36 @@

namespace dawn::native::opengl {

class Device;

class NativeSwapChainImpl {
  public:
    using WSIContext = DawnWSIContextGL;

    NativeSwapChainImpl(Device* device, PresentCallback present, void* presentUserdata);
    ~NativeSwapChainImpl();

    void Init(DawnWSIContextGL* context);
    DawnSwapChainError Configure(WGPUTextureFormat format,
                                 WGPUTextureUsage,
                                 uint32_t width,
                                 uint32_t height);
    DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture);
    DawnSwapChainError Present();

    wgpu::TextureFormat GetPreferredFormat() const;

  private:
    PresentCallback mPresentCallback;
    void* mPresentUserdata;

    uint32_t mWidth = 0;
    uint32_t mHeight = 0;
    GLuint mBackFBO = 0;
    GLuint mBackTexture = 0;

    Device* mDevice = nullptr;
};

}  // namespace dawn::native::opengl
@@ -23,43 +23,39 @@

namespace dawn::native::opengl {

AdapterDiscoveryOptions::AdapterDiscoveryOptions()
    : AdapterDiscoveryOptionsBase(WGPUBackendType_OpenGL) {}

AdapterDiscoveryOptionsES::AdapterDiscoveryOptionsES()
    : AdapterDiscoveryOptionsBase(WGPUBackendType_OpenGLES) {}

DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
                                                      PresentCallback present,
                                                      void* presentUserdata) {
    Device* backendDevice = ToBackend(FromAPI(device));

    DawnSwapChainImplementation impl;
    impl = CreateSwapChainImplementation(
        new NativeSwapChainImpl(backendDevice, present, presentUserdata));
    impl.textureUsage = WGPUTextureUsage_Present;

    return impl;
}

WGPUTextureFormat GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain) {
    NativeSwapChainImpl* impl = reinterpret_cast<NativeSwapChainImpl*>(swapChain->userData);
    return static_cast<WGPUTextureFormat>(impl->GetPreferredFormat());
}

ExternalImageDescriptorEGLImage::ExternalImageDescriptorEGLImage()
    : ExternalImageDescriptor(ExternalImageType::EGLImage) {}

WGPUTexture WrapExternalEGLImage(WGPUDevice device,
                                 const ExternalImageDescriptorEGLImage* descriptor) {
    Device* backendDevice = ToBackend(FromAPI(device));
    TextureBase* texture =
        backendDevice->CreateTextureWrappingEGLImage(descriptor, descriptor->image);
    return ToAPI(texture);
}

}  // namespace dawn::native::opengl
@@ -18,44 +18,44 @@

namespace dawn::native::opengl {

MaybeError OpenGLFunctions::Initialize(GetProcAddress getProc) {
    DAWN_TRY(mVersion.Initialize(getProc));
    if (mVersion.IsES()) {
        DAWN_TRY(LoadOpenGLESProcs(getProc, mVersion.GetMajor(), mVersion.GetMinor()));
    } else {
        DAWN_TRY(LoadDesktopGLProcs(getProc, mVersion.GetMajor(), mVersion.GetMinor()));
    }

    InitializeSupportedGLExtensions();

    return {};
}

void OpenGLFunctions::InitializeSupportedGLExtensions() {
    int32_t numExtensions;
    GetIntegerv(GL_NUM_EXTENSIONS, &numExtensions);

    for (int32_t i = 0; i < numExtensions; ++i) {
        const char* extensionName = reinterpret_cast<const char*>(GetStringi(GL_EXTENSIONS, i));
        mSupportedGLExtensionsSet.insert(extensionName);
    }
}

bool OpenGLFunctions::IsGLExtensionSupported(const char* extension) const {
    ASSERT(extension != nullptr);
    return mSupportedGLExtensionsSet.count(extension) != 0;
}

const OpenGLVersion& OpenGLFunctions::GetVersion() const {
    return mVersion;
}

bool OpenGLFunctions::IsAtLeastGL(uint32_t majorVersion, uint32_t minorVersion) const {
    return mVersion.IsDesktop() && mVersion.IsAtLeast(majorVersion, minorVersion);
}

bool OpenGLFunctions::IsAtLeastGLES(uint32_t majorVersion, uint32_t minorVersion) const {
    return mVersion.IsES() && mVersion.IsAtLeast(majorVersion, minorVersion);
}

}  // namespace dawn::native::opengl
@@ -23,23 +23,23 @@

namespace dawn::native::opengl {

struct OpenGLFunctions : OpenGLFunctionsBase {
  public:
    MaybeError Initialize(GetProcAddress getProc);

    const OpenGLVersion& GetVersion() const;
    bool IsAtLeastGL(uint32_t majorVersion, uint32_t minorVersion) const;
    bool IsAtLeastGLES(uint32_t majorVersion, uint32_t minorVersion) const;

    bool IsGLExtensionSupported(const char* extension) const;

  private:
    void InitializeSupportedGLExtensions();

    OpenGLVersion mVersion;

    std::unordered_set<std::string> mSupportedGLExtensionsSet;
};

}  // namespace dawn::native::opengl
@@ -20,58 +20,58 @@

namespace dawn::native::opengl {

MaybeError OpenGLVersion::Initialize(GetProcAddress getProc) {
    PFNGLGETSTRINGPROC getString = reinterpret_cast<PFNGLGETSTRINGPROC>(getProc("glGetString"));
    if (getString == nullptr) {
        return DAWN_INTERNAL_ERROR("Couldn't load glGetString");
    }

    std::string version = reinterpret_cast<const char*>(getString(GL_VERSION));

    if (version.find("OpenGL ES") != std::string::npos) {
        // ES spec states that the GL_VERSION string will be in the following format:
        // "OpenGL ES N.M vendor-specific information"
        mStandard = Standard::ES;
        mMajorVersion = version[10] - '0';
        mMinorVersion = version[12] - '0';

        // The minor version shouldn't get to two digits.
        ASSERT(version.size() <= 13 || !isdigit(version[13]));
    } else {
        // OpenGL spec states the GL_VERSION string will be in the following format:
        // <version number><space><vendor-specific information>
        // The version number is either of the form major number.minor number or major
        // number.minor number.release number, where the numbers all have one or more
        // digits
        mStandard = Standard::Desktop;
        mMajorVersion = version[0] - '0';
        mMinorVersion = version[2] - '0';

        // The minor version shouldn't get to two digits.
        ASSERT(version.size() <= 3 || !isdigit(version[3]));
    }

    return {};
}

bool OpenGLVersion::IsDesktop() const {
    return mStandard == Standard::Desktop;
}

bool OpenGLVersion::IsES() const {
    return mStandard == Standard::ES;
}

uint32_t OpenGLVersion::GetMajor() const {
    return mMajorVersion;
}

uint32_t OpenGLVersion::GetMinor() const {
    return mMinorVersion;
}

bool OpenGLVersion::IsAtLeast(uint32_t majorVersion, uint32_t minorVersion) const {
    return std::tie(mMajorVersion, mMinorVersion) >= std::tie(majorVersion, minorVersion);
}

}  // namespace dawn::native::opengl
@@ -19,25 +19,25 @@

namespace dawn::native::opengl {

struct OpenGLVersion {
  public:
    MaybeError Initialize(GetProcAddress getProc);

    bool IsDesktop() const;
    bool IsES() const;
    uint32_t GetMajor() const;
    uint32_t GetMinor() const;
    bool IsAtLeast(uint32_t majorVersion, uint32_t minorVersion) const;

  private:
    enum class Standard {
        Desktop,
        ES,
    };
    uint32_t mMajorVersion;
    uint32_t mMinorVersion;
    Standard mStandard;
};

}  // namespace dawn::native::opengl
@@ -18,41 +18,41 @@

namespace dawn::native::opengl {

void PersistentPipelineState::SetDefaultState(const OpenGLFunctions& gl) {
    CallGLStencilFunc(gl);
}

void PersistentPipelineState::SetStencilFuncsAndMask(const OpenGLFunctions& gl,
                                                     GLenum stencilBackCompareFunction,
                                                     GLenum stencilFrontCompareFunction,
                                                     uint32_t stencilReadMask) {
    if (mStencilBackCompareFunction == stencilBackCompareFunction &&
        mStencilFrontCompareFunction == stencilFrontCompareFunction &&
        mStencilReadMask == stencilReadMask) {
        return;
    }

    mStencilBackCompareFunction = stencilBackCompareFunction;
    mStencilFrontCompareFunction = stencilFrontCompareFunction;
    mStencilReadMask = stencilReadMask;
    CallGLStencilFunc(gl);
}

void PersistentPipelineState::SetStencilReference(const OpenGLFunctions& gl,
                                                  uint32_t stencilReference) {
    if (mStencilReference == stencilReference) {
        return;
    }

    mStencilReference = stencilReference;
    CallGLStencilFunc(gl);
}

void PersistentPipelineState::CallGLStencilFunc(const OpenGLFunctions& gl) {
    gl.StencilFuncSeparate(GL_BACK, mStencilBackCompareFunction, mStencilReference,
                           mStencilReadMask);
    gl.StencilFuncSeparate(GL_FRONT, mStencilFrontCompareFunction, mStencilReference,
                           mStencilReadMask);
}

}  // namespace dawn::native::opengl
@@ -20,25 +20,25 @@

namespace dawn::native::opengl {

struct OpenGLFunctions;

class PersistentPipelineState {
  public:
    void SetDefaultState(const OpenGLFunctions& gl);
    void SetStencilFuncsAndMask(const OpenGLFunctions& gl,
                                GLenum stencilBackCompareFunction,
                                GLenum stencilFrontCompareFunction,
                                uint32_t stencilReadMask);
    void SetStencilReference(const OpenGLFunctions& gl, uint32_t stencilReference);

  private:
    void CallGLStencilFunc(const OpenGLFunctions& gl);

    GLenum mStencilBackCompareFunction = GL_ALWAYS;
    GLenum mStencilFrontCompareFunction = GL_ALWAYS;
    GLuint mStencilReadMask = 0xffffffff;
    GLuint mStencilReference = 0;
};

}  // namespace dawn::native::opengl
@@ -30,190 +30,188 @@

namespace dawn::native::opengl {

namespace {
GLenum GLShaderType(SingleShaderStage stage) {
    switch (stage) {
        case SingleShaderStage::Vertex:
            return GL_VERTEX_SHADER;
        case SingleShaderStage::Fragment:
            return GL_FRAGMENT_SHADER;
        case SingleShaderStage::Compute:
            return GL_COMPUTE_SHADER;
    }
    UNREACHABLE();
}
}  // namespace

PipelineGL::PipelineGL() : mProgram(0) {}

PipelineGL::~PipelineGL() = default;

MaybeError PipelineGL::InitializeBase(const OpenGLFunctions& gl,
                                      const PipelineLayout* layout,
                                      const PerStage<ProgrammableStage>& stages) {
    auto CreateShader = [](const OpenGLFunctions& gl, GLenum type,
                           const char* source) -> ResultOrError<GLuint> {
        GLuint shader = gl.CreateShader(type);
        gl.ShaderSource(shader, 1, &source, nullptr);
        gl.CompileShader(shader);

        GLint compileStatus = GL_FALSE;
        gl.GetShaderiv(shader, GL_COMPILE_STATUS, &compileStatus);
        if (compileStatus == GL_FALSE) {
            GLint infoLogLength = 0;
            gl.GetShaderiv(shader, GL_INFO_LOG_LENGTH, &infoLogLength);

            if (infoLogLength > 1) {
                std::vector<char> buffer(infoLogLength);
                gl.GetShaderInfoLog(shader, infoLogLength, nullptr, &buffer[0]);
                return DAWN_FORMAT_VALIDATION_ERROR("%s\nProgram compilation failed:\n%s", source,
                                                    buffer.data());
            }
        }
        return shader;
    };

    mProgram = gl.CreateProgram();

    // Compute the set of active stages.
    wgpu::ShaderStage activeStages = wgpu::ShaderStage::None;
    for (SingleShaderStage stage : IterateStages(kAllStages)) {
        if (stages[stage].module != nullptr) {
            activeStages |= StageBit(stage);
        }
    }

    // Create an OpenGL shader for each stage and gather the list of combined samplers.
    PerStage<CombinedSamplerInfo> combinedSamplers;
    bool needsPlaceholderSampler = false;
    std::vector<GLuint> glShaders;
    for (SingleShaderStage stage : IterateStages(activeStages)) {
        const ShaderModule* module = ToBackend(stages[stage].module.Get());
        std::string glsl;
        DAWN_TRY_ASSIGN(glsl, module->TranslateToGLSL(stages[stage].entryPoint.c_str(), stage,
                                                      &combinedSamplers[stage], layout,
                                                      &needsPlaceholderSampler));
        GLuint shader;
        DAWN_TRY_ASSIGN(shader, CreateShader(gl, GLShaderType(stage), glsl.c_str()));
        gl.AttachShader(mProgram, shader);
        glShaders.push_back(shader);
    }

    if (needsPlaceholderSampler) {
        SamplerDescriptor desc = {};
        ASSERT(desc.minFilter == wgpu::FilterMode::Nearest);
        ASSERT(desc.magFilter == wgpu::FilterMode::Nearest);
        ASSERT(desc.mipmapFilter == wgpu::FilterMode::Nearest);
        mPlaceholderSampler =
            ToBackend(layout->GetDevice()->GetOrCreateSampler(&desc).AcquireSuccess());
    }

    // Link all the shaders together.
    gl.LinkProgram(mProgram);

    GLint linkStatus = GL_FALSE;
    gl.GetProgramiv(mProgram, GL_LINK_STATUS, &linkStatus);
    if (linkStatus == GL_FALSE) {
        GLint infoLogLength = 0;
        gl.GetProgramiv(mProgram, GL_INFO_LOG_LENGTH, &infoLogLength);

        if (infoLogLength > 1) {
            std::vector<char> buffer(infoLogLength);
            gl.GetProgramInfoLog(mProgram, infoLogLength, nullptr, &buffer[0]);
            return DAWN_FORMAT_VALIDATION_ERROR("Program link failed:\n%s", buffer.data());
        }
    }

    // Compute links between stages for combined samplers, then bind them to texture units
    gl.UseProgram(mProgram);
    const auto& indices = layout->GetBindingIndexInfo();

    std::set<CombinedSampler> combinedSamplersSet;
    for (SingleShaderStage stage : IterateStages(activeStages)) {
        for (const CombinedSampler& combined : combinedSamplers[stage]) {
            combinedSamplersSet.insert(combined);
        }
    }

    mUnitsForSamplers.resize(layout->GetNumSamplers());
    mUnitsForTextures.resize(layout->GetNumSampledTextures());

    GLuint textureUnit = layout->GetTextureUnitsUsed();
    for (const auto& combined : combinedSamplersSet) {
        const std::string& name = combined.GetName();
        GLint location = gl.GetUniformLocation(mProgram, name.c_str());

        if (location == -1) {
            continue;
        }

        gl.Uniform1i(location, textureUnit);

        bool shouldUseFiltering;
        {
            const BindGroupLayoutBase* bgl =
                layout->GetBindGroupLayout(combined.textureLocation.group);
            BindingIndex bindingIndex = bgl->GetBindingIndex(combined.textureLocation.binding);

            GLuint textureIndex = indices[combined.textureLocation.group][bindingIndex];
            mUnitsForTextures[textureIndex].push_back(textureUnit);

            shouldUseFiltering = bgl->GetBindingInfo(bindingIndex).texture.sampleType ==
                                 wgpu::TextureSampleType::Float;
        }
        {
            if (combined.usePlaceholderSampler) {
                mPlaceholderSamplerUnits.push_back(textureUnit);
            } else {
                const BindGroupLayoutBase* bgl =
                    layout->GetBindGroupLayout(combined.samplerLocation.group);
                BindingIndex bindingIndex = bgl->GetBindingIndex(combined.samplerLocation.binding);

                GLuint samplerIndex = indices[combined.samplerLocation.group][bindingIndex];
                mUnitsForSamplers[samplerIndex].push_back({textureUnit, shouldUseFiltering});
            }
        }

        textureUnit++;
    }

    for (GLuint glShader : glShaders) {
        gl.DetachShader(mProgram, glShader);
        gl.DeleteShader(glShader);
    }

    return {};
}

void PipelineGL::DeleteProgram(const OpenGLFunctions& gl) {
    gl.DeleteProgram(mProgram);
}

const std::vector<PipelineGL::SamplerUnit>& PipelineGL::GetTextureUnitsForSampler(
    GLuint index) const {
    ASSERT(index < mUnitsForSamplers.size());
    return mUnitsForSamplers[index];
}

const std::vector<GLuint>& PipelineGL::GetTextureUnitsForTextureView(GLuint index) const {
    ASSERT(index < mUnitsForTextures.size());
    return mUnitsForTextures[index];
}

GLuint PipelineGL::GetProgramHandle() const {
    return mProgram;
}

void PipelineGL::ApplyNow(const OpenGLFunctions& gl) {
    gl.UseProgram(mProgram);
    for (GLuint unit : mPlaceholderSamplerUnits) {
        ASSERT(mPlaceholderSampler.Get() != nullptr);
        gl.BindSampler(unit, mPlaceholderSampler->GetNonFilteringHandle());
    }
}

}  // namespace dawn::native::opengl

@@ -23,46 +23,46 @@
#include "dawn/native/opengl/opengl_platform.h"

namespace dawn::native {
struct ProgrammableStage;
}  // namespace dawn::native

namespace dawn::native::opengl {

struct OpenGLFunctions;
class PipelineLayout;
class Sampler;

class PipelineGL {
  public:
    PipelineGL();
    ~PipelineGL();

    // For each unit a sampler is bound to we need to know if we should use filtering or not
    // because int and uint texture are only complete without filtering.
    struct SamplerUnit {
        GLuint unit;
        bool shouldUseFiltering;
    };
    const std::vector<SamplerUnit>& GetTextureUnitsForSampler(GLuint index) const;
    const std::vector<GLuint>& GetTextureUnitsForTextureView(GLuint index) const;
    GLuint GetProgramHandle() const;

  protected:
    void ApplyNow(const OpenGLFunctions& gl);
    MaybeError InitializeBase(const OpenGLFunctions& gl,
                              const PipelineLayout* layout,
                              const PerStage<ProgrammableStage>& stages);
    void DeleteProgram(const OpenGLFunctions& gl);

  private:
    GLuint mProgram;
    std::vector<std::vector<SamplerUnit>> mUnitsForSamplers;
    std::vector<std::vector<GLuint>> mUnitsForTextures;
    std::vector<GLuint> mPlaceholderSamplerUnits;
    // TODO(enga): This could live on the Device, or elsewhere, but currently it makes Device
    // destruction complex as it requires the sampler to be destroyed before the sampler cache.
    Ref<Sampler> mPlaceholderSampler;
};

}  // namespace dawn::native::opengl

@@ -20,76 +20,75 @@

namespace dawn::native::opengl {

PipelineLayout::PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor)
    : PipelineLayoutBase(device, descriptor) {
    GLuint uboIndex = 0;
    GLuint samplerIndex = 0;
    GLuint sampledTextureIndex = 0;
    GLuint ssboIndex = 0;
    GLuint storageTextureIndex = 0;

    for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
        const BindGroupLayoutBase* bgl = GetBindGroupLayout(group);
        mIndexInfo[group].resize(bgl->GetBindingCount());

        for (BindingIndex bindingIndex{0}; bindingIndex < bgl->GetBindingCount(); ++bindingIndex) {
            const BindingInfo& bindingInfo = bgl->GetBindingInfo(bindingIndex);
            switch (bindingInfo.bindingType) {
                case BindingInfoType::Buffer:
                    switch (bindingInfo.buffer.type) {
                        case wgpu::BufferBindingType::Uniform:
                            mIndexInfo[group][bindingIndex] = uboIndex;
                            uboIndex++;
                            break;
                        case wgpu::BufferBindingType::Storage:
                        case kInternalStorageBufferBinding:
                        case wgpu::BufferBindingType::ReadOnlyStorage:
                            mIndexInfo[group][bindingIndex] = ssboIndex;
                            ssboIndex++;
                            break;
                        case wgpu::BufferBindingType::Undefined:
                            UNREACHABLE();
                    }
                    break;

                case BindingInfoType::Sampler:
                    mIndexInfo[group][bindingIndex] = samplerIndex;
                    samplerIndex++;
                    break;

                case BindingInfoType::Texture:
                case BindingInfoType::ExternalTexture:
                    mIndexInfo[group][bindingIndex] = sampledTextureIndex;
                    sampledTextureIndex++;
                    break;

                case BindingInfoType::StorageTexture:
                    mIndexInfo[group][bindingIndex] = storageTextureIndex;
                    storageTextureIndex++;
                    break;
            }
        }
    }

    mNumSamplers = samplerIndex;
    mNumSampledTextures = sampledTextureIndex;
}

const PipelineLayout::BindingIndexInfo& PipelineLayout::GetBindingIndexInfo() const {
    return mIndexInfo;
}

GLuint PipelineLayout::GetTextureUnitsUsed() const {
    return 0;
}

size_t PipelineLayout::GetNumSamplers() const {
    return mNumSamplers;
}

size_t PipelineLayout::GetNumSampledTextures() const {
    return mNumSampledTextures;
}

}  // namespace dawn::native::opengl

@@ -24,26 +24,26 @@

namespace dawn::native::opengl {

class Device;

class PipelineLayout final : public PipelineLayoutBase {
  public:
    PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor);

    using BindingIndexInfo =
        ityp::array<BindGroupIndex, ityp::vector<BindingIndex, GLuint>, kMaxBindGroups>;
    const BindingIndexInfo& GetBindingIndexInfo() const;

    GLuint GetTextureUnitsUsed() const;
    size_t GetNumSamplers() const;
    size_t GetNumSampledTextures() const;

  private:
    ~PipelineLayout() override = default;
    BindingIndexInfo mIndexInfo;
    size_t mNumSamplers;
    size_t mNumSampledTextures;
};

}  // namespace dawn::native::opengl

@@ -18,10 +18,9 @@

namespace dawn::native::opengl {

QuerySet::QuerySet(Device* device, const QuerySetDescriptor* descriptor)
    : QuerySetBase(device, descriptor) {}

QuerySet::~QuerySet() = default;

}  // namespace dawn::native::opengl

@@ -19,15 +19,15 @@

namespace dawn::native::opengl {

class Device;

class QuerySet final : public QuerySetBase {
  public:
    QuerySet(Device* device, const QuerySetDescriptor* descriptor);

  private:
    ~QuerySet() override;
};

}  // namespace dawn::native::opengl

@@ -23,60 +23,56 @@

namespace dawn::native::opengl {

Queue::Queue(Device* device, const QueueDescriptor* descriptor) : QueueBase(device, descriptor) {}

MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
    Device* device = ToBackend(GetDevice());

    TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording, "CommandBufferGL::Execute");
    for (uint32_t i = 0; i < commandCount; ++i) {
        DAWN_TRY(ToBackend(commands[i])->Execute());
    }
    TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording, "CommandBufferGL::Execute");

    device->SubmitFenceSync();
    return {};
}

MaybeError Queue::WriteBufferImpl(BufferBase* buffer,
                                  uint64_t bufferOffset,
                                  const void* data,
                                  size_t size) {
    const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;

    ToBackend(buffer)->EnsureDataInitializedAsDestination(bufferOffset, size);

    gl.BindBuffer(GL_ARRAY_BUFFER, ToBackend(buffer)->GetHandle());
    gl.BufferSubData(GL_ARRAY_BUFFER, bufferOffset, size, data);
    return {};
}

MaybeError Queue::WriteTextureImpl(const ImageCopyTexture& destination,
                                   const void* data,
                                   const TextureDataLayout& dataLayout,
                                   const Extent3D& writeSizePixel) {
    DAWN_INVALID_IF(destination.aspect == wgpu::TextureAspect::StencilOnly,
                    "Writes to stencil textures unsupported on the OpenGL backend.");

    TextureCopy textureCopy;
    textureCopy.texture = destination.texture;
    textureCopy.mipLevel = destination.mipLevel;
    textureCopy.origin = destination.origin;
    textureCopy.aspect = SelectFormatAspects(destination.texture->GetFormat(), destination.aspect);

    SubresourceRange range = GetSubresourcesAffectedByCopy(textureCopy, writeSizePixel);
    if (IsCompleteSubresourceCopiedTo(destination.texture, writeSizePixel, destination.mipLevel)) {
        destination.texture->SetIsSubresourceContentInitialized(true, range);
    } else {
        ToBackend(destination.texture)->EnsureSubresourceContentInitialized(range);
    }
    DoTexSubImage(ToBackend(GetDevice())->gl, textureCopy, data, dataLayout, writeSizePixel);
    ToBackend(destination.texture)->Touch();
    return {};
}

}  // namespace dawn::native::opengl

@@ -19,23 +19,23 @@

namespace dawn::native::opengl {

class Device;

class Queue final : public QueueBase {
  public:
    Queue(Device* device, const QueueDescriptor* descriptor);

  private:
    MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
    MaybeError WriteBufferImpl(BufferBase* buffer,
                               uint64_t bufferOffset,
                               const void* data,
                               size_t size) override;
    MaybeError WriteTextureImpl(const ImageCopyTexture& destination,
                                const void* data,
                                const TextureDataLayout& dataLayout,
                                const Extent3D& writeSizePixel) override;
};

}  // namespace dawn::native::opengl

@@ -21,325 +21,322 @@

namespace dawn::native::opengl {

namespace {
GLenum GLPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
    switch (primitiveTopology) {
        case wgpu::PrimitiveTopology::PointList:
            return GL_POINTS;
        case wgpu::PrimitiveTopology::LineList:
            return GL_LINES;
        case wgpu::PrimitiveTopology::LineStrip:
            return GL_LINE_STRIP;
        case wgpu::PrimitiveTopology::TriangleList:
            return GL_TRIANGLES;
        case wgpu::PrimitiveTopology::TriangleStrip:
            return GL_TRIANGLE_STRIP;
    }
    UNREACHABLE();
}

void ApplyFrontFaceAndCulling(const OpenGLFunctions& gl,
                              wgpu::FrontFace face,
                              wgpu::CullMode mode) {
    // Note that we invert winding direction in OpenGL. Because Y axis is up in OpenGL,
    // which is different from WebGPU and other backends (Y axis is down).
    GLenum direction = (face == wgpu::FrontFace::CCW) ? GL_CW : GL_CCW;
    gl.FrontFace(direction);

    if (mode == wgpu::CullMode::None) {
        gl.Disable(GL_CULL_FACE);
    } else {
        gl.Enable(GL_CULL_FACE);

        GLenum cullMode = (mode == wgpu::CullMode::Front) ? GL_FRONT : GL_BACK;
        gl.CullFace(cullMode);
    }
}

GLenum GLBlendFactor(wgpu::BlendFactor factor, bool alpha) {
    switch (factor) {
        case wgpu::BlendFactor::Zero:
            return GL_ZERO;
        case wgpu::BlendFactor::One:
            return GL_ONE;
        case wgpu::BlendFactor::Src:
            return GL_SRC_COLOR;
        case wgpu::BlendFactor::OneMinusSrc:
            return GL_ONE_MINUS_SRC_COLOR;
        case wgpu::BlendFactor::SrcAlpha:
            return GL_SRC_ALPHA;
        case wgpu::BlendFactor::OneMinusSrcAlpha:
            return GL_ONE_MINUS_SRC_ALPHA;
        case wgpu::BlendFactor::Dst:
            return GL_DST_COLOR;
        case wgpu::BlendFactor::OneMinusDst:
            return GL_ONE_MINUS_DST_COLOR;
        case wgpu::BlendFactor::DstAlpha:
            return GL_DST_ALPHA;
        case wgpu::BlendFactor::OneMinusDstAlpha:
            return GL_ONE_MINUS_DST_ALPHA;
        case wgpu::BlendFactor::SrcAlphaSaturated:
            return GL_SRC_ALPHA_SATURATE;
        case wgpu::BlendFactor::Constant:
            return alpha ? GL_CONSTANT_ALPHA : GL_CONSTANT_COLOR;
        case wgpu::BlendFactor::OneMinusConstant:
            return alpha ? GL_ONE_MINUS_CONSTANT_ALPHA : GL_ONE_MINUS_CONSTANT_COLOR;
    }
    UNREACHABLE();
}

GLenum GLBlendMode(wgpu::BlendOperation operation) {
    switch (operation) {
        case wgpu::BlendOperation::Add:
            return GL_FUNC_ADD;
        case wgpu::BlendOperation::Subtract:
            return GL_FUNC_SUBTRACT;
        case wgpu::BlendOperation::ReverseSubtract:
            return GL_FUNC_REVERSE_SUBTRACT;
        case wgpu::BlendOperation::Min:
            return GL_MIN;
        case wgpu::BlendOperation::Max:
            return GL_MAX;
    }
    UNREACHABLE();
}

void ApplyColorState(const OpenGLFunctions& gl,
                     ColorAttachmentIndex attachment,
                     const ColorTargetState* state) {
    GLuint colorBuffer = static_cast<GLuint>(static_cast<uint8_t>(attachment));
    if (state->blend != nullptr) {
        gl.Enablei(GL_BLEND, colorBuffer);
        gl.BlendEquationSeparatei(colorBuffer, GLBlendMode(state->blend->color.operation),
                                  GLBlendMode(state->blend->alpha.operation));
        gl.BlendFuncSeparatei(colorBuffer, GLBlendFactor(state->blend->color.srcFactor, false),
                              GLBlendFactor(state->blend->color.dstFactor, false),
                              GLBlendFactor(state->blend->alpha.srcFactor, true),
                              GLBlendFactor(state->blend->alpha.dstFactor, true));
    } else {
        gl.Disablei(GL_BLEND, colorBuffer);
    }
    gl.ColorMaski(colorBuffer, state->writeMask & wgpu::ColorWriteMask::Red,
                  state->writeMask & wgpu::ColorWriteMask::Green,
                  state->writeMask & wgpu::ColorWriteMask::Blue,
                  state->writeMask & wgpu::ColorWriteMask::Alpha);
}

void ApplyColorState(const OpenGLFunctions& gl, const ColorTargetState* state) {
    if (state->blend != nullptr) {
        gl.Enable(GL_BLEND);
        gl.BlendEquationSeparate(GLBlendMode(state->blend->color.operation),
                                 GLBlendMode(state->blend->alpha.operation));
        gl.BlendFuncSeparate(GLBlendFactor(state->blend->color.srcFactor, false),
                             GLBlendFactor(state->blend->color.dstFactor, false),
                             GLBlendFactor(state->blend->alpha.srcFactor, true),
                             GLBlendFactor(state->blend->alpha.dstFactor, true));
    } else {
        gl.Disable(GL_BLEND);
    }
    gl.ColorMask(state->writeMask & wgpu::ColorWriteMask::Red,
                 state->writeMask & wgpu::ColorWriteMask::Green,
                 state->writeMask & wgpu::ColorWriteMask::Blue,
                 state->writeMask & wgpu::ColorWriteMask::Alpha);
}

bool Equal(const BlendComponent& lhs, const BlendComponent& rhs) {
    return lhs.operation == rhs.operation && lhs.srcFactor == rhs.srcFactor &&
           lhs.dstFactor == rhs.dstFactor;
}

GLuint OpenGLStencilOperation(wgpu::StencilOperation stencilOperation) {
    switch (stencilOperation) {
        case wgpu::StencilOperation::Keep:
            return GL_KEEP;
        case wgpu::StencilOperation::Zero:
            return GL_ZERO;
        case wgpu::StencilOperation::Replace:
            return GL_REPLACE;
        case wgpu::StencilOperation::Invert:
            return GL_INVERT;
        case wgpu::StencilOperation::IncrementClamp:
            return GL_INCR;
        case wgpu::StencilOperation::DecrementClamp:
            return GL_DECR;
        case wgpu::StencilOperation::IncrementWrap:
            return GL_INCR_WRAP;
        case wgpu::StencilOperation::DecrementWrap:
            return GL_DECR_WRAP;
    }
    UNREACHABLE();
}

void ApplyDepthStencilState(const OpenGLFunctions& gl,
                            const DepthStencilState* descriptor,
                            PersistentPipelineState* persistentPipelineState) {
    // Depth writes only occur if depth is enabled
    if (descriptor->depthCompare == wgpu::CompareFunction::Always &&
        !descriptor->depthWriteEnabled) {
        gl.Disable(GL_DEPTH_TEST);
    } else {
        gl.Enable(GL_DEPTH_TEST);
    }

    if (descriptor->depthWriteEnabled) {
        gl.DepthMask(GL_TRUE);
    } else {
        gl.DepthMask(GL_FALSE);
    }

    gl.DepthFunc(ToOpenGLCompareFunction(descriptor->depthCompare));

    if (StencilTestEnabled(descriptor)) {
        gl.Enable(GL_STENCIL_TEST);
    } else {
        gl.Disable(GL_STENCIL_TEST);
    }

    GLenum backCompareFunction = ToOpenGLCompareFunction(descriptor->stencilBack.compare);
    GLenum frontCompareFunction = ToOpenGLCompareFunction(descriptor->stencilFront.compare);
    persistentPipelineState->SetStencilFuncsAndMask(gl, backCompareFunction, frontCompareFunction,
                                                    descriptor->stencilReadMask);

    gl.StencilOpSeparate(GL_BACK, OpenGLStencilOperation(descriptor->stencilBack.failOp),
                         OpenGLStencilOperation(descriptor->stencilBack.depthFailOp),
                         OpenGLStencilOperation(descriptor->stencilBack.passOp));
    gl.StencilOpSeparate(GL_FRONT, OpenGLStencilOperation(descriptor->stencilFront.failOp),
                         OpenGLStencilOperation(descriptor->stencilFront.depthFailOp),
                         OpenGLStencilOperation(descriptor->stencilFront.passOp));

    gl.StencilMask(descriptor->stencilWriteMask);
}

}  // anonymous namespace

// static
Ref<RenderPipeline> RenderPipeline::CreateUninitialized(
    Device* device,
    const RenderPipelineDescriptor* descriptor) {
    return AcquireRef(new RenderPipeline(device, descriptor));
}

RenderPipeline::RenderPipeline(Device* device, const RenderPipelineDescriptor* descriptor)
    : RenderPipelineBase(device, descriptor),
      mVertexArrayObject(0),
      mGlPrimitiveTopology(GLPrimitiveTopology(GetPrimitiveTopology())) {}

MaybeError RenderPipeline::Initialize() {
    DAWN_TRY(InitializeBase(ToBackend(GetDevice())->gl, ToBackend(GetLayout()), GetAllStages()));
    CreateVAOForVertexState();
    return {};
}

RenderPipeline::~RenderPipeline() = default;

void RenderPipeline::DestroyImpl() {
    RenderPipelineBase::DestroyImpl();
    const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
    gl.DeleteVertexArrays(1, &mVertexArrayObject);
    gl.BindVertexArray(0);
    DeleteProgram(gl);
}

GLenum RenderPipeline::GetGLPrimitiveTopology() const {
    return mGlPrimitiveTopology;
}

ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>
RenderPipeline::GetAttributesUsingVertexBuffer(VertexBufferSlot slot) const {
    ASSERT(!IsError());
    return mAttributesUsingVertexBuffer[slot];
}

void RenderPipeline::CreateVAOForVertexState() {
    const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;

    gl.GenVertexArrays(1, &mVertexArrayObject);
    gl.BindVertexArray(mVertexArrayObject);

    for (VertexAttributeLocation location : IterateBitSet(GetAttributeLocationsUsed())) {
        const auto& attribute = GetAttribute(location);
        GLuint glAttrib = static_cast<GLuint>(static_cast<uint8_t>(location));
        gl.EnableVertexAttribArray(glAttrib);

        mAttributesUsingVertexBuffer[attribute.vertexBufferSlot][location] = true;
        const VertexBufferInfo& vertexBuffer = GetVertexBuffer(attribute.vertexBufferSlot);

        if (vertexBuffer.arrayStride == 0) {
            // Emulate a stride of zero (constant vertex attribute) by
            // setting the attribute instance divisor to a huge number.
            gl.VertexAttribDivisor(glAttrib, 0xffffffff);
        } else {
            switch (vertexBuffer.stepMode) {
                case wgpu::VertexStepMode::Vertex:
                    break;
                case wgpu::VertexStepMode::Instance:
                    gl.VertexAttribDivisor(glAttrib, 1);
                    break;
            }
        }
    }
}

void RenderPipeline::ApplyNow(PersistentPipelineState& persistentPipelineState) {
    const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
    PipelineGL::ApplyNow(gl);

    ASSERT(mVertexArrayObject);
    gl.BindVertexArray(mVertexArrayObject);

    ApplyFrontFaceAndCulling(gl, GetFrontFace(), GetCullMode());

    ApplyDepthStencilState(gl, GetDepthStencilState(), &persistentPipelineState);

    gl.SampleMaski(0, GetSampleMask());
    if (IsAlphaToCoverageEnabled()) {
        gl.Enable(GL_SAMPLE_ALPHA_TO_COVERAGE);
    } else {
        gl.Disable(GL_SAMPLE_ALPHA_TO_COVERAGE);
    }

    if (IsDepthBiasEnabled()) {
        gl.Enable(GL_POLYGON_OFFSET_FILL);
        float depthBias = GetDepthBias();
        float slopeScale = GetDepthBiasSlopeScale();
        if (gl.PolygonOffsetClamp != nullptr) {
            gl.PolygonOffsetClamp(slopeScale, depthBias, GetDepthBiasClamp());
        } else {
            gl.PolygonOffset(slopeScale, depthBias);
        }
    } else {
        gl.Disable(GL_POLYGON_OFFSET_FILL);
    }

    if (!GetDevice()->IsToggleEnabled(Toggle::DisableIndexedDrawBuffers)) {
        for (ColorAttachmentIndex attachmentSlot : IterateBitSet(GetColorAttachmentsMask())) {
            ApplyColorState(gl, attachmentSlot, GetColorTargetState(attachmentSlot));
        }
    } else {
        const ColorTargetState* prevDescriptor = nullptr;
        for (ColorAttachmentIndex attachmentSlot : IterateBitSet(GetColorAttachmentsMask())) {
            const ColorTargetState* descriptor = GetColorTargetState(attachmentSlot);
            if (!prevDescriptor) {
                ApplyColorState(gl, descriptor);
                prevDescriptor = descriptor;
            } else if ((descriptor->blend == nullptr) != (prevDescriptor->blend == nullptr)) {
                // TODO(crbug.com/dawn/582): GLES < 3.2 does not support different blend states
                // per color target. Add validation to prevent this as it is not.
                ASSERT(false);
            } else if (descriptor->blend != nullptr) {
                if (!Equal(descriptor->blend->alpha, prevDescriptor->blend->alpha) ||
                    !Equal(descriptor->blend->color, prevDescriptor->blend->color) ||
                    descriptor->writeMask != prevDescriptor->writeMask) {
                    // TODO(crbug.com/dawn/582)
                    ASSERT(false);
                }
            }
        }
    }
}

}  // namespace dawn::native::opengl

@@ -24,38 +24,38 @@

namespace dawn::native::opengl {

class Device;
class PersistentPipelineState;

class RenderPipeline final : public RenderPipelineBase, public PipelineGL {
  public:
    static Ref<RenderPipeline> CreateUninitialized(Device* device,
                                                   const RenderPipelineDescriptor* descriptor);

    GLenum GetGLPrimitiveTopology() const;
    ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes> GetAttributesUsingVertexBuffer(
        VertexBufferSlot slot) const;

    void ApplyNow(PersistentPipelineState& persistentPipelineState);

    MaybeError Initialize() override;

  private:
    RenderPipeline(Device* device, const RenderPipelineDescriptor* descriptor);
    ~RenderPipeline() override;
    void DestroyImpl() override;

    void CreateVAOForVertexState();

    // TODO(yunchao.he@intel.com): vao need to be deduplicated between pipelines.
    GLuint mVertexArrayObject;
    GLenum mGlPrimitiveTopology;

    ityp::array<VertexBufferSlot,
                ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>,
                kMaxVertexBuffers>
        mAttributesUsingVertexBuffer;
};

}  // namespace dawn::native::opengl

@@ -20,111 +20,109 @@

namespace dawn::native::opengl {

namespace {
GLenum MagFilterMode(wgpu::FilterMode filter) {
    switch (filter) {
        case wgpu::FilterMode::Nearest:
            return GL_NEAREST;
        case wgpu::FilterMode::Linear:
            return GL_LINEAR;
    }
    UNREACHABLE();
}

GLenum MinFilterMode(wgpu::FilterMode minFilter, wgpu::FilterMode mipMapFilter) {
    switch (minFilter) {
        case wgpu::FilterMode::Nearest:
            switch (mipMapFilter) {
                case wgpu::FilterMode::Nearest:
                    return GL_NEAREST_MIPMAP_NEAREST;
                case wgpu::FilterMode::Linear:
                    return GL_NEAREST_MIPMAP_LINEAR;
            }
            UNREACHABLE();
        case wgpu::FilterMode::Linear:
            switch (mipMapFilter) {
                case wgpu::FilterMode::Nearest:
                    return GL_LINEAR_MIPMAP_NEAREST;
                case wgpu::FilterMode::Linear:
                    return GL_LINEAR_MIPMAP_LINEAR;
            }
            UNREACHABLE();
    }
    UNREACHABLE();
}

GLenum WrapMode(wgpu::AddressMode mode) {
    switch (mode) {
        case wgpu::AddressMode::Repeat:
            return GL_REPEAT;
        case wgpu::AddressMode::MirrorRepeat:
            return GL_MIRRORED_REPEAT;
        case wgpu::AddressMode::ClampToEdge:
            return GL_CLAMP_TO_EDGE;
    }
    UNREACHABLE();
}

} // namespace

Sampler::Sampler(Device* device, const SamplerDescriptor* descriptor)
    : SamplerBase(device, descriptor) {
    const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;

    gl.GenSamplers(1, &mFilteringHandle);
    SetupGLSampler(mFilteringHandle, descriptor, false);

    gl.GenSamplers(1, &mNonFilteringHandle);
    SetupGLSampler(mNonFilteringHandle, descriptor, true);
}

Sampler::~Sampler() = default;

void Sampler::DestroyImpl() {
    SamplerBase::DestroyImpl();
    const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
    gl.DeleteSamplers(1, &mFilteringHandle);
    gl.DeleteSamplers(1, &mNonFilteringHandle);
}

void Sampler::SetupGLSampler(GLuint sampler,
                             const SamplerDescriptor* descriptor,
                             bool forceNearest) {
    Device* device = ToBackend(GetDevice());
    const OpenGLFunctions& gl = device->gl;

    if (forceNearest) {
        gl.SamplerParameteri(sampler, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
        gl.SamplerParameteri(sampler, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_NEAREST);
    } else {
        gl.SamplerParameteri(sampler, GL_TEXTURE_MAG_FILTER, MagFilterMode(descriptor->magFilter));
        gl.SamplerParameteri(sampler, GL_TEXTURE_MIN_FILTER,
                             MinFilterMode(descriptor->minFilter, descriptor->mipmapFilter));
    }
    gl.SamplerParameteri(sampler, GL_TEXTURE_WRAP_R, WrapMode(descriptor->addressModeW));
    gl.SamplerParameteri(sampler, GL_TEXTURE_WRAP_S, WrapMode(descriptor->addressModeU));
    gl.SamplerParameteri(sampler, GL_TEXTURE_WRAP_T, WrapMode(descriptor->addressModeV));

    gl.SamplerParameterf(sampler, GL_TEXTURE_MIN_LOD, descriptor->lodMinClamp);
    gl.SamplerParameterf(sampler, GL_TEXTURE_MAX_LOD, descriptor->lodMaxClamp);

    if (descriptor->compare != wgpu::CompareFunction::Undefined) {
        gl.SamplerParameteri(sampler, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_REF_TO_TEXTURE);
        gl.SamplerParameteri(sampler, GL_TEXTURE_COMPARE_FUNC,
                             ToOpenGLCompareFunction(descriptor->compare));
    }

    if (gl.IsAtLeastGL(4, 6) || gl.IsGLExtensionSupported("GL_EXT_texture_filter_anisotropic")) {
        gl.SamplerParameterf(sampler, GL_TEXTURE_MAX_ANISOTROPY, GetMaxAnisotropy());
    }
}

GLuint Sampler::GetFilteringHandle() const {
    return mFilteringHandle;
}

GLuint Sampler::GetNonFilteringHandle() const {
    return mNonFilteringHandle;
}

} // namespace dawn::native::opengl
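
The helpers above fold WebGPU's separate magFilter/minFilter/mipmapFilter settings into single GL enums. The following standalone sketch (not part of this CL; FilterMode and the GLFilter values are stand-ins for the wgpu and GL types) shows the same min/mipmap collapse in isolation:

#include <cassert>
#include <cstdio>

// Stand-ins for wgpu::FilterMode and the GL_*_MIPMAP_* enums (illustrative only).
enum class FilterMode { Nearest, Linear };
enum GLFilter {
    NEAREST_MIPMAP_NEAREST,
    NEAREST_MIPMAP_LINEAR,
    LINEAR_MIPMAP_NEAREST,
    LINEAR_MIPMAP_LINEAR
};

// Same shape as MinFilterMode() in SamplerGL.cpp: the minification filter picks the
// family, the mipmap filter picks the variant within it.
GLFilter MinFilter(FilterMode minFilter, FilterMode mipmapFilter) {
    if (minFilter == FilterMode::Nearest) {
        return mipmapFilter == FilterMode::Nearest ? NEAREST_MIPMAP_NEAREST
                                                   : NEAREST_MIPMAP_LINEAR;
    }
    return mipmapFilter == FilterMode::Nearest ? LINEAR_MIPMAP_NEAREST : LINEAR_MIPMAP_LINEAR;
}

int main() {
    assert(MinFilter(FilterMode::Linear, FilterMode::Nearest) == LINEAR_MIPMAP_NEAREST);
    std::printf("ok\n");
}
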
@@ -21,27 +21,27 @@

namespace dawn::native::opengl {

class Device;

class Sampler final : public SamplerBase {
  public:
    Sampler(Device* device, const SamplerDescriptor* descriptor);

    GLuint GetFilteringHandle() const;
    GLuint GetNonFilteringHandle() const;

  private:
    ~Sampler() override;
    void DestroyImpl() override;

    void SetupGLSampler(GLuint sampler, const SamplerDescriptor* descriptor, bool forceNearest);

    GLuint mFilteringHandle;

    // This is a sampler equivalent to mFilteringHandle except that it uses NEAREST filtering
    // for everything, which is important to preserve texture completeness for u/int textures.
    GLuint mNonFilteringHandle;
};

} // namespace dawn::native::opengl

@@ -28,151 +28,149 @@

namespace dawn::native::opengl {

std::string GetBindingName(BindGroupIndex group, BindingNumber bindingNumber) {
    std::ostringstream o;
    o << "dawn_binding_" << static_cast<uint32_t>(group) << "_"
      << static_cast<uint32_t>(bindingNumber);
    return o.str();
}

bool operator<(const BindingLocation& a, const BindingLocation& b) {
    return std::tie(a.group, a.binding) < std::tie(b.group, b.binding);
}

bool operator<(const CombinedSampler& a, const CombinedSampler& b) {
    return std::tie(a.usePlaceholderSampler, a.samplerLocation, a.textureLocation) <
           std::tie(b.usePlaceholderSampler, a.samplerLocation, b.textureLocation);
}

std::string CombinedSampler::GetName() const {
    std::ostringstream o;
    o << "dawn_combined";
    if (usePlaceholderSampler) {
        o << "_placeholder_sampler";
    } else {
        o << "_" << static_cast<uint32_t>(samplerLocation.group) << "_"
          << static_cast<uint32_t>(samplerLocation.binding);
    }
    o << "_with_" << static_cast<uint32_t>(textureLocation.group) << "_"
      << static_cast<uint32_t>(textureLocation.binding);
    return o.str();
}

// static
ResultOrError<Ref<ShaderModule>> ShaderModule::Create(Device* device,
                                                      const ShaderModuleDescriptor* descriptor,
                                                      ShaderModuleParseResult* parseResult) {
    Ref<ShaderModule> module = AcquireRef(new ShaderModule(device, descriptor));
    DAWN_TRY(module->Initialize(parseResult));
    return module;
}

ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
    : ShaderModuleBase(device, descriptor) {}

MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
    ScopedTintICEHandler scopedICEHandler(GetDevice());

    DAWN_TRY(InitializeBase(parseResult));

    return {};
}

ResultOrError<std::string> ShaderModule::TranslateToGLSL(const char* entryPointName,
                                                         SingleShaderStage stage,
                                                         CombinedSamplerInfo* combinedSamplers,
                                                         const PipelineLayout* layout,
                                                         bool* needsPlaceholderSampler) const {
    TRACE_EVENT0(GetDevice()->GetPlatform(), General, "TranslateToGLSL");
    tint::transform::Manager transformManager;
    tint::transform::DataMap transformInputs;

    AddExternalTextureTransform(layout, &transformManager, &transformInputs);

    tint::Program program;
    DAWN_TRY_ASSIGN(program, RunTransforms(&transformManager, GetTintProgram(), transformInputs,
                                           nullptr, nullptr));
    const OpenGLVersion& version = ToBackend(GetDevice())->gl.GetVersion();

    tint::writer::glsl::Options tintOptions;
    using Version = tint::writer::glsl::Version;
    tintOptions.version =
        Version(version.IsDesktop() ? Version::Standard::kDesktop : Version::Standard::kES,
                version.GetMajor(), version.GetMinor());

    using tint::transform::BindingPoint;
    // When textures are accessed without a sampler (e.g., textureLoad()),
    // GetSamplerTextureUses() will return this sentinel value.
    BindingPoint placeholderBindingPoint{static_cast<uint32_t>(kMaxBindGroupsTyped), 0};

    tint::inspector::Inspector inspector(&program);
    // Find all the sampler/texture pairs for this entry point, and create
    // CombinedSamplers for them. CombinedSampler records the binding points
    // of the original texture and sampler, and generates a unique name. The
    // corresponding uniforms will be retrieved by these generated names
    // in PipelineGL. Any texture-only references will have
    // "usePlaceholderSampler" set to true, and only the texture binding point
    // will be used in naming them. In addition, Dawn will bind a
    // non-filtering sampler for them (see PipelineGL).
    auto uses = inspector.GetSamplerTextureUses(entryPointName, placeholderBindingPoint);
    for (const auto& use : uses) {
        combinedSamplers->emplace_back();

        CombinedSampler* info = &combinedSamplers->back();
        if (use.sampler_binding_point == placeholderBindingPoint) {
            info->usePlaceholderSampler = true;
            *needsPlaceholderSampler = true;
        } else {
            info->usePlaceholderSampler = false;
        }
        info->samplerLocation.group = BindGroupIndex(use.sampler_binding_point.group);
        info->samplerLocation.binding = BindingNumber(use.sampler_binding_point.binding);
        info->textureLocation.group = BindGroupIndex(use.texture_binding_point.group);
        info->textureLocation.binding = BindingNumber(use.texture_binding_point.binding);
        tintOptions.binding_map[use] = info->GetName();
    }
    if (*needsPlaceholderSampler) {
        tintOptions.placeholder_binding_point = placeholderBindingPoint;
    }

    // Since (non-Vulkan) GLSL does not support descriptor sets, generate a
    // mapping from the original group/binding pair to a binding-only
    // value. This mapping will be used by Tint to remap all global
    // variables to the 1D space.
    for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
        const BindGroupLayoutBase::BindingMap& bindingMap =
            layout->GetBindGroupLayout(group)->GetBindingMap();
        for (const auto& it : bindingMap) {
            BindingNumber bindingNumber = it.first;
            BindingIndex bindingIndex = it.second;
            const BindingInfo& bindingInfo =
                layout->GetBindGroupLayout(group)->GetBindingInfo(bindingIndex);
            if (!(bindingInfo.visibility & StageBit(stage))) {
                continue;
            }

            uint32_t shaderIndex = layout->GetBindingIndexInfo()[group][bindingIndex];
            BindingPoint srcBindingPoint{static_cast<uint32_t>(group),
                                         static_cast<uint32_t>(bindingNumber)};
            BindingPoint dstBindingPoint{0, shaderIndex};
            tintOptions.binding_points.emplace(srcBindingPoint, dstBindingPoint);
        }
        tintOptions.allow_collisions = true;
    }
    auto result = tint::writer::glsl::Generate(&program, tintOptions, entryPointName);
    DAWN_INVALID_IF(!result.success, "An error occured while generating GLSL: %s.", result.error);
    std::string glsl = std::move(result.glsl);

    if (GetDevice()->IsToggleEnabled(Toggle::DumpShaders)) {
        std::ostringstream dumpedMsg;
        dumpedMsg << "/* Dumped generated GLSL */" << std::endl << glsl;

        GetDevice()->EmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
    }

    return glsl;
}

} // namespace dawn::native::opengl
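
The remapping loop above flattens WebGPU's two-level (group, binding) addressing into the single binding namespace that GLSL exposes. The standalone sketch below (not part of this CL; BindingPoint here is a plain pair standing in for tint::transform::BindingPoint, and the input list stands in for a pipeline layout walk) illustrates that flattening:

#include <cstdio>
#include <map>
#include <utility>

// Stand-in for tint::transform::BindingPoint: a (group, binding) pair.
using BindingPoint = std::pair<unsigned, unsigned>;

int main() {
    // Pretend layout: two bind groups with a couple of bindings each.
    const BindingPoint wgslBindings[] = {{0, 0}, {0, 1}, {1, 0}, {1, 7}};

    // Assign each (group, binding) a consecutive flat index, the way the backend
    // walks GetBindingIndexInfo(); the generated GLSL only ever sees group 0.
    std::map<BindingPoint, BindingPoint> bindingPoints;
    unsigned shaderIndex = 0;
    for (const BindingPoint& src : wgslBindings) {
        bindingPoints.emplace(src, BindingPoint{0u, shaderIndex++});
    }

    for (const auto& [src, dst] : bindingPoints) {
        std::printf("(%u, %u) -> binding %u\n", src.first, src.second, dst.second);
    }
}
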
@@ -26,50 +26,49 @@

namespace dawn::native::opengl {

class Device;
class PipelineLayout;

std::string GetBindingName(BindGroupIndex group, BindingNumber bindingNumber);

struct BindingLocation {
    BindGroupIndex group;
    BindingNumber binding;
};
bool operator<(const BindingLocation& a, const BindingLocation& b);

struct CombinedSampler {
    BindingLocation samplerLocation;
    BindingLocation textureLocation;
    // OpenGL requires a sampler with texelFetch. If this is true, the developer did not provide
    // one and Dawn should bind a placeholder non-filtering sampler. |samplerLocation| is
    // unused.
    bool usePlaceholderSampler;
    std::string GetName() const;
};
bool operator<(const CombinedSampler& a, const CombinedSampler& b);

using CombinedSamplerInfo = std::vector<CombinedSampler>;

using BindingInfoArrayTable = std::unordered_map<std::string, std::unique_ptr<BindingInfoArray>>;

class ShaderModule final : public ShaderModuleBase {
  public:
    static ResultOrError<Ref<ShaderModule>> Create(Device* device,
                                                   const ShaderModuleDescriptor* descriptor,
                                                   ShaderModuleParseResult* parseResult);

    ResultOrError<std::string> TranslateToGLSL(const char* entryPointName,
                                               SingleShaderStage stage,
                                               CombinedSamplerInfo* combinedSamplers,
                                               const PipelineLayout* layout,
                                               bool* needsPlaceholderSampler) const;

  private:
    ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
    ~ShaderModule() override = default;
    MaybeError Initialize(ShaderModuleParseResult* parseResult);
};

} // namespace dawn::native::opengl

@@ -22,30 +22,29 @@

namespace dawn::native::opengl {

SwapChain::SwapChain(Device* device, const SwapChainDescriptor* descriptor)
    : OldSwapChainBase(device, descriptor) {
    const auto& im = GetImplementation();
    im.Init(im.userData, nullptr);
}

SwapChain::~SwapChain() {}

TextureBase* SwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
    const auto& im = GetImplementation();
    DawnSwapChainNextTexture next = {};
    DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
    if (error) {
        GetDevice()->HandleError(InternalErrorType::Internal, error);
        return nullptr;
    }
    GLuint nativeTexture = next.texture.u32;
    return new Texture(ToBackend(GetDevice()), descriptor, nativeTexture,
                       TextureBase::TextureState::OwnedExternal);
}

MaybeError SwapChain::OnBeforePresent(TextureViewBase*) {
    return {};
}

} // namespace dawn::native::opengl

@@ -21,17 +21,17 @@

namespace dawn::native::opengl {

class Device;

class SwapChain final : public OldSwapChainBase {
  public:
    SwapChain(Device* device, const SwapChainDescriptor* descriptor);

  protected:
    ~SwapChain() override;
    TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
    MaybeError OnBeforePresent(TextureViewBase* view) override;
};

} // namespace dawn::native::opengl

File diff suppressed because it is too large
@@ -21,57 +21,54 @@

namespace dawn::native::opengl {

class Device;
struct GLFormat;

class Texture final : public TextureBase {
  public:
    Texture(Device* device, const TextureDescriptor* descriptor);
    Texture(Device* device, const TextureDescriptor* descriptor, GLuint handle, TextureState state);

    GLuint GetHandle() const;
    GLenum GetGLTarget() const;
    const GLFormat& GetGLFormat() const;
    uint32_t GetGenID() const;
    void Touch();

    void EnsureSubresourceContentInitialized(const SubresourceRange& range);

  private:
    ~Texture() override;

    void DestroyImpl() override;
    MaybeError ClearTexture(const SubresourceRange& range, TextureBase::ClearValue clearValue);

    GLuint mHandle;
    GLenum mTarget;
    uint32_t mGenID = 0;
};

class TextureView final : public TextureViewBase {
  public:
    TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor);

    GLuint GetHandle() const;
    GLenum GetGLTarget() const;
    void BindToFramebuffer(GLenum target, GLenum attachment);
    void CopyIfNeeded();

  private:
    ~TextureView() override;
    void DestroyImpl() override;
    GLenum GetInternalFormat() const;

    // TODO(crbug.com/dawn/1355): Delete this handle on texture destroy.
    GLuint mHandle;
    GLenum mTarget;
    bool mOwnsHandle;
    bool mUseCopy = false;
    uint32_t mGenID = 0;
};

} // namespace dawn::native::opengl

@@ -20,134 +20,134 @@

namespace dawn::native::opengl {

GLuint ToOpenGLCompareFunction(wgpu::CompareFunction compareFunction) {
    switch (compareFunction) {
        case wgpu::CompareFunction::Never:
            return GL_NEVER;
        case wgpu::CompareFunction::Less:
            return GL_LESS;
        case wgpu::CompareFunction::LessEqual:
            return GL_LEQUAL;
        case wgpu::CompareFunction::Greater:
            return GL_GREATER;
        case wgpu::CompareFunction::GreaterEqual:
            return GL_GEQUAL;
        case wgpu::CompareFunction::NotEqual:
            return GL_NOTEQUAL;
        case wgpu::CompareFunction::Equal:
            return GL_EQUAL;
        case wgpu::CompareFunction::Always:
            return GL_ALWAYS;

        case wgpu::CompareFunction::Undefined:
            break;
    }
    UNREACHABLE();
}

GLint GetStencilMaskFromStencilFormat(wgpu::TextureFormat depthStencilFormat) {
    switch (depthStencilFormat) {
        case wgpu::TextureFormat::Depth24PlusStencil8:
        case wgpu::TextureFormat::Depth24UnormStencil8:
        case wgpu::TextureFormat::Depth32FloatStencil8:
        case wgpu::TextureFormat::Stencil8:
            return 0xFF;

        default:
            UNREACHABLE();
    }
}

void CopyImageSubData(const OpenGLFunctions& gl,
                      Aspect srcAspects,
                      GLuint srcHandle,
                      GLenum srcTarget,
                      GLint srcLevel,
                      const Origin3D& src,
                      GLuint dstHandle,
                      GLenum dstTarget,
                      GLint dstLevel,
                      const Origin3D& dst,
                      const Extent3D& size) {
    if (gl.IsAtLeastGL(4, 3) || gl.IsAtLeastGLES(3, 2)) {
        gl.CopyImageSubData(srcHandle, srcTarget, srcLevel, src.x, src.y, src.z, dstHandle,
                            dstTarget, dstLevel, dst.x, dst.y, dst.z, size.width, size.height,
                            size.depthOrArrayLayers);
        return;
    }

    GLint prevReadFBO = 0, prevDrawFBO = 0;
    gl.GetIntegerv(GL_READ_FRAMEBUFFER_BINDING, &prevReadFBO);
    gl.GetIntegerv(GL_DRAW_FRAMEBUFFER_BINDING, &prevDrawFBO);

    // Generate temporary framebuffers for the blits.
    GLuint readFBO = 0, drawFBO = 0;
    gl.GenFramebuffers(1, &readFBO);
    gl.GenFramebuffers(1, &drawFBO);
    gl.BindFramebuffer(GL_READ_FRAMEBUFFER, readFBO);
    gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, drawFBO);

    // Reset state that may affect glBlitFramebuffer().
    gl.Disable(GL_SCISSOR_TEST);
    GLenum blitMask = 0;
    if (srcAspects & Aspect::Color) {
        blitMask |= GL_COLOR_BUFFER_BIT;
    }
    if (srcAspects & Aspect::Depth) {
        blitMask |= GL_DEPTH_BUFFER_BIT;
    }
    if (srcAspects & Aspect::Stencil) {
        blitMask |= GL_STENCIL_BUFFER_BIT;
    }

    // Iterate over all layers, doing a single blit for each.
    for (uint32_t layer = 0; layer < size.depthOrArrayLayers; ++layer) {
        // Set attachments for all aspects.
        for (Aspect aspect : IterateEnumMask(srcAspects)) {
            GLenum glAttachment;
            switch (aspect) {
                case Aspect::Color:
                    glAttachment = GL_COLOR_ATTACHMENT0;
                    break;
                case Aspect::Depth:
                    glAttachment = GL_DEPTH_ATTACHMENT;
                    break;
                case Aspect::Stencil:
                    glAttachment = GL_STENCIL_ATTACHMENT;
                    break;
                case Aspect::CombinedDepthStencil:
                case Aspect::None:
                case Aspect::Plane0:
                case Aspect::Plane1:
                    UNREACHABLE();
            }
            if (srcTarget == GL_TEXTURE_2D) {
                gl.FramebufferTexture2D(GL_READ_FRAMEBUFFER, glAttachment, srcTarget, srcHandle,
                                        srcLevel);
            } else {
                gl.FramebufferTextureLayer(GL_READ_FRAMEBUFFER, glAttachment, srcHandle, srcLevel,
                                           src.z + layer);
            }
            if (dstTarget == GL_TEXTURE_2D) {
                gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, glAttachment, dstTarget, dstHandle,
                                        dstLevel);
            } else if (dstTarget == GL_TEXTURE_CUBE_MAP) {
                GLenum target = GL_TEXTURE_CUBE_MAP_POSITIVE_X + layer;
                gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, glAttachment, target, dstHandle,
                                        dstLevel);
            } else {
                gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, glAttachment, dstHandle, dstLevel,
                                           dst.z + layer);
            }
        }
        gl.BlitFramebuffer(src.x, src.y, src.x + size.width, src.y + size.height, dst.x, dst.y,
                           dst.x + size.width, dst.y + size.height, blitMask, GL_NEAREST);
    }
    gl.Enable(GL_SCISSOR_TEST);
    gl.DeleteFramebuffers(1, &readFBO);
    gl.DeleteFramebuffers(1, &drawFBO);
    gl.BindFramebuffer(GL_READ_FRAMEBUFFER, prevReadFBO);
    gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, prevDrawFBO);
}

} // namespace dawn::native::opengl
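
CopyImageSubData() above takes the native glCopyImageSubData path when the context guarantees it and otherwise emulates the copy with per-layer framebuffer blits. A small, hypothetical sketch of that gating decision (Version is a stand-in for Dawn's OpenGLVersion/OpenGLFunctions capability queries, not their real API):

#include <cstdio>

// Stand-in for the driver version queries used by the backend (illustrative only).
struct Version {
    bool isES;
    int major;
    int minor;

    bool IsAtLeastGL(int maj, int min) const {
        return !isES && (major > maj || (major == maj && minor >= min));
    }
    bool IsAtLeastGLES(int maj, int min) const {
        return isES && (major > maj || (major == maj && minor >= min));
    }
};

// Mirrors the branch at the top of CopyImageSubData(): use the direct copy when
// glCopyImageSubData is core, otherwise fall back to per-layer framebuffer blits.
const char* CopyPath(const Version& v) {
    if (v.IsAtLeastGL(4, 3) || v.IsAtLeastGLES(3, 2)) {
        return "glCopyImageSubData";
    }
    return "per-layer glBlitFramebuffer fallback";
}

int main() {
    std::printf("%s\n", CopyPath({false, 4, 5}));  // desktop GL 4.5
    std::printf("%s\n", CopyPath({true, 3, 1}));   // GLES 3.1
}
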
@@ -20,21 +20,21 @@
#include "dawn/native/opengl/opengl_platform.h"

namespace dawn::native::opengl {

struct OpenGLFunctions;

GLuint ToOpenGLCompareFunction(wgpu::CompareFunction compareFunction);
GLint GetStencilMaskFromStencilFormat(wgpu::TextureFormat depthStencilFormat);
void CopyImageSubData(const OpenGLFunctions& gl,
                      Aspect srcAspects,
                      GLuint srcHandle,
                      GLenum srcTarget,
                      GLint srcLevel,
                      const Origin3D& src,
                      GLuint dstHandle,
                      GLenum dstTarget,
                      GLint dstLevel,
                      const Origin3D& dst,
                      const Extent3D& size);

} // namespace dawn::native::opengl