Add .clang-format files and format more files
The .clang-format files tell clang-format to ignore certain directories
(replacing code in lint_clang_format.sh which will be removed).

    $ git ls-tree -r master --name-only | grep '\.\(c\|h\|cpp\|gn\|gni\|mm\|m\|py\)$' | xargs ./append-space-to-files
    $ git checkout -- generator/templates third_party/khronos/{KHR,vulkan}
    $ git cl format --full --python

Followed by manual reformatting of a few things in Python for readability.

Bug: none
Change-Id: I4c9e472cc9a5cd80c07286e808f4e597cfef5428
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/24785
Reviewed-by: Kai Ninomiya <kainino@chromium.org>
Commit-Queue: Kai Ninomiya <kainino@chromium.org>
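For reference, each added .clang-format file is tiny: it contains only the two
keys shown in the new-file hunks near the end of this diff, which turn
formatting off for everything under the directory holding the file
(presumably generator/templates and the vendored Khronos headers, given the
git checkout command above):

    DisableFormat: true
    SortIncludes: false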
parent 5a1d39ad0b
commit 01aeca22a9

PRESUBMIT.py (14 lines changed)

@@ -16,6 +16,7 @@ import os
 import platform
 import subprocess
 
+
 def _DoClangFormat(input_api, output_api):
     if platform.system() == 'Linux' and platform.architecture()[0] == '64bit':
         clang_format_path = 'buildtools/linux64/clang-format'
@@ -32,9 +33,7 @@ def _DoClangFormat(input_api, output_api):
         return []
 
     lint_cmd = [
-        'scripts/lint_clang_format.sh',
-        clang_format_path,
-        upstream_commit
+        'scripts/lint_clang_format.sh', clang_format_path, upstream_commit
     ]
 
     # Call the linting script and forward the output as a notification or as an error
@@ -44,15 +43,20 @@ def _DoClangFormat(input_api, output_api):
     except subprocess.CalledProcessError as e:
         return [output_api.PresubmitError(e.output)]
 
+
 def _DoCommonChecks(input_api, output_api):
     results = []
-    results.extend(input_api.canned_checks.CheckChangedLUCIConfigs(input_api, output_api))
-    results.extend(input_api.canned_checks.CheckGNFormatted(input_api, output_api))
+    results.extend(
+        input_api.canned_checks.CheckChangedLUCIConfigs(input_api, output_api))
+    results.extend(
+        input_api.canned_checks.CheckGNFormatted(input_api, output_api))
    results.extend(_DoClangFormat(input_api, output_api))
    return results
 
+
 def CheckChangeOnUpload(input_api, output_api):
     return _DoCommonChecks(input_api, output_api)
 
+
 def CheckChangeOnCommit(input_api, output_api):
     return _DoCommonChecks(input_api, output_api)

@@ -20,14 +20,15 @@ import("dawn_generator.gni")
 # files but we can't just put dawn_gen_root because there are more than
 # autogenerated sources there.
 _stale_dirs = [
-  "dawn",
-  "dawn_native",
-  "dawn_wire",
-  "mock",
-  "src"
+  "dawn",
+  "dawn_native",
+  "dawn_wire",
+  "mock",
+  "src",
 ]
 
-_allowed_output_dirs_file = "${dawn_gen_root}/removed_stale_autogen_files.allowed_output_dirs"
+_allowed_output_dirs_file =
+    "${dawn_gen_root}/removed_stale_autogen_files.allowed_output_dirs"
 write_file(_allowed_output_dirs_file, dawn_allowed_gen_output_dirs)
 
 _stale_dirs_file = "${dawn_gen_root}/removed_stale_autogen_files.stale_dirs"
@@ -52,8 +53,11 @@ action("remove_stale_autogen_files") {
 
   # Have the "list of file" inputs as a dependency so that the action reruns
   # as soon as they change.
-  inputs = [_allowed_output_dirs_file, _stale_dirs_file]
+  inputs = [
+    _allowed_output_dirs_file,
+    _stale_dirs_file,
+  ]
 
   # Output a stamp file so we don't re-run this action on every build.
-  outputs = [_stamp_file]
+  outputs = [ _stamp_file ]
 }

@@ -22,6 +22,7 @@ from generator_lib import Generator, run_generator, FileRender
 # OBJECT MODEL
 ############################################################
 
+
 class Name:
     def __init__(self, name, native=False):
         self.native = native
@@ -44,7 +45,8 @@ class Name:
         return ''.join(self.chunks)
 
     def camelCase(self):
-        return self.chunks[0] + ''.join([self.CamelChunk(chunk) for chunk in self.chunks[1:]])
+        return self.chunks[0] + ''.join(
+            [self.CamelChunk(chunk) for chunk in self.chunks[1:]])
 
     def CamelCase(self):
         return ''.join([self.CamelChunk(chunk) for chunk in self.chunks])
@@ -63,9 +65,11 @@ class Name:
             result += chunk.lower()
         return result
 
+
 def concat_names(*names):
     return ' '.join([name.canonical_case() for name in names])
 
+
 class Type:
     def __init__(self, name, json_data, native=False):
         self.json_data = json_data
@@ -74,7 +78,10 @@ class Type:
         self.category = json_data['category']
         self.javascript = self.json_data.get('javascript', True)
 
+
 EnumValue = namedtuple('EnumValue', ['name', 'value', 'valid', 'jsrepr'])
 
+
 class EnumType(Type):
     def __init__(self, name, json_data):
         Type.__init__(self, name, json_data)
@@ -87,42 +94,55 @@ class EnumType(Type):
             if value != lastValue + 1:
                 self.contiguousFromZero = False
             lastValue = value
-            self.values.append(EnumValue(
-                Name(m['name']),
-                value,
-                m.get('valid', True),
-                m.get('jsrepr', None)))
+            self.values.append(
+                EnumValue(Name(m['name']), value, m.get('valid', True),
+                          m.get('jsrepr', None)))
 
         # Assert that all values are unique in enums
         all_values = set()
         for value in self.values:
             if value.value in all_values:
-                raise Exception("Duplicate value {} in enum {}".format(value.value, name))
+                raise Exception("Duplicate value {} in enum {}".format(
+                    value.value, name))
             all_values.add(value.value)
 
+
 BitmaskValue = namedtuple('BitmaskValue', ['name', 'value'])
 
+
 class BitmaskType(Type):
     def __init__(self, name, json_data):
         Type.__init__(self, name, json_data)
-        self.values = [BitmaskValue(Name(m['name']), m['value']) for m in self.json_data['values']]
+        self.values = [
+            BitmaskValue(Name(m['name']), m['value'])
+            for m in self.json_data['values']
+        ]
         self.full_mask = 0
         for value in self.values:
             self.full_mask = self.full_mask | value.value
 
+
 class CallbackType(Type):
     def __init__(self, name, json_data):
         Type.__init__(self, name, json_data)
         self.arguments = []
 
+
 class NativeType(Type):
     def __init__(self, name, json_data):
         Type.__init__(self, name, json_data, native=True)
 
+
 # Methods and structures are both "records", so record members correspond to
 # method arguments or structure members.
 class RecordMember:
-    def __init__(self, name, typ, annotation, optional=False,
-                 is_return_value=False, default_value=None,
+    def __init__(self,
+                 name,
+                 typ,
+                 annotation,
+                 optional=False,
+                 is_return_value=False,
+                 default_value=None,
                  skip_serialize=False):
         self.name = name
         self.type = typ
@@ -138,13 +158,17 @@ class RecordMember:
             assert self.type.dict_name == "ObjectHandle"
         self.handle_type = handle_type
 
+
 Method = namedtuple('Method', ['name', 'return_type', 'arguments'])
 
+
 class ObjectType(Type):
     def __init__(self, name, json_data):
         Type.__init__(self, name, json_data)
         self.methods = []
         self.built_type = None
 
+
 class Record:
     def __init__(self, name):
         self.name = Name(name)
@@ -160,12 +184,15 @@ class Record:
             else:
                 return False
 
-        self.may_have_dawn_object = any(may_have_dawn_object(member) for member in self.members)
+        self.may_have_dawn_object = any(
+            may_have_dawn_object(member) for member in self.members)
 
-        # set may_have_dawn_object to true if the type is chained or extensible. Chained structs
-        # may contain a Dawn object.
+        # Set may_have_dawn_object to true if the type is chained or
+        # extensible. Chained structs may contain a Dawn object.
         if isinstance(self, StructureType):
-            self.may_have_dawn_object = self.may_have_dawn_object or self.chained or self.extensible
+            self.may_have_dawn_object = (self.may_have_dawn_object
+                                         or self.chained or self.extensible)
 
+
 class StructureType(Record, Type):
     def __init__(self, name, json_data):
@@ -173,9 +200,11 @@ class StructureType(Record, Type):
         Type.__init__(self, name, json_data)
         self.chained = json_data.get("chained", False)
         self.extensible = json_data.get("extensible", False)
-        # Chained structs inherit from wgpu::ChainedStruct which has nextInChain so setting
-        # both extensible and chained would result in two nextInChain members.
-        assert(not (self.extensible and self.chained))
+        # Chained structs inherit from wgpu::ChainedStruct, which has
+        # nextInChain, so setting both extensible and chained would result in
+        # two nextInChain members.
+        assert not (self.extensible and self.chained)
 
+
 class Command(Record):
     def __init__(self, name, members=None):
@@ -184,11 +213,13 @@ class Command(Record):
         self.derived_object = None
         self.derived_method = None
 
+
 def linked_record_members(json_data, types):
     members = []
     members_by_name = {}
     for m in json_data:
-        member = RecordMember(Name(m['name']), types[m['type']],
+        member = RecordMember(Name(m['name']),
+                              types[m['type']],
                               m.get('annotation', 'value'),
                               optional=m.get('optional', False),
                               is_return_value=m.get('is_return_value', False),
@@ -207,7 +238,7 @@ def linked_record_members(json_data, types):
                 member.length = "constant"
                 member.constant_length = 1
             else:
-                assert(False)
+                assert False
         elif m['length'] == 'strlen':
             member.length = 'strlen'
         else:
@@ -215,31 +246,42 @@ def linked_record_members(json_data, types):
 
     return members
 
+
 ############################################################
 # PARSE
 ############################################################
 
+
 def link_object(obj, types):
     def make_method(json_data):
         arguments = linked_record_members(json_data.get('args', []), types)
-        return Method(Name(json_data['name']), types[json_data.get('returns', 'void')], arguments)
+        return Method(Name(json_data['name']),
+                      types[json_data.get('returns', 'void')], arguments)
 
     obj.methods = [make_method(m) for m in obj.json_data.get('methods', [])]
     obj.methods.sort(key=lambda method: method.name.canonical_case())
 
+
 def link_structure(struct, types):
     struct.members = linked_record_members(struct.json_data['members'], types)
 
-def link_callback(callback, types):
-    callback.arguments = linked_record_members(callback.json_data['args'], types)
 
-# Sort structures so that if struct A has struct B as a member, then B is listed before A
-# This is a form of topological sort where we try to keep the order reasonably similar to the
-# original order (though th sort isn't technically stable).
-# It works by computing for each struct type what is the depth of its DAG of dependents, then
-# resorting based on that depth using Python's stable sort. This makes a toposort because if
-# A depends on B then its depth will be bigger than B's. It is also nice because all nodes
-# with the same depth are kept in the input order.
+def link_callback(callback, types):
+    callback.arguments = linked_record_members(callback.json_data['args'],
+                                               types)
+
+
+# Sort structures so that if struct A has struct B as a member, then B is
+# listed before A.
+#
+# This is a form of topological sort where we try to keep the order reasonably
+# similar to the original order (though the sort isn't technically stable).
+#
+# It works by computing for each struct type what is the depth of its DAG of
+# dependents, then resorting based on that depth using Python's stable sort.
+# This makes a toposort because if A depends on B then its depth will be bigger
+# than B's. It is also nice because all nodes with the same depth are kept in
+# the input order.
 def topo_sort_structure(structs):
     for struct in structs:
         struct.visited = False
@@ -252,7 +294,8 @@ def topo_sort_structure(structs):
         max_dependent_depth = 0
         for member in struct.members:
             if member.type.category == 'structure':
-                max_dependent_depth = max(max_dependent_depth, compute_depth(member.type) + 1)
+                max_dependent_depth = max(max_dependent_depth,
+                                          compute_depth(member.type) + 1)
 
         struct.subdag_depth = max_dependent_depth
         struct.visited = True
@@ -269,6 +312,7 @@ def topo_sort_structure(structs):
 
     return result
 
+
 def parse_json(json):
     category_to_parser = {
         'bitmask': BitmaskType,
@@ -303,22 +347,22 @@ def parse_json(json):
         link_callback(callback, types)
 
     for category in by_category.keys():
-        by_category[category] = sorted(by_category[category], key=lambda typ: typ.name.canonical_case())
+        by_category[category] = sorted(
+            by_category[category], key=lambda typ: typ.name.canonical_case())
 
     by_category['structure'] = topo_sort_structure(by_category['structure'])
 
     for struct in by_category['structure']:
         struct.update_metadata()
 
-    return {
-        'types': types,
-        'by_category': by_category
-    }
+    return {'types': types, 'by_category': by_category}
 
+
 ############################################################
 # WIRE STUFF
 ############################################################
 
+
 # Create wire commands from api methods
 def compute_wire_params(api_params, wire_json):
     wire_params = api_params.copy()
@@ -327,7 +371,8 @@ def compute_wire_params(api_params, wire_json):
     commands = []
     return_commands = []
 
-    wire_json['special items']['client_handwritten_commands'] += wire_json['special items']['client_side_commands']
+    wire_json['special items']['client_handwritten_commands'] += wire_json[
+        'special items']['client_side_commands']
 
     # Generate commands from object methods
     for api_object in wire_params['by_category']['object']:
@@ -335,21 +380,33 @@ def compute_wire_params(api_params, wire_json):
             command_name = concat_names(api_object.name, method.name)
             command_suffix = Name(command_name).CamelCase()
 
-            # Only object return values or void are supported. Other methods must be handwritten.
-            if method.return_type.category != 'object' and method.return_type.name.canonical_case() != 'void':
-                assert(command_suffix in wire_json['special items']['client_handwritten_commands'])
+            # Only object return values or void are supported.
+            # Other methods must be handwritten.
+            is_object = method.return_type.category == 'object'
+            is_void = method.return_type.name.canonical_case() == 'void'
+            if not (is_object or is_void):
+                assert command_suffix in (
+                    wire_json['special items']['client_handwritten_commands'])
                 continue
 
-            if command_suffix in wire_json['special items']['client_side_commands']:
+            if command_suffix in (
+                    wire_json['special items']['client_side_commands']):
                 continue
 
             # Create object method commands by prepending "self"
-            members = [RecordMember(Name('self'), types[api_object.dict_name], 'value')]
+            members = [
+                RecordMember(Name('self'), types[api_object.dict_name],
+                             'value')
+            ]
             members += method.arguments
 
-            # Client->Server commands that return an object return the result object handle
+            # Client->Server commands that return an object return the
+            # result object handle
             if method.return_type.category == 'object':
-                result = RecordMember(Name('result'), types['ObjectHandle'], 'value', is_return_value=True)
+                result = RecordMember(Name('result'),
+                                      types['ObjectHandle'],
+                                      'value',
+                                      is_return_value=True)
                 result.set_handle_type(method.return_type)
                 members.append(result)
 
@@ -362,7 +419,8 @@ def compute_wire_params(api_params, wire_json):
         commands.append(Command(name, linked_record_members(json_data, types)))
 
     for (name, json_data) in wire_json['return commands'].items():
-        return_commands.append(Command(name, linked_record_members(json_data, types)))
+        return_commands.append(
+            Command(name, linked_record_members(json_data, types)))
 
     wire_params['cmd_records'] = {
         'command': commands,
@@ -378,12 +436,16 @@ def compute_wire_params(api_params, wire_json):
 
     return wire_params
 
+
 #############################################################
 # Generator
 #############################################################
 
+
 def as_varName(*names):
-    return names[0].camelCase() + ''.join([name.CamelCase() for name in names[1:]])
+    return names[0].camelCase() + ''.join(
+        [name.CamelCase() for name in names[1:]])
 
+
 def as_cType(name):
     if name.native:
@@ -391,27 +453,32 @@ def as_cType(name):
     else:
         return 'WGPU' + name.CamelCase()
 
+
 def as_cTypeDawn(name):
     if name.native:
         return name.concatcase()
     else:
         return 'Dawn' + name.CamelCase()
 
+
 def as_cTypeEnumSpecialCase(typ):
     if typ.category == 'bitmask':
         return as_cType(typ.name) + 'Flags'
     return as_cType(typ.name)
 
+
 def as_cppType(name):
     if name.native:
         return name.concatcase()
     else:
         return name.CamelCase()
 
+
 def as_jsEnumValue(value):
     if value.jsrepr: return value.jsrepr
     return "'" + value.name.js_enum_case() + "'"
 
+
 def convert_cType_to_cppType(typ, annotation, arg, indent=0):
     if typ.category == 'native':
         return arg
@@ -422,18 +489,20 @@ def convert_cType_to_cppType(typ, annotation, arg, indent=0):
             converted_members = [
                 convert_cType_to_cppType(
                     member.type, member.annotation,
-                    '{}.{}'.format(arg, as_varName(member.name)),
-                    indent + 1)
-                for member in typ.members]
+                    '{}.{}'.format(arg, as_varName(member.name)), indent + 1)
+                for member in typ.members
+            ]
 
-            converted_members = [(' ' * 4) + m for m in converted_members ]
+            converted_members = [(' ' * 4) + m for m in converted_members]
             converted_members = ',\n'.join(converted_members)
 
             return as_cppType(typ.name) + ' {\n' + converted_members + '\n}'
         else:
             return 'static_cast<{}>({})'.format(as_cppType(typ.name), arg)
     else:
-        return 'reinterpret_cast<{} {}>({})'.format(as_cppType(typ.name), annotation, arg)
+        return 'reinterpret_cast<{} {}>({})'.format(as_cppType(typ.name),
+                                                    annotation, arg)
 
+
 def decorate(name, typ, arg):
     if arg.annotation == 'value':
@@ -445,46 +514,57 @@ def decorate(name, typ, arg):
     elif arg.annotation == 'const*const*':
         return 'const ' + typ + '* const * ' + name
     else:
-        assert(False)
+        assert False
 
+
 def annotated(typ, arg):
     name = as_varName(arg.name)
     return decorate(name, typ, arg)
 
+
 def as_cEnum(type_name, value_name):
-    assert(not type_name.native and not value_name.native)
+    assert not type_name.native and not value_name.native
     return 'WGPU' + type_name.CamelCase() + '_' + value_name.CamelCase()
 
+
 def as_cEnumDawn(type_name, value_name):
-    assert(not type_name.native and not value_name.native)
-    return 'DAWN' + '_' + type_name.SNAKE_CASE() + '_' + value_name.SNAKE_CASE()
+    assert not type_name.native and not value_name.native
+    return ('DAWN' + '_' + type_name.SNAKE_CASE() + '_' +
+            value_name.SNAKE_CASE())
 
+
 def as_cppEnum(value_name):
-    assert(not value_name.native)
+    assert not value_name.native
     if value_name.concatcase()[0].isdigit():
         return "e" + value_name.CamelCase()
     return value_name.CamelCase()
 
+
 def as_cMethod(type_name, method_name):
-    assert(not type_name.native and not method_name.native)
+    assert not type_name.native and not method_name.native
     return 'wgpu' + type_name.CamelCase() + method_name.CamelCase()
 
+
 def as_cMethodDawn(type_name, method_name):
-    assert(not type_name.native and not method_name.native)
+    assert not type_name.native and not method_name.native
     return 'dawn' + type_name.CamelCase() + method_name.CamelCase()
 
+
 def as_MethodSuffix(type_name, method_name):
-    assert(not type_name.native and not method_name.native)
+    assert not type_name.native and not method_name.native
     return type_name.CamelCase() + method_name.CamelCase()
 
+
 def as_cProc(type_name, method_name):
-    assert(not type_name.native and not method_name.native)
+    assert not type_name.native and not method_name.native
     return 'WGPU' + 'Proc' + type_name.CamelCase() + method_name.CamelCase()
 
+
 def as_cProcDawn(type_name, method_name):
-    assert(not type_name.native and not method_name.native)
+    assert not type_name.native and not method_name.native
     return 'Dawn' + 'Proc' + type_name.CamelCase() + method_name.CamelCase()
 
+
 def as_frontendType(typ):
     if typ.category == 'object':
         return typ.name.CamelCase() + 'Base*'
@@ -495,6 +575,7 @@ def as_frontendType(typ):
     else:
         return as_cType(typ.name)
 
+
 def as_wireType(typ):
     if typ.category == 'object':
         return typ.name.CamelCase() + '*'
@@ -503,31 +584,50 @@ def as_wireType(typ):
     else:
         return as_cppType(typ.name)
 
+
 def c_methods(types, typ):
     return typ.methods + [
         Method(Name('reference'), types['void'], []),
         Method(Name('release'), types['void'], []),
     ]
 
+
 def get_c_methods_sorted_by_name(api_params):
     unsorted = [(as_MethodSuffix(typ.name, method.name), typ, method) \
             for typ in api_params['by_category']['object'] \
             for method in c_methods(api_params['types'], typ) ]
     return [(typ, method) for (_, typ, method) in sorted(unsorted)]
 
+
 def has_callback_arguments(method):
     return any(arg.type.category == 'callback' for arg in method.arguments)
 
+
 class MultiGeneratorFromDawnJSON(Generator):
     def get_description(self):
         return 'Generates code for various target from Dawn.json.'
 
     def add_commandline_arguments(self, parser):
-        allowed_targets = ['dawn_headers', 'dawncpp_headers', 'dawncpp', 'dawn_proc', 'mock_webgpu', 'dawn_wire', "dawn_native_utils"]
+        allowed_targets = [
+            'dawn_headers', 'dawncpp_headers', 'dawncpp', 'dawn_proc',
+            'mock_webgpu', 'dawn_wire', "dawn_native_utils"
+        ]
 
-        parser.add_argument('--dawn-json', required=True, type=str, help ='The DAWN JSON definition to use.')
-        parser.add_argument('--wire-json', default=None, type=str, help='The DAWN WIRE JSON definition to use.')
-        parser.add_argument('--targets', required=True, type=str, help='Comma-separated subset of targets to output. Available targets: ' + ', '.join(allowed_targets))
+        parser.add_argument('--dawn-json',
+                            required=True,
+                            type=str,
+                            help='The DAWN JSON definition to use.')
+        parser.add_argument('--wire-json',
+                            default=None,
+                            type=str,
+                            help='The DAWN WIRE JSON definition to use.')
+        parser.add_argument(
+            '--targets',
+            required=True,
+            type=str,
+            help=
+            'Comma-separated subset of targets to output. Available targets: '
+            + ', '.join(allowed_targets))
 
     def get_file_renders(self, args):
         with open(args.dawn_json) as f:
@@ -543,9 +643,10 @@ class MultiGeneratorFromDawnJSON(Generator):
 
         base_params = {
             'Name': lambda name: Name(name),
 
-            'as_annotated_cType': lambda arg: annotated(as_cTypeEnumSpecialCase(arg.type), arg),
-            'as_annotated_cppType': lambda arg: annotated(as_cppType(arg.type.name), arg),
+            'as_annotated_cType': \
+                lambda arg: annotated(as_cTypeEnumSpecialCase(arg.type), arg),
+            'as_annotated_cppType': \
+                lambda arg: annotated(as_cppType(arg.type.name), arg),
             'as_cEnum': as_cEnum,
             'as_cEnumDawn': as_cEnumDawn,
             'as_cppEnum': as_cppEnum,
@@ -562,78 +663,145 @@ class MultiGeneratorFromDawnJSON(Generator):
             'as_varName': as_varName,
             'decorate': decorate,
             'c_methods': lambda typ: c_methods(api_params['types'], typ),
-            'c_methods_sorted_by_name': get_c_methods_sorted_by_name(api_params),
+            'c_methods_sorted_by_name': \
+                get_c_methods_sorted_by_name(api_params),
         }
 
         renders = []
 
         if 'dawn_headers' in targets:
-            renders.append(FileRender('webgpu.h', 'src/include/dawn/webgpu.h', [base_params, api_params]))
-            renders.append(FileRender('dawn_proc_table.h', 'src/include/dawn/dawn_proc_table.h', [base_params, api_params]))
+            renders.append(
+                FileRender('webgpu.h', 'src/include/dawn/webgpu.h',
+                           [base_params, api_params]))
+            renders.append(
+                FileRender('dawn_proc_table.h',
+                           'src/include/dawn/dawn_proc_table.h',
+                           [base_params, api_params]))
 
         if 'dawncpp_headers' in targets:
-            renders.append(FileRender('webgpu_cpp.h', 'src/include/dawn/webgpu_cpp.h', [base_params, api_params]))
+            renders.append(
+                FileRender('webgpu_cpp.h', 'src/include/dawn/webgpu_cpp.h',
                           [base_params, api_params]))
 
         if 'dawn_proc' in targets:
-            renders.append(FileRender('dawn_proc.c', 'src/dawn/dawn_proc.c', [base_params, api_params]))
+            renders.append(
+                FileRender('dawn_proc.c', 'src/dawn/dawn_proc.c',
+                           [base_params, api_params]))
 
         if 'dawncpp' in targets:
-            renders.append(FileRender('webgpu_cpp.cpp', 'src/dawn/webgpu_cpp.cpp', [base_params, api_params]))
+            renders.append(
+                FileRender('webgpu_cpp.cpp', 'src/dawn/webgpu_cpp.cpp',
+                           [base_params, api_params]))
 
         if 'emscripten_bits' in targets:
-            renders.append(FileRender('webgpu_struct_info.json', 'src/dawn/webgpu_struct_info.json', [base_params, api_params]))
-            renders.append(FileRender('library_webgpu_enum_tables.js', 'src/dawn/library_webgpu_enum_tables.js', [base_params, api_params]))
+            renders.append(
+                FileRender('webgpu_struct_info.json',
+                           'src/dawn/webgpu_struct_info.json',
+                           [base_params, api_params]))
+            renders.append(
+                FileRender('library_webgpu_enum_tables.js',
+                           'src/dawn/library_webgpu_enum_tables.js',
+                           [base_params, api_params]))
 
         if 'mock_webgpu' in targets:
             mock_params = [
-                base_params,
-                api_params,
-                {
+                base_params, api_params, {
                     'has_callback_arguments': has_callback_arguments
                 }
             ]
-            renders.append(FileRender('mock_webgpu.h', 'src/dawn/mock_webgpu.h', mock_params))
-            renders.append(FileRender('mock_webgpu.cpp', 'src/dawn/mock_webgpu.cpp', mock_params))
+            renders.append(
+                FileRender('mock_webgpu.h', 'src/dawn/mock_webgpu.h',
+                           mock_params))
+            renders.append(
+                FileRender('mock_webgpu.cpp', 'src/dawn/mock_webgpu.cpp',
+                           mock_params))
 
         if 'dawn_native_utils' in targets:
             frontend_params = [
                 base_params,
                 api_params,
                 {
-                    'as_frontendType': lambda typ: as_frontendType(typ), # TODO as_frontendType and friends take a Type and not a Name :(
-                    'as_annotated_frontendType': lambda arg: annotated(as_frontendType(arg.type), arg)
+                    # TODO: as_frontendType and co. take a Type, not a Name :(
+                    'as_frontendType': lambda typ: as_frontendType(typ),
+                    'as_annotated_frontendType': \
+                        lambda arg: annotated(as_frontendType(arg.type), arg),
                 }
             ]
 
-            renders.append(FileRender('dawn_native/ValidationUtils.h', 'src/dawn_native/ValidationUtils_autogen.h', frontend_params))
-            renders.append(FileRender('dawn_native/ValidationUtils.cpp', 'src/dawn_native/ValidationUtils_autogen.cpp', frontend_params))
-            renders.append(FileRender('dawn_native/wgpu_structs.h', 'src/dawn_native/wgpu_structs_autogen.h', frontend_params))
-            renders.append(FileRender('dawn_native/wgpu_structs.cpp', 'src/dawn_native/wgpu_structs_autogen.cpp', frontend_params))
-            renders.append(FileRender('dawn_native/ProcTable.cpp', 'src/dawn_native/ProcTable.cpp', frontend_params))
+            renders.append(
+                FileRender('dawn_native/ValidationUtils.h',
+                           'src/dawn_native/ValidationUtils_autogen.h',
+                           frontend_params))
+            renders.append(
+                FileRender('dawn_native/ValidationUtils.cpp',
+                           'src/dawn_native/ValidationUtils_autogen.cpp',
+                           frontend_params))
+            renders.append(
+                FileRender('dawn_native/wgpu_structs.h',
+                           'src/dawn_native/wgpu_structs_autogen.h',
+                           frontend_params))
+            renders.append(
+                FileRender('dawn_native/wgpu_structs.cpp',
+                           'src/dawn_native/wgpu_structs_autogen.cpp',
+                           frontend_params))
+            renders.append(
+                FileRender('dawn_native/ProcTable.cpp',
+                           'src/dawn_native/ProcTable.cpp', frontend_params))
 
         if 'dawn_wire' in targets:
             additional_params = compute_wire_params(api_params, wire_json)
 
             wire_params = [
-                base_params,
-                api_params,
-                {
+                base_params, api_params, {
                     'as_wireType': as_wireType,
-                    'as_annotated_wireType': lambda arg: annotated(as_wireType(arg.type), arg),
-                },
-                additional_params
+                    'as_annotated_wireType': \
+                        lambda arg: annotated(as_wireType(arg.type), arg),
+                }, additional_params
             ]
-            renders.append(FileRender('dawn_wire/WireCmd.h', 'src/dawn_wire/WireCmd_autogen.h', wire_params))
-            renders.append(FileRender('dawn_wire/WireCmd.cpp', 'src/dawn_wire/WireCmd_autogen.cpp', wire_params))
-            renders.append(FileRender('dawn_wire/client/ApiObjects.h', 'src/dawn_wire/client/ApiObjects_autogen.h', wire_params))
-            renders.append(FileRender('dawn_wire/client/ApiProcs.cpp', 'src/dawn_wire/client/ApiProcs_autogen.cpp', wire_params))
-            renders.append(FileRender('dawn_wire/client/ClientBase.h', 'src/dawn_wire/client/ClientBase_autogen.h', wire_params))
-            renders.append(FileRender('dawn_wire/client/ClientHandlers.cpp', 'src/dawn_wire/client/ClientHandlers_autogen.cpp', wire_params))
-            renders.append(FileRender('dawn_wire/client/ClientPrototypes.inc', 'src/dawn_wire/client/ClientPrototypes_autogen.inc', wire_params))
-            renders.append(FileRender('dawn_wire/server/ServerBase.h', 'src/dawn_wire/server/ServerBase_autogen.h', wire_params))
-            renders.append(FileRender('dawn_wire/server/ServerDoers.cpp', 'src/dawn_wire/server/ServerDoers_autogen.cpp', wire_params))
-            renders.append(FileRender('dawn_wire/server/ServerHandlers.cpp', 'src/dawn_wire/server/ServerHandlers_autogen.cpp', wire_params))
-            renders.append(FileRender('dawn_wire/server/ServerPrototypes.inc', 'src/dawn_wire/server/ServerPrototypes_autogen.inc', wire_params))
+            renders.append(
+                FileRender('dawn_wire/WireCmd.h',
+                           'src/dawn_wire/WireCmd_autogen.h', wire_params))
+            renders.append(
+                FileRender('dawn_wire/WireCmd.cpp',
+                           'src/dawn_wire/WireCmd_autogen.cpp', wire_params))
+            renders.append(
+                FileRender('dawn_wire/client/ApiObjects.h',
+                           'src/dawn_wire/client/ApiObjects_autogen.h',
+                           wire_params))
+            renders.append(
+                FileRender('dawn_wire/client/ApiProcs.cpp',
+                           'src/dawn_wire/client/ApiProcs_autogen.cpp',
+                           wire_params))
+            renders.append(
+                FileRender('dawn_wire/client/ClientBase.h',
+                           'src/dawn_wire/client/ClientBase_autogen.h',
+                           wire_params))
+            renders.append(
+                FileRender('dawn_wire/client/ClientHandlers.cpp',
+                           'src/dawn_wire/client/ClientHandlers_autogen.cpp',
+                           wire_params))
+            renders.append(
+                FileRender(
+                    'dawn_wire/client/ClientPrototypes.inc',
+                    'src/dawn_wire/client/ClientPrototypes_autogen.inc',
+                    wire_params))
+            renders.append(
+                FileRender('dawn_wire/server/ServerBase.h',
+                           'src/dawn_wire/server/ServerBase_autogen.h',
+                           wire_params))
+            renders.append(
+                FileRender('dawn_wire/server/ServerDoers.cpp',
+                           'src/dawn_wire/server/ServerDoers_autogen.cpp',
+                           wire_params))
+            renders.append(
+                FileRender('dawn_wire/server/ServerHandlers.cpp',
+                           'src/dawn_wire/server/ServerHandlers_autogen.cpp',
+                           wire_params))
+            renders.append(
+                FileRender(
+                    'dawn_wire/server/ServerPrototypes.inc',
+                    'src/dawn_wire/server/ServerPrototypes_autogen.inc',
+                    wire_params))
 
         return renders
 
@@ -643,5 +811,6 @@ class MultiGeneratorFromDawnJSON(Generator):
             deps += [os.path.abspath(args.wire_json)]
         return deps
 
+
 if __name__ == '__main__':
     sys.exit(run_generator(MultiGeneratorFromDawnJSON()))

@@ -135,9 +135,7 @@ template("generator_lib_action") {
   # outputs are what's expected and write a depfile for Ninja.
   action(_json_tarball_target) {
     script = invoker.script
-    outputs = [
-      _json_tarball,
-    ]
+    outputs = [ _json_tarball ]
     depfile = _json_tarball_depfile
     args = _generator_args
     if (defined(invoker.deps)) {
@@ -153,12 +151,8 @@ template("generator_lib_action") {
       rebase_path(_gen_dir, root_build_dir),
    ]
 
-    deps = [
-      ":${_json_tarball_target}",
-    ]
-    inputs = [
-      _json_tarball,
-    ]
+    deps = [ ":${_json_tarball_target}" ]
+    inputs = [ _json_tarball ]
 
     # The expected output list is relative to the gen_dir but action
     # target outputs are from the root dir so we need to rebase them.

@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Module to create generators that render multiple Jinja2 templates for GN.
 
 A helper module that can be used to create generator scripts (clients)
@@ -54,6 +53,7 @@ from collections import namedtuple
 #
 FileRender = namedtuple('FileRender', ['template', 'output', 'params_dicts'])
 
+
 # The interface that must be implemented by generators.
 class Generator:
     def get_description(self):
@@ -72,6 +72,7 @@ class Generator:
         """Return a list of extra input dependencies."""
         return []
 
+
 # Allow custom Jinja2 installation path through an additional python
 # path from the arguments if present. This isn't done through the regular
 # argparse because PreprocessingLoader uses jinja2 in the global scope before
@@ -92,6 +93,7 @@ except ValueError:
 
 import jinja2
 
+
 # A custom Jinja2 template loader that removes the extra indentation
 # of the template blocks so that the output is correctly indented
 class _PreprocessingLoader(jinja2.BaseLoader):
@@ -113,18 +115,21 @@ class _PreprocessingLoader(jinja2.BaseLoader):
     def preprocess(self, source):
         lines = source.split('\n')
 
-        # Compute the current indentation level of the template blocks and remove their indentation
+        # Compute the current indentation level of the template blocks and
+        # remove their indentation
         result = []
         indentation_level = 0
 
-        # Filter lines that are pure comments. line_comment_prefix is not enough because it removes
-        # the comment but doesn't completely remove the line, resulting in more verbose output.
+        # Filter lines that are pure comments. line_comment_prefix is not
+        # enough because it removes the comment but doesn't completely remove
+        # the line, resulting in more verbose output.
         lines = filter(lambda line: not line.strip().startswith('//*'), lines)
 
         # Remove indentation templates have for the Jinja control flow.
         for line in lines:
-            # The capture in the regex adds one element per block start or end so we divide by two
-            # there is also an extra line chunk corresponding to the line end, so we substract it.
+            # The capture in the regex adds one element per block start or end,
+            # so we divide by two. There is also an extra line chunk
+            # corresponding to the line end, so we subtract it.
             numends = (len(self.blockend.split(line)) - 1) // 2
             indentation_level -= numends
 
@@ -142,14 +147,19 @@ class _PreprocessingLoader(jinja2.BaseLoader):
         elif line.startswith('\t'):
             line = line[1:]
         else:
-            assert(line.strip() == '')
+            assert line.strip() == ''
         return line
 
+
 _FileOutput = namedtuple('FileOutput', ['name', 'content'])
 
+
 def _do_renders(renders, template_dir):
     loader = _PreprocessingLoader(template_dir)
-    env = jinja2.Environment(loader=loader, lstrip_blocks=True, trim_blocks=True, line_comment_prefix='//*')
+    env = jinja2.Environment(loader=loader,
+                             lstrip_blocks=True,
+                             trim_blocks=True,
+                             line_comment_prefix='//*')
 
     def do_assert(expr):
         assert expr
@@ -177,16 +187,17 @@ def _do_renders(renders, template_dir):
 
     return outputs
 
+
 # Compute the list of imported, non-system Python modules.
 # It assumes that any path outside of the root directory is system.
-def _compute_python_dependencies(root_dir = None):
+def _compute_python_dependencies(root_dir=None):
     if not root_dir:
         # Assume this script is under generator/ by default.
         root_dir = os.path.join(os.path.dirname(__file__), os.pardir)
     root_dir = os.path.abspath(root_dir)
 
     module_paths = (module.__file__ for module in sys.modules.values()
-                        if module and hasattr(module, '__file__'))
+                    if module and hasattr(module, '__file__'))
 
     paths = set()
     for path in module_paths:
@@ -203,37 +214,85 @@ def _compute_python_dependencies(root_dir=None):
 
     return paths
 
+
 def run_generator(generator):
     parser = argparse.ArgumentParser(
-        description = generator.get_description(),
-        formatter_class = argparse.ArgumentDefaultsHelpFormatter,
+        description=generator.get_description(),
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
     )
 
-    generator.add_commandline_arguments(parser);
-    parser.add_argument('--template-dir', default='templates', type=str, help='Directory with template files.')
-    parser.add_argument(kJinja2Path, default=None, type=str, help='Additional python path to set before loading Jinja2')
-    parser.add_argument('--output-json-tarball', default=None, type=str, help='Name of the "JSON tarball" to create (tar is too annoying to use in python).')
-    parser.add_argument('--depfile', default=None, type=str, help='Name of the Ninja depfile to create for the JSON tarball')
-    parser.add_argument('--expected-outputs-file', default=None, type=str, help="File to compare outputs with and fail if it doesn't match")
-    parser.add_argument('--root-dir', default=None, type=str, help='Optional source root directory for Python dependency computations')
-    parser.add_argument('--allowed-output-dirs-file', default=None, type=str, help="File containing a list of allowed directories where files can be output.")
-    parser.add_argument('--print-cmake-dependencies', default=False, action="store_true", help="Prints a semi-colon separated list of dependencies to stdout and exits.")
-    parser.add_argument('--print-cmake-outputs', default=False, action="store_true", help="Prints a semi-colon separated list of outputs to stdout and exits.")
-    parser.add_argument('--output-dir', default=None, type=str, help='Directory where to output generate files.')
+    generator.add_commandline_arguments(parser)
+    parser.add_argument('--template-dir',
+                        default='templates',
+                        type=str,
+                        help='Directory with template files.')
+    parser.add_argument(
+        kJinja2Path,
+        default=None,
+        type=str,
+        help='Additional python path to set before loading Jinja2')
+    parser.add_argument(
+        '--output-json-tarball',
+        default=None,
+        type=str,
+        help=('Name of the "JSON tarball" to create (tar is too annoying '
+              'to use in python).'))
+    parser.add_argument(
+        '--depfile',
+        default=None,
+        type=str,
+        help='Name of the Ninja depfile to create for the JSON tarball')
+    parser.add_argument(
+        '--expected-outputs-file',
+        default=None,
+        type=str,
+        help="File to compare outputs with and fail if it doesn't match")
+    parser.add_argument(
+        '--root-dir',
+        default=None,
+        type=str,
+        help=('Optional source root directory for Python dependency '
+              'computations'))
+    parser.add_argument(
+        '--allowed-output-dirs-file',
+        default=None,
+        type=str,
+        help=("File containing a list of allowed directories where files "
+              "can be output."))
+    parser.add_argument(
+        '--print-cmake-dependencies',
+        default=False,
+        action="store_true",
+        help=("Prints a semi-colon separated list of dependencies to "
+              "stdout and exits."))
+    parser.add_argument(
+        '--print-cmake-outputs',
+        default=False,
+        action="store_true",
+        help=("Prints a semi-colon separated list of outputs to "
+              "stdout and exits."))
+    parser.add_argument('--output-dir',
+                        default=None,
+                        type=str,
+                        help='Directory where to output generate files.')
 
     args = parser.parse_args()
 
-    renders = generator.get_file_renders(args);
+    renders = generator.get_file_renders(args)
 
     # Output a list of all dependencies for CMake or the tarball for GN/Ninja.
     if args.depfile != None or args.print_cmake_dependencies:
         dependencies = generator.get_dependencies(args)
-        dependencies += [args.template_dir + os.path.sep + render.template for render in renders]
+        dependencies += [
+            args.template_dir + os.path.sep + render.template
+            for render in renders
+        ]
        dependencies += _compute_python_dependencies(args.root_dir)
 
         if args.depfile != None:
             with open(args.depfile, 'w') as f:
-                f.write(args.output_json_tarball + ": " + " ".join(dependencies))
+                f.write(args.output_json_tarball + ": " +
+                        " ".join(dependencies))
 
         if args.print_cmake_dependencies:
             sys.stdout.write(";".join(dependencies))
@@ -248,33 +307,42 @@ def run_generator(generator):
         actual = {render.output for render in renders}
 
         if actual != expected:
-            print("Wrong expected outputs, caller expected:\n " + repr(sorted(expected)))
+            print("Wrong expected outputs, caller expected:\n " +
+                  repr(sorted(expected)))
             print("Actual output:\n " + repr(sorted(actual)))
             return 1
 
     # Print the list of all the outputs for cmake.
    if args.print_cmake_outputs:
-        sys.stdout.write(";".join([os.path.join(args.output_dir, render.output) for render in renders]))
+        sys.stdout.write(";".join([
+            os.path.join(args.output_dir, render.output) for render in renders
+        ]))
         return 0
 
     outputs = _do_renders(renders, args.template_dir)
 
-    # The caller wants to assert that the outputs are only in specific directories.
+    # The caller wants to assert that the outputs are only in specific
+    # directories.
     if args.allowed_output_dirs_file != None:
         with open(args.allowed_output_dirs_file) as f:
             allowed_dirs = set([line.strip() for line in f.readlines()])
 
         for directory in allowed_dirs:
             if not directory.endswith('/'):
-                print('Allowed directory entry "{}" doesn\'t end with /'.format(directory))
+                print('Allowed directory entry "{}" doesn\'t '
+                      'end with /'.format(directory))
                 return 1
 
         def check_in_subdirectory(path, directory):
-            return path.startswith(directory) and not '/' in path[len(directory):]
+            return path.startswith(
+                directory) and not '/' in path[len(directory):]
 
         for render in renders:
-            if not any(check_in_subdirectory(render.output, directory) for directory in allowed_dirs):
-                print('Output file "{}" is not in the allowed directory list below:'.format(render.output))
+            if not any(
+                    check_in_subdirectory(render.output, directory)
+                    for directory in allowed_dirs):
+                print('Output file "{}" is not in the allowed directory '
+                      'list below:'.format(render.output))
                 for directory in sorted(allowed_dirs):
                     print('    "{}"'.format(directory))
                 return 1

@@ -19,9 +19,10 @@ import xml.etree.ElementTree as etree
 
 from generator_lib import Generator, run_generator, FileRender
 
+
 class ProcName:
     def __init__(self, gl_name, proc_name=None):
-        assert(gl_name.startswith('gl'))
+        assert gl_name.startswith('gl')
         if proc_name == None:
             proc_name = gl_name[2:]
@@ -40,7 +41,10 @@ class ProcName:
     def __repr__(self):
         return 'Proc("{}", "{}")'.format(self.gl_name, self.proc_name)
 
+
 ProcParam = namedtuple('ProcParam', ['name', 'type'])
 
+
 class Proc:
     def __init__(self, element):
         # Type declaration for return values and arguments all have the same
@@ -72,7 +76,9 @@ class Proc:
 
         self.params = []
         for param in element.findall('./param'):
-            self.params.append(ProcParam(param.find('name').text, parse_type_declaration(param)))
+            self.params.append(
+                ProcParam(
+                    param.find('name').text, parse_type_declaration(param)))
 
         self.gl_name = proto.find('name').text
         self.alias = None
@@ -83,7 +89,7 @@ class Proc:
         return self.gl_name
 
     def ProcName(self):
-        assert(self.gl_name.startswith('gl'))
+        assert self.gl_name.startswith('gl')
         return self.gl_name[2:]
 
     def PFNGLPROCNAME(self):
@@ -92,11 +98,14 @@ class Proc:
     def __repr__(self):
         return 'Proc("{}")'.format(self.gl_name)
 
+
 EnumDefine = namedtuple('EnumDefine', ['name', 'value'])
 Version = namedtuple('Version', ['major', 'minor'])
 VersionBlock = namedtuple('VersionBlock', ['version', 'procs', 'enums'])
 HeaderBlock = namedtuple('HeaderBlock', ['description', 'procs', 'enums'])
-ExtensionBlock = namedtuple('ExtensionBlock', ['extension', 'procs', 'enums', 'supported_specs'])
+ExtensionBlock = namedtuple('ExtensionBlock',
+                            ['extension', 'procs', 'enums', 'supported_specs'])
 
+
 def parse_version(version):
     return Version(*map(int, version.split('.')))
@@ -107,7 +116,7 @@ def compute_params(root, supported_extensions):
     all_procs = {}
     for command in root.findall('''commands[@namespace='GL']/command'''):
         proc = Proc(command)
-        assert(proc.gl_name not in all_procs)
+        assert proc.gl_name not in all_procs
         all_procs[proc.gl_name] = proc
 
     all_enums = {}
@@ -117,7 +126,7 @@ def compute_params(root, supported_extensions):
         if enum_name == 'GL_ACTIVE_PROGRAM_EXT':
             continue
 
-        assert(enum_name not in all_enums)
+        assert enum_name not in all_enums
         all_enums[enum_name] = EnumDefine(enum_name, enum.attrib['value'])
 
     # Get the list of all Desktop OpenGL function removed by the Core Profile.
@@ -126,13 +135,13 @@ def compute_params(root, supported_extensions):
         core_removed_procs.add(proc.attrib['name'])
 
     # Get list of enums and procs per OpenGL ES/Desktop OpenGL version
-    def parse_version_blocks(api, removed_procs = set()):
+    def parse_version_blocks(api, removed_procs=set()):
         blocks = []
         for section in root.findall('''feature[@api='{}']'''.format(api)):
             section_procs = []
             for command in section.findall('./require/command'):
                 proc_name = command.attrib['name']
-                assert(all_procs[proc_name].alias == None)
+                assert all_procs[proc_name].alias == None
                 if proc_name not in removed_procs:
                     section_procs.append(all_procs[proc_name])
@@ -140,7 +149,9 @@ def compute_params(root, supported_extensions):
             for enum in section.findall('./require/enum'):
                 section_enums.append(all_enums[enum.attrib['name']])
 
-            blocks.append(VersionBlock(parse_version(section.attrib['number']), section_procs, section_enums))
+            blocks.append(
+                VersionBlock(parse_version(section.attrib['number']),
+                             section_procs, section_enums))
 
         return blocks
 
@@ -148,12 +159,13 @@ def compute_params(root, supported_extensions):
     desktop_gl_blocks = parse_version_blocks('gl', core_removed_procs)
 
     def parse_extension_block(extension):
-        section = root.find('''extensions/extension[@name='{}']'''.format(extension))
+        section = root.find(
+            '''extensions/extension[@name='{}']'''.format(extension))
         supported_specs = section.attrib['supported'].split('|')
         section_procs = []
         for command in section.findall('./require/command'):
             proc_name = command.attrib['name']
-            assert(all_procs[proc_name].alias == None)
+            assert all_procs[proc_name].alias == None
             if proc_name not in removed_procs:
                 section_procs.append(all_procs[proc_name])
@@ -161,10 +173,11 @@ def compute_params(root, supported_extensions):
         for enum in section.findall('./require/enum'):
             section_enums.append(all_enums[enum.attrib['name']])
 
-        return ExtensionBlock(extension, section_procs, section_enums, supported_specs)
+        return ExtensionBlock(extension, section_procs, section_enums,
+                              supported_specs)
 
-    extension_desktop_gl_blocks = [];
-    extension_gles_blocks = [];
+    extension_desktop_gl_blocks = []
+    extension_gles_blocks = []
     for extension in supported_extensions:
         extension_block = parse_extension_block(extension)
         if 'gl' in extension_block.supported_specs:
@@ -176,6 +189,7 @@ def compute_params(root, supported_extensions):
     already_added_header_procs = set()
     already_added_header_enums = set()
     header_blocks = []
+
     def add_header_block(description, block):
         block_procs = []
         for proc in block.procs:
@@ -190,13 +204,18 @@ def compute_params(root, supported_extensions):
                 block_enums.append(enum)
 
         if len(block_procs) > 0 or len(block_enums) > 0:
-            header_blocks.append(HeaderBlock(description, block_procs, block_enums))
+            header_blocks.append(
+                HeaderBlock(description, block_procs, block_enums))
 
     for block in gles_blocks:
-        add_header_block('OpenGL ES {}.{}'.format(block.version.major, block.version.minor), block)
+        add_header_block(
+            'OpenGL ES {}.{}'.format(block.version.major, block.version.minor),
+            block)
 
     for block in desktop_gl_blocks:
-        add_header_block('Desktop OpenGL {}.{}'.format(block.version.major, block.version.minor), block)
+        add_header_block(
+            'Desktop OpenGL {}.{}'.format(block.version.major,
+                                          block.version.minor), block)
 
     for block in extension_desktop_gl_blocks:
         add_header_block(block.extension, block)
@@ -212,30 +231,50 @@ def compute_params(root, supported_extensions):
         'header_blocks': header_blocks,
     }
 
+
 class OpenGLLoaderGenerator(Generator):
     def get_description(self):
         return 'Generates code to load OpenGL function pointers'
 
     def add_commandline_arguments(self, parser):
-        parser.add_argument('--gl-xml', required=True, type=str, help='The Khronos gl.xml to use.')
-        parser.add_argument('--supported-extensions', required=True, type=str, help ='The JSON file that defines the OpenGL and GLES extensions to use.')
+        parser.add_argument('--gl-xml',
+                            required=True,
+                            type=str,
+                            help='The Khronos gl.xml to use.')
+        parser.add_argument(
+            '--supported-extensions',
+            required=True,
+            type=str,
+            help=
+            'The JSON file that defines the OpenGL and GLES extensions to use.'
+        )
 
     def get_file_renders(self, args):
         supported_extensions = []
         with open(args.supported_extensions) as f:
             supported_extensions_json = json.loads(f.read())
-            supported_extensions = supported_extensions_json['supported_extensions']
+            supported_extensions = supported_extensions_json[
+                'supported_extensions']
 
-        params = compute_params(etree.parse(args.gl_xml).getroot(), supported_extensions)
+        params = compute_params(
+            etree.parse(args.gl_xml).getroot(), supported_extensions)
 
         return [
-            FileRender('opengl/OpenGLFunctionsBase.cpp', 'src/dawn_native/opengl/OpenGLFunctionsBase_autogen.cpp', [params]),
-            FileRender('opengl/OpenGLFunctionsBase.h', 'src/dawn_native/opengl/OpenGLFunctionsBase_autogen.h', [params]),
-            FileRender('opengl/opengl_platform.h', 'src/dawn_native/opengl/opengl_platform_autogen.h', [params]),
+            FileRender(
+                'opengl/OpenGLFunctionsBase.cpp',
+                'src/dawn_native/opengl/OpenGLFunctionsBase_autogen.cpp',
+                [params]),
+            FileRender('opengl/OpenGLFunctionsBase.h',
+                       'src/dawn_native/opengl/OpenGLFunctionsBase_autogen.h',
+                       [params]),
+            FileRender('opengl/opengl_platform.h',
+                       'src/dawn_native/opengl/opengl_platform_autogen.h',
+                       [params]),
         ]
 
     def get_dependencies(self, args):
         return [os.path.abspath(args.gl_xml)]
 
+
 if __name__ == '__main__':
     sys.exit(run_generator(OpenGLLoaderGenerator()))

@@ -15,11 +15,15 @@
 
 import argparse, glob, os, sys
 
+
 def check_in_subdirectory(path, directory):
     return path.startswith(directory) and not '/' in path[len(directory):]
 
+
 def check_is_allowed(path, allowed_dirs):
-    return any(check_in_subdirectory(path, directory) for directory in allowed_dirs)
+    return any(
+        check_in_subdirectory(path, directory) for directory in allowed_dirs)
 
+
 def get_all_files_in_dir(find_directory):
     result = []
@@ -27,15 +31,28 @@ def get_all_files_in_dir(find_directory):
         result += [os.path.join(directory, filename) for filename in files]
     return result
 
+
 def run():
     # Parse command line arguments
     parser = argparse.ArgumentParser(
-        description = "Removes stale autogenerated files from gen/ directories."
-    )
-    parser.add_argument('--root-dir', type=str, help='The root directory, all other paths in files are relative to it.')
-    parser.add_argument('--allowed-output-dirs-file', type=str, help='The file containing a list of allowed directories')
-    parser.add_argument('--stale-dirs-file', type=str, help='The file containing a list of directories to check for stale files')
-    parser.add_argument('--stamp', type=str, help='A stamp written once this script completes')
+        description="Removes stale autogenerated files from gen/ directories.")
+    parser.add_argument(
+        '--root-dir',
+        type=str,
+        help='The root directory, all other paths in files are relative to it.'
+    )
+    parser.add_argument(
+        '--allowed-output-dirs-file',
+        type=str,
+        help='The file containing a list of allowed directories')
+    parser.add_argument(
+        '--stale-dirs-file',
+        type=str,
+        help=
+        'The file containing a list of directories to check for stale files')
+    parser.add_argument('--stamp',
+                        type=str,
+                        help='A stamp written once this script completes')
     args = parser.parse_args()
 
     root_dir = args.root_dir
@@ -43,11 +60,13 @@ def run():
 
     # Load the list of allowed and stale directories
     with open(args.allowed_output_dirs_file) as f:
-        allowed_dirs = set([os.path.join(root_dir, line.strip()) for line in f.readlines()])
+        allowed_dirs = set(
+            [os.path.join(root_dir, line.strip()) for line in f.readlines()])
 
     for directory in allowed_dirs:
         if not directory.endswith('/'):
-            print('Allowed directory entry "{}" doesn\'t end with /'.format(directory))
+            print('Allowed directory entry "{}" doesn\'t end with /'.format(
+                directory))
             return 1
 
     with open(args.stale_dirs_file) as f:
@@ -67,5 +86,6 @@ def run():
 
     return 0
 
+
 if __name__ == "__main__":
     sys.exit(run())

@@ -0,0 +1,2 @@
+DisableFormat: true
+SortIncludes: false

@@ -14,8 +14,10 @@
 
 
 def CheckChangeOnUpload(input_api, output_api):
-    return input_api.canned_checks.CheckChangedLUCIConfigs(input_api, output_api)
+    return input_api.canned_checks.CheckChangedLUCIConfigs(
+        input_api, output_api)
 
 
 def CheckChangeOnCommit(input_api, output_api):
-    return input_api.canned_checks.CheckChangedLUCIConfigs(input_api, output_api)
+    return input_api.canned_checks.CheckChangedLUCIConfigs(
+        input_api, output_api)

@@ -22,7 +22,8 @@ import sys
 import os
 import re
 
-base_path = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
+base_path = os.path.abspath(
+    os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
 
 # Look for a [Rr]elease build.
 perftests_paths = glob.glob('out/*elease*')
@@ -35,6 +36,7 @@ if sys.platform == 'win32':
 
 scores = []
 
+
 def mean(data):
     """Return the sample arithmetic mean of data."""
     n = len(data)
@@ -102,6 +104,7 @@ if len(sys.argv) >= 2:
 print('Using test executable: ' + perftests_path)
 print('Test name: ' + test_name)
 
+
 def get_results(metric, extra_args=[]):
     process = subprocess.Popen(
         [perftests_path, '--gtest_filter=' + test_name] + extra_args,
@@ -139,11 +142,14 @@ for experiment in range(max_experiments):
 
     if (len(scores) > 1):
         sys.stdout.write(", mean: %.2f" % mean(scores))
-        sys.stdout.write(", variation: %.2f%%" % (coefficient_of_variation(scores) * 100.0))
+        sys.stdout.write(", variation: %.2f%%" %
+                         (coefficient_of_variation(scores) * 100.0))
 
     if (len(scores) > 7):
         truncation_n = len(scores) >> 3
-        sys.stdout.write(", truncated mean: %.2f" % truncated_mean(scores, truncation_n))
-        sys.stdout.write(", variation: %.2f%%" % (truncated_cov(scores, truncation_n) * 100.0))
+        sys.stdout.write(", truncated mean: %.2f" %
+                         truncated_mean(scores, truncation_n))
+        sys.stdout.write(", variation: %.2f%%" %
+                         (truncated_cov(scores, truncation_n) * 100.0))
 
     print("")

@@ -0,0 +1,2 @@
+DisableFormat: true
+SortIncludes: false

@@ -41,9 +41,7 @@ config("khronos_headers_public") {
 }
 
 source_set("khronos_platform") {
-  sources = [
-    "KHR/khrplatform.h",
-  ]
+  sources = [ "KHR/khrplatform.h" ]
 
  public_configs = [ ":khronos_headers_public" ]
 }