Add negative number parsing into `@test_value`

This CL updates the intrinsics lexer to accept negative values for int and
float numerics, which allows writing `@test_value(-2)` in the def file.
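
For illustration, below is a minimal, self-contained Go sketch of the rule this change introduces (see the lexer diff further down). It is not the Dawn lexer API; the helper name lexNumber and its return values are hypothetical. A leading '-' is folded into the numeric token, a '.' turns the token into a float, and a '-' that is not followed by digits still lexes as a minus token.

package main

import (
	"fmt"
	"unicode"
)

// lexNumber is a simplified, standalone sketch (not the real Dawn lexer API)
// of the numeric lexing rule in this CL: a leading '-' is consumed into the
// number, a '.' makes the token a float, and if only the '-' was consumed the
// token is treated as a minus operator instead. It lexes the first token only.
func lexNumber(in []rune) (text, kind string) {
	isFloat := false
	isFirst := true
	n := 0
	for _, r := range in {
		if isFirst && r == '-' {
			// A leading '-' is tentatively part of the number.
			isFirst = false
			n++
			continue
		}
		isFirst = false
		if unicode.IsNumber(r) {
			n++
			continue
		}
		if !isFloat && r == '.' {
			// First '.' switches the token to a float.
			isFloat = true
			n++
			continue
		}
		break
	}
	switch {
	case n == 1 && in[0] == '-':
		return "-", "Minus" // a lone '-' is still the minus operator
	case isFloat:
		return string(in[:n]), "Float"
	default:
		return string(in[:n]), "Integer"
	}
}

func main() {
	// Inputs mirroring the commit message and the new lexer tests.
	for _, s := range []string{"-2", "-1234.56789", "-", "-123.456-789"} {
		text, kind := lexNumber([]rune(s))
		fmt.Printf("%-14q -> %-7s %q\n", s, kind, text)
	}
}

Running the sketch prints Integer "-2", Float "-1234.56789", Minus "-", and Float "-123.456" for the first token of each input, which mirrors the new lexer test expectations in the diff below.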

Change-Id: I2cad9b25a2932057ce9bc51dec6c32231e06f0a0
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/107440
Commit-Queue: Dan Sinclair <dsinclair@chromium.org>
Reviewed-by: Ben Clayton <bclayton@google.com>
Auto-Submit: Dan Sinclair <dsinclair@chromium.org>
Kokoro: Kokoro <noreply+kokoro@google.com>
dan sinclair authored on 2022-10-27 18:44:50 +00:00, committed by Dawn LUCI CQ
parent 91ed6f7289
commit c3cbc35650
15 changed files with 162 additions and 94 deletions

View File

@@ -405,9 +405,12 @@ type generator struct {
// eval executes the sub-template with the given name and arguments, returning
// the generated output
// args can be a single argument:
// arg[0]
//
// arg[0]
//
// or a list of name-value pairs:
// (args[0]: name, args[1]: value), (args[2]: name, args[3]: value)...
//
// (args[0]: name, args[1]: value), (args[2]: name, args[3]: value)...
func (g *generator) eval(template string, args ...interface{}) (string, error) {
target := g.t.Lookup(template)
if target == nil {

View File

@@ -20,8 +20,9 @@
// and 'global-scope' or 'function-scope' HTML class types.
//
// To run:
// go get golang.org/x/net/html # Only required once
// go run tools/check-spec-examples/main.go --compiler=<path-to-tint>
//
// go get golang.org/x/net/html # Only required once
// go run tools/check-spec-examples/main.go --compiler=<path-to-tint>
package main
import (

View File

@@ -37,8 +37,9 @@ func reEscape(s string) string {
// UpdateCTSHashInDeps replaces the CTS hashes in 'deps' with 'newCTSHash'.
// Returns:
// newDEPS - the new DEPS content
// oldCTSHash - the old CTS hash in the 'deps'
//
// newDEPS - the new DEPS content
// oldCTSHash - the old CTS hash in the 'deps'
func UpdateCTSHashInDeps(deps, newCTSHash string) (newDEPS, oldCTSHash string, err error) {
// Collect old CTS hashes, and replace these with newCTSHash
b := strings.Builder{}

View File

@@ -246,8 +246,10 @@ func parseSection(in string) ([]int, error) {
}
// concatRules concatenate rules slice to make two string outputs;
// txt, a human-readable string
// tsv, a tab separated string
//
// txt, a human-readable string
// tsv, a tab separated string
//
// If testNameFilter is a non-empty string, then only rules whose TestName
// contains the string are included
func concatRules(rules []rule, testNameFilter string) (string, string) {
@@ -679,9 +681,10 @@ var (
// example in:
// ` float abs:
// T is f32 or vecN<f32>
// abs(e: T ) -> T
// Returns the absolute value of e (e.g. e with a positive sign bit). Component-wise when T is a vector.
// (GLSLstd450Fabs)`
//
// abs(e: T ) -> T
// Returns the absolute value of e (e.g. e with a positive sign bit). Component-wise when T is a vector.
// (GLSLstd450Fabs)`
//
// example out:
// `float abs:
@@ -703,10 +706,11 @@ var (
// cleanUpStartEnd creates a string by removing all extra spaces,
// newlines and tabs form the start and end of the input string.
// Example:
// input: "\s\t\nHello\s\n\t\Bye\s\s\s\t\n\n\n"
// output: "Hello\s\n\tBye"
// input2: "\nbye\n\n"
// output2: "\nbye"
//
// input: "\s\t\nHello\s\n\t\Bye\s\s\s\t\n\n\n"
// output: "Hello\s\n\tBye"
// input2: "\nbye\n\n"
// output2: "\nbye"
func cleanUpStartEnd(in string) string {
out := reCleanUpStartEnd.ReplaceAllString(in, "")
return out
@@ -721,13 +725,17 @@ var (
// testName creates a test name given a rule id (ie. section name), description and section
// returns for a builtin rule:
// testName:${section name} + "," + ${builtin name}
// builtinName: ${builtin name}
// err: nil
//
// testName:${section name} + "," + ${builtin name}
// builtinName: ${builtin name}
// err: nil
//
// returns for a other rules:
// testName: ${section name} + "_rule_ + " + ${string(counter)}
// builtinName: ""
// err: nil
//
// testName: ${section name} + "_rule_ + " + ${string(counter)}
// builtinName: ""
// err: nil
//
// if it cannot create a unique name it returns "", "", err.
func testName(id string, desc string, section string) (testName, builtinName string, err error) {
// regex for every thing other than letters and numbers
@@ -838,9 +846,10 @@ func getUnimplementedTestPlan(p Parser, path string) error {
// getTestPlanFilePath returns a sort friendly path
// example: if we have 10 sections, and generate filenames naively, this will be the sorted result:
// section1.spec.ts -> section10.spec.ts -> section2.spec.ts -> ...
//
// section1.spec.ts -> section10.spec.ts -> section2.spec.ts -> ...
// if we make all the section numbers have the same number of digits, we will get:
// section01.spec.ts -> section02.spec.ts -> ... -> section10.spec.ts
// section01.spec.ts -> section02.spec.ts -> ... -> section10.spec.ts
func getTestPlanFilePath(path string, x, y, digits int) (string, error) {
fileName := ""
if y != -1 {

View File

@@ -39,8 +39,8 @@ type Content struct {
// A chunk ends at the first blank line, or at the transition from an
// expectation to a line-comment.
type Chunk struct {
Comments []string // Line comments at the top of the chunk
Expectations Expectations // Expectations for the chunk
Comments []string // Line comments at the top of the chunk
Expectations Expectations // Expectations for the chunk
}
// Tags holds the tag information parsed in the comments between the
@@ -234,9 +234,11 @@ func (e Expectation) Clone() Expectation {
}
// Compare compares the relative order of a and b, returning:
// -1 if a should come before b
// 1 if a should come after b
// 0 if a and b are identical
//
// -1 if a should come before b
// 1 if a should come after b
// 0 if a and b are identical
//
// Note: Only comparing bug, query, and tags (in that order).
func (a Expectation) Compare(b Expectation) int {
switch strings.Compare(a.Bug, b.Bug) {

View File

@@ -29,12 +29,12 @@ import (
// results.
//
// Update will:
// Remove any expectation lines that have a query where no results match.
// Remove expectations lines that are in a chunk which is not annotated with
// 'KEEP', and all test results have the status 'Pass'.
// Remove chunks that have had all expectation lines removed.
// Appends new chunks for flaky and failing tests which are not covered by
// existing expectation lines.
// - Remove any expectation lines that have a query where no results match.
// - Remove expectations lines that are in a chunk which is not annotated with
// 'KEEP', and all test results have the status 'Pass'.
// - Remove chunks that have had all expectation lines removed.
// - Appends new chunks for flaky and failing tests which are not covered by
// existing expectation lines.
//
// Update returns a list of diagnostics for things that should be addressed.
//
@@ -93,8 +93,8 @@ type updater struct {
}
// Returns 'results' with additional 'consumed' results for tests that have
// 'Skip' expectations. This fills in gaps for results, preventing tree
// reductions from marking skipped results as failure, which could result in
// 'Skip' expectations. This fills in gaps for results, preventing tree
// reductions from marking skipped results as failure, which could result in
// expectation collisions.
func (c *Content) appendConsumedResultsForSkippedTests(results result.List,
testlist []query.Query,
@@ -539,10 +539,10 @@ func (u *updater) resultsToExpectations(results result.List, bug, comment string
}
// cleanupTags returns a copy of the provided results with:
// All tags not found in the expectations list removed
// All but the highest priority tag for any tag-set.
// The tag sets are defined by the `BEGIN TAG HEADER` / `END TAG HEADER`
// section at the top of the expectations file.
// - All tags not found in the expectations list removed
// - All but the highest priority tag for any tag-set.
// The tag sets are defined by the `BEGIN TAG HEADER` / `END TAG HEADER`
// section at the top of the expectations file.
func (u *updater) cleanupTags(results result.List) result.List {
return results.TransformTags(func(t result.Tags) result.Tags {
type HighestPrioritySetTag struct {
@@ -570,11 +570,11 @@ func (u *updater) cleanupTags(results result.List) result.List {
// treeReducer is a function that can be used by StatusTree.Reduce() to reduce
// tree nodes with the same status.
// treeReducer will collapse trees nodes if any of the following are true:
// All child nodes have the same status
// More than 75% of the child nodes have a non-pass status, and none of the
// children are consumed.
// There are more than 20 child nodes with a non-pass status, and none of the
// children are consumed.
// - All child nodes have the same status
// - More than 75% of the child nodes have a non-pass status, and none of the
// children are consumed.
// - There are more than 20 child nodes with a non-pass status, and none of the
// children are consumed.
func treeReducer(statuses []result.Status) *result.Status {
counts := map[result.Status]int{}
for _, s := range statuses {

View File

@@ -37,15 +37,16 @@ import (
// Query represents a WebGPU test query
// Example queries:
// 'suite'
// 'suite:*'
// 'suite:file'
// 'suite:file,*'
// 'suite:file,file'
// 'suite:file,file,*'
// 'suite:file,file,file:test'
// 'suite:file,file,file:test:*'
// 'suite:file,file,file:test,test:case;*'
//
// 'suite'
// 'suite:*'
// 'suite:file'
// 'suite:file,*'
// 'suite:file,file'
// 'suite:file,file,*'
// 'suite:file,file,file:test'
// 'suite:file,file,file:test:*'
// 'suite:file,file,file:test,test:case;*'
type Query struct {
Suite string
Files string
@@ -269,9 +270,10 @@ func (q Query) String() string {
}
// Compare compares the relative order of q and o, returning:
// -1 if q should come before o
// 1 if q should come after o
// 0 if q and o are identical
//
// -1 if q should come before o
// 1 if q should come after o
// 0 if q and o are identical
func (q Query) Compare(o Query) int {
for _, cmp := range []struct{ a, b string }{
{q.Suite, o.Suite},
@@ -335,9 +337,10 @@ func (q Query) Contains(o Query) bool {
}
// Callback function for Query.Walk()
// q is the query for the current segment.
// t is the target of the query q.
// n is the name of the new segment.
//
// q is the query for the current segment.
// t is the target of the query q.
// n is the name of the new segment.
type WalkCallback func(q Query, t Target, n string) error
// Walk calls 'f' for each suite, file, test segment, and calls f once for all

View File

@@ -414,12 +414,12 @@ func (t *Tree[Data]) List() []QueryData[Data] {
// Glob returns a list of QueryData's for every node that is under the given
// query, which holds data.
// Glob handles wildcards as well as non-wildcard queries:
// * A non-wildcard query will match the node itself, along with every node
// under the query. For example: 'a:b' will match every File and Test
// node under 'a:b', including 'a:b' itself.
// * A wildcard Query will include every node under the parent node with the
// matching Query target. For example: 'a:b:*' will match every Test
// node (excluding File nodes) under 'a:b', 'a:b' will not be included.
// - A non-wildcard query will match the node itself, along with every node
// under the query. For example: 'a:b' will match every File and Test
// node under 'a:b', including 'a:b' itself.
// - A wildcard Query will include every node under the parent node with the
// matching Query target. For example: 'a:b:*' will match every Test
// node (excluding File nodes) under 'a:b', 'a:b' will not be included.
func (t *Tree[Data]) Glob(q Query) ([]QueryData[Data], error) {
out := []QueryData[Data]{}
err := t.glob(q, func(n *TreeNode[Data]) error {

View File

@@ -43,7 +43,9 @@ type Result struct {
// Format writes the Result to the fmt.State
// The Result is printed as a single line, in the form:
// <query> <tags> <status>
//
// <query> <tags> <status>
//
// This matches the order in which results are sorted.
func (r Result) Format(f fmt.State, verb rune) {
if len(r.Tags) > 0 {
@@ -61,9 +63,11 @@ func (r Result) String() string {
}
// Compare compares the relative order of r and o, returning:
// -1 if r should come before o
// 1 if r should come after o
// 0 if r and o are identical
//
// -1 if r should come before o
// 1 if r should come after o
// 0 if r and o are identical
//
// Note: Result.Duration is not considered in comparison.
func (r Result) Compare(o Result) int {
a, b := r, o
@@ -89,7 +93,9 @@ func (r Result) Compare(o Result) int {
}
// Parse parses the result from a string of the form:
// <query> <tags> <status>
//
// <query> <tags> <status>
//
// <tags> may be omitted if there were no tags.
func Parse(in string) (Result, error) {
line := in
@@ -302,7 +308,7 @@ func (l List) FilterByVariant(tags Tags) List {
})
}
/// FilterByQuery returns the results that match the given query
// / FilterByQuery returns the results that match the given query
func (l List) FilterByQuery(q query.Query) List {
return l.Filter(func(r Result) bool {
return q.Contains(r.Query)

View File

@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !windows
// +build !windows
// Package fileutils contains utility functions for files

View File

@@ -30,9 +30,10 @@ type Test func(path string) bool
//
// pattern uses forward-slashes for directory separators '/', and may use the
// following wildcards:
// ? - matches any single non-separator character
// * - matches any sequence of non-separator characters
// ** - matches any sequence of characters including separators
//
// ? - matches any single non-separator character
// * - matches any sequence of non-separator characters
// ** - matches any sequence of characters including separators
func New(pattern string) (Test, error) {
// Transform pattern into a regex by replacing the uses of `?`, `*`, `**`
// with corresponding regex patterns.

View File

@@ -219,7 +219,8 @@ func (o MatcherOptions) Format(w fmt.State, verb rune) {
// TemplatedNames is a list of TemplatedName
// Example:
// a<b>, c<d, e>
//
// a<b>, c<d, e>
type TemplatedNames []TemplatedName
// Format implements the fmt.Formatter interface
@@ -234,7 +235,8 @@ func (l TemplatedNames) Format(w fmt.State, verb rune) {
// TemplatedName is an identifier with optional templated arguments
// Example:
// vec<N, T>
//
// vec<N, T>
type TemplatedName struct {
Source tok.Source
Name string
@@ -253,7 +255,8 @@ func (t TemplatedName) Format(w fmt.State, verb rune) {
// MemberNames is a list of MemberName
// Example:
// a.b, c.d
//
// a.b, c.d
type MemberNames []MemberName
// Format implements the fmt.Formatter interface
@@ -298,7 +301,8 @@ func (p TypeDecl) Format(w fmt.State, verb rune) {
// TemplateParams is a list of TemplateParam
// Example:
// <A, B : TyB>
//
// <A, B : TyB>
type TemplateParams []TemplateParam
// Format implements the fmt.Formatter interface
@@ -317,8 +321,9 @@ func (p TemplateParams) Format(w fmt.State, verb rune) {
// TemplateParam describes a template parameter with optional type
// Example:
// <Name>
// <Name: Type>
//
// <Name>
// <Name: Type>
type TemplateParam struct {
Source tok.Source
Name string
@@ -336,7 +341,8 @@ func (t TemplateParam) Format(w fmt.State, verb rune) {
// Attributes is a list of Attribute
// Example:
// [[a(x), b(y)]]
//
// [[a(x), b(y)]]
type Attributes []Attribute
// Format implements the fmt.Formatter interface
@@ -363,7 +369,8 @@ func (l *Attributes) Take(name string) *Attribute {
// Attribute describes a single attribute
// Example:
// @a(x)
//
// @a(x)
type Attribute struct {
Source tok.Source
Name string

View File

@@ -12,10 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
/// Package gen holds types and helpers for generating templated code from the
/// intrinsic.def file.
///
/// Used by tools/src/cmd/gen/main.go
// Package gen holds types and helpers for generating templated code from the
// intrinsic.def file.
//
// Used by tools/src/cmd/gen/main.go
package gen
import (
@@ -338,9 +338,12 @@ func (b *overloadBuilder) matcherIndex(n sem.Named) (int, error) {
// The order of returned matcher indices is always the order of the fully
// qualified name as read from left to right.
// For example, calling collectMatcherIndices() for the fully qualified name:
// A<B<C, D>, E<F, G<H>, I>
//
// A<B<C, D>, E<F, G<H>, I>
//
// Would return the matcher indices:
// A, B, C, D, E, F, G, H, I
//
// A, B, C, D, E, F, G, H, I
func (b *overloadBuilder) collectMatcherIndices(fqn sem.FullyQualifiedName) ([]int, error) {
idx, err := b.matcherIndex(fqn.Target)
if err != nil {
@@ -416,9 +419,12 @@ func BuildIntrinsicTable(s *sem.Sem) (*IntrinsicTable, error) {
// SplitDisplayName splits displayName into parts, where text wrapped in {}
// braces are not quoted and the rest is quoted. This is used to help process
// the string value of the [[display()]] decoration. For example:
// SplitDisplayName("vec{N}<{T}>")
//
// SplitDisplayName("vec{N}<{T}>")
//
// would return the strings:
// [`"vec"`, `N`, `"<"`, `T`, `">"`]
//
// [`"vec"`, `N`, `"<"`, `T`, `">"`]
func SplitDisplayName(displayName string) []string {
parts := []string{}
pending := strings.Builder{}

View File

@@ -93,7 +93,6 @@ func (l *lexer) lex() error {
		case l.match("/", tok.Divide):
		case l.match(".", tok.Dot):
		case l.match("->", tok.Arrow):
		case l.match("-", tok.Minus):
		case l.match("fn", tok.Function):
		case l.match("op", tok.Operator):
		case l.match("enum", tok.Enum):
@@ -103,12 +102,22 @@ func (l *lexer) lex() error {
		case l.match("match", tok.Match):
		case unicode.IsLetter(l.peek(0)) || l.peek(0) == '_':
			l.tok(l.count(alphaNumericOrUnderscore), tok.Identifier)
		case unicode.IsNumber(l.peek(0)):
		case unicode.IsNumber(l.peek(0)) || l.peek(0) == '-':
			isFloat := false
			isNegative := false
			isFirst := true
			pred := func(r rune) bool {
				if isFirst && r == '-' {
					isNegative = true
					isFirst = false
					return true
				}
				isFirst = false
				if unicode.IsNumber(r) {
					return true
				}
				if !isFloat && r == '.' {
					isFloat = true
					return true
@@ -116,7 +125,9 @@ return false
				return false
			}
			n := l.count(pred)
			if isFloat {
			if isNegative && n == 1 {
				l.tok(1, tok.Minus)
			} else if isFloat {
				l.tok(n, tok.Float)
			} else {
				l.tok(n, tok.Integer)

View File

@@ -47,9 +47,15 @@ func TestLexTokens(t *testing.T) {
		{"123456789", []tok.Token{{Kind: tok.Integer, Runes: []rune("123456789"), Source: tok.Source{
			S: loc(1, 1, 0), E: loc(1, 10, 9),
		}}}},
		{"-123456789", []tok.Token{{Kind: tok.Integer, Runes: []rune("-123456789"), Source: tok.Source{
			S: loc(1, 1, 0), E: loc(1, 11, 10),
		}}}},
		{"1234.56789", []tok.Token{{Kind: tok.Float, Runes: []rune("1234.56789"), Source: tok.Source{
			S: loc(1, 1, 0), E: loc(1, 11, 10),
		}}}},
		{"-1234.56789", []tok.Token{{Kind: tok.Float, Runes: []rune("-1234.56789"), Source: tok.Source{
			S: loc(1, 1, 0), E: loc(1, 12, 11),
		}}}},
		{"123.456.789", []tok.Token{
			{Kind: tok.Float, Runes: []rune("123.456"), Source: tok.Source{
				S: loc(1, 1, 0), E: loc(1, 8, 7),
@@ -61,6 +67,14 @@ func TestLexTokens(t *testing.T) {
				S: loc(1, 9, 8), E: loc(1, 12, 11),
			}},
		}},
		{"-123.456-789", []tok.Token{
			{Kind: tok.Float, Runes: []rune("-123.456"), Source: tok.Source{
				S: loc(1, 1, 0), E: loc(1, 9, 8),
			}},
			{Kind: tok.Integer, Runes: []rune("-789"), Source: tok.Source{
				S: loc(1, 9, 8), E: loc(1, 13, 12),
			}},
		}},
		{"match", []tok.Token{{Kind: tok.Match, Runes: []rune("match"), Source: tok.Source{
			S: loc(1, 1, 0), E: loc(1, 6, 5),
		}}}},
@@ -88,6 +102,9 @@ func TestLexTokens(t *testing.T) {
		{",", []tok.Token{{Kind: tok.Comma, Runes: []rune(","), Source: tok.Source{
			S: loc(1, 1, 0), E: loc(1, 2, 1),
		}}}},
		{"-", []tok.Token{{Kind: tok.Minus, Runes: []rune("-"), Source: tok.Source{
			S: loc(1, 1, 0), E: loc(1, 2, 1),
		}}}},
		{"<", []tok.Token{{Kind: tok.Lt, Runes: []rune("<"), Source: tok.Source{
			S: loc(1, 1, 0), E: loc(1, 2, 1),