Clean up for automatically detected issues
Mostly typos and one case of a return not being error checked.

Change-Id: Id33e93402b05c165aa129bb22e4b0c7f55b0a71b
Reviewed-on: https://dawn-review.googlesource.com/c/tint/+/79141
Kokoro: Ryan Harrison <rharrison@chromium.org>
Reviewed-by: Antonio Maiorano <amaiorano@google.com>
Commit-Queue: Ryan Harrison <rharrison@chromium.org>
Parent: 148711d275
Commit: 43ad59d3dc
@@ -67,7 +67,7 @@ var (
 	globalRuleCounter = 0
 )
 
-// Holds all the information about a wgsl rule
+// Holds all the information about a WGSL rule
 type rule struct {
 	Number  int // The index of this obj in an array of 'rules'
 	Section int // The section this rule belongs to
@@ -75,7 +75,7 @@ type rule struct {
 	URL         string // The section's URL of this rule
 	Description string // The rule's description
 	TestName    string // The suggested test name to use when writing CTS
-	Keyword     string // The keyword eg. MUST, ALGORITHM, ..., ie. Indicating why the rule is added
+	Keyword     string // The keyword e.g. MUST, ALGORITHM, ..., i.e. Indicating why the rule is added
 	Desc        []string
 	Sha         string
 }
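Taken together, the two hunks above suggest the rule struct reads roughly as follows after this change. This is only a reconstruction from the visible context lines: the hunk ranges (-67,7 and -75,7) skip old line 74, so at least one field between Section and URL is not shown.

    // Reconstructed from the diff context above; not a verbatim copy of the file.
    type rule struct {
        Number      int // The index of this obj in an array of 'rules'
        Section     int // The section this rule belongs to
        // ... at least one field (old line 74) omitted by the diff ...
        URL         string // The section's URL of this rule
        Description string // The rule's description
        TestName    string // The suggested test name to use when writing CTS
        Keyword     string // The keyword e.g. MUST, ALGORITHM, ..., i.e. Indicating why the rule is added
        Desc        []string
        Sha         string
    }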
@@ -149,7 +149,10 @@ if omitted, a human readable version of the rules is written to stdout`)
 	rules := parser.rules
 
 	if *ctsDir != "" {
-		getUnimplementedTestPlan(*parser, *ctsDir)
+		err := getUnimplementedTestPlan(*parser, *ctsDir)
+		if err != nil {
+			return err
+		}
 	}
 
 	txt, tsv := concatRules(rules)
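This hunk is the non-typo fix: the error returned by getUnimplementedTestPlan was previously dropped. Below is a minimal standalone sketch of the same idiom; the stub function, its simplified signature, and the wrapping message are hypothetical and only illustrate the pattern, not the code that landed.

    package main

    import (
        "errors"
        "fmt"
    )

    // Hypothetical stand-in for getUnimplementedTestPlan (the real signature
    // differs): any call whose error result must not be silently ignored.
    func getUnimplementedTestPlan(ctsDir string) error {
        if ctsDir == "" {
            return errors.New("no CTS directory given")
        }
        return nil
    }

    func run(ctsDir string) error {
        // The pattern added by the commit: capture the error and propagate it.
        // The statement-scoped form keeps err out of the surrounding scope.
        if err := getUnimplementedTestPlan(ctsDir); err != nil {
            return fmt.Errorf("generating unimplemented test plan: %w", err)
        }
        return nil
    }

    func main() {
        if err := run(""); err != nil {
            fmt.Println("error:", err)
        }
    }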
@@ -175,7 +178,7 @@ if omitted, a human readable version of the rules is written to stdout`)
 
 // getSectionRange scans all the rules and returns the rule index interval of a given section.
 // The sections range is the interval: rules[start:end].
-// example: section = [x, y, z] ie. x.y.z(.w)* it returns (start = min(w),end = max(w))
+// example: section = [x, y, z] i.e. x.y.z(.w)* it returns (start = min(w),end = max(w))
 // if there are no rules extracted from x.y.z it returns (-1, -1)
 func getSectionRange(rules []rule, s []int) (start, end int, err error) {
 	start = -1
@@ -222,7 +225,7 @@ func getSectionRange(rules []rule, s []int) (start, end int, err error) {
 	return start, end, nil
 }
 
-// parseSection return the numbers for any dot-seprated string of numbers
+// parseSection return the numbers for any dot-separated string of numbers
 // example: x.y.z.w returns [x, y, z, w]
 // returns an error if the string does not match "^\d(.\d)*$"
 func parseSection(in string) ([]int, error) {
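The hunk only shows parseSection's declaration. Going by its signature and the comments above, a plausible sketch (not the tool's actual body) could look like the following, with the result then feeding getSectionRange from the previous hunk.

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    // Sketch of parseSection based solely on its documented behaviour:
    // split a dot-separated string of numbers and error out on anything else.
    func parseSection(in string) ([]int, error) {
        var out []int
        for _, part := range strings.Split(in, ".") {
            n, err := strconv.Atoi(part)
            if err != nil {
                return nil, fmt.Errorf("invalid section string %q: %w", in, err)
            }
            out = append(out, n)
        }
        return out, nil
    }

    func main() {
        s, err := parseSection("4.3.2")
        if err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println(s) // [4 3 2]
        // The slice would then be passed to getSectionRange(rules, s) to obtain
        // the rules[start:end] interval documented in the hunk above.
    }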
@@ -365,7 +368,7 @@ func parseSpec(args []string) (*html.Node, error) {
 
 // containsKeyword returns (true, 'kw'), if input string 'data' contains an
 // element of the string list, otherwise it returns (false, "")
-// search is not case sensitive
+// search is not case-sensitive
 func containsKeyword(data string, list []string) (bool, string) {
 	for _, kw := range list {
 		if strings.Contains(
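The hunk cuts off inside containsKeyword. Relying only on its documented behaviour (case-insensitive containment, first matching keyword returned), a sketch might be:

    package main

    import (
        "fmt"
        "strings"
    )

    // Sketch of containsKeyword: report the first keyword found in 'data',
    // ignoring case. The real implementation (truncated in the hunk) may differ.
    func containsKeyword(data string, list []string) (bool, string) {
        upper := strings.ToUpper(data)
        for _, kw := range list {
            if strings.Contains(upper, strings.ToUpper(kw)) {
                return true, kw
            }
        }
        return false, ""
    }

    func main() {
        found, kw := containsKeyword("The expression must be a constant.", []string{"MUST", "ALGORITHM"})
        fmt.Println(found, kw) // true MUST
    }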
@@ -378,7 +381,7 @@ func containsKeyword(data string, list []string) (bool, string) {
 	return false, ""
 }
 
-// parser holds the information extracted from the spec
+// Parser holds the information extracted from the spec
 // TODO(sarahM0): https://bugs.c/tint/1149/ clean up the vars holding section information
 type Parser struct {
 	rules []rule // a slice to store the rules extracted from the spec
@@ -398,9 +401,9 @@ func (p *Parser) getRules(node *html.Node) error {
 	section, subSection, err := getSectionInfo(node)
 
 	if err != nil {
-		//skip this node and move on to its children
+		// skip this node and move on to its children
 	} else {
-		// Do not generate rules for introdoctory sections
+		// Do not generate rules for introductory sections
 		if section > 2 {
 			// Check if this node is visited before. This is necessary since
 			// sometimes to create rule description we visit siblings or children
@@ -435,7 +438,7 @@ func (p *Parser) getRules(node *html.Node) error {
 	return nil
 }
 
-// gatherKeyworkRules scans the HTML node data, adds a new rules if it contains one
+// gatherKeywordRules scans the HTML node data, adds a new rules if it contains one
 // of the keywords
 func (p *Parser) getKeywordRule(node *html.Node, section int, subSection string) error {
 	if node.Type != html.TextNode {
@@ -503,7 +506,7 @@ func getNodeData(node *html.Node) string {
 
 // getAlgorithmRules scans the HTML node for blocks that
 // contain an 'algorithm' class, populating the rule slice.
-// ie. <tr algorithm=...> and <p algorithm=...>
+// i.e. <tr algorithm=...> and <p algorithm=...>
 func (p *Parser) getAlgorithmRule(node *html.Node, section int, subSection string) error {
 	if !hasClass(node, "algorithm") {
 		return nil
@@ -783,7 +786,7 @@ func printNodeText(node *html.Node, sb *strings.Builder) {
 }
 
 // getNodeAttrValue scans attributes of 'node' and returns the value of attribute 'key'
-// or an empty string if 'node' doesn't have attribute 'key'
+// or an empty string if 'node' doesn't have an attribute 'key'
 func getNodeAttrValue(node *html.Node, key string) string {
 	for _, attr := range node.Attr {
 		if attr.Key == key {
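For context, here is a plausible completion of getNodeAttrValue (only the two return statements are guessed beyond what the hunk shows) together with a hypothetical hasClass helper of the kind getAlgorithmRule relies on, using golang.org/x/net/html:

    package main

    import (
        "fmt"
        "strings"

        "golang.org/x/net/html"
    )

    // Plausible completion of getNodeAttrValue, continuing the loop visible in
    // the hunk above.
    func getNodeAttrValue(node *html.Node, key string) string {
        for _, attr := range node.Attr {
            if attr.Key == key {
                return attr.Val
            }
        }
        return ""
    }

    // hasClass is referenced by getAlgorithmRule but not shown in the diff;
    // this is a hypothetical sketch built on getNodeAttrValue.
    func hasClass(node *html.Node, class string) bool {
        for _, c := range strings.Fields(getNodeAttrValue(node, "class")) {
            if c == class {
                return true
            }
        }
        return false
    }

    func main() {
        doc, err := html.Parse(strings.NewReader(`<p class="algorithm note">x</p>`))
        if err != nil {
            fmt.Println(err)
            return
        }
        // Walk the parsed tree and report whether the <p> node carries the class.
        var visit func(*html.Node)
        visit = func(n *html.Node) {
            if n.Type == html.ElementNode && n.Data == "p" {
                fmt.Println(hasClass(n, "algorithm")) // true
            }
            for c := n.FirstChild; c != nil; c = c.NextSibling {
                visit(c)
            }
        }
        visit(doc)
    }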