diff --git a/src/tint/reader/wgsl/lexer.cc b/src/tint/reader/wgsl/lexer.cc
index 89804f2fd0..6371db7408 100644
--- a/src/tint/reader/wgsl/lexer.cc
+++ b/src/tint/reader/wgsl/lexer.cc
@@ -96,9 +96,10 @@ std::vector<Token> Lexer::Lex() {
     while (true) {
         tokens.emplace_back(next());
 
-        // If the token can be split, we insert a placeholder element into
-        // the stream to hold the split character.
-        if (tokens.back().IsSplittable()) {
+        // If the token can be split, we insert a placeholder element(s) into the stream to hold the
+        // split character.
+        size_t num_placeholders = tokens.back().NumPlaceholders();
+        for (size_t i = 0; i < num_placeholders; i++) {
             auto src = tokens.back().source();
             src.range.begin.column++;
             tokens.emplace_back(Token(Token::Type::kPlaceholder, src));
diff --git a/src/tint/reader/wgsl/lexer_test.cc b/src/tint/reader/wgsl/lexer_test.cc
index ffa85bd8fc..a1a476401b 100644
--- a/src/tint/reader/wgsl/lexer_test.cc
+++ b/src/tint/reader/wgsl/lexer_test.cc
@@ -989,7 +989,6 @@ INSTANTIATE_TEST_SUITE_P(LexerTest,
                                          TokenData{"&=", Token::Type::kAndEqual},
                                          TokenData{"|=", Token::Type::kOrEqual},
                                          TokenData{"^=", Token::Type::kXorEqual},
-                                         TokenData{">>=", Token::Type::kShiftRightEqual},
                                          TokenData{"<<=", Token::Type::kShiftLeftEqual}));
 
 using SplittablePunctuationTest = testing::TestWithParam<TokenData>;
@@ -1010,18 +1009,23 @@ TEST_P(SplittablePunctuationTest, Parses) {
         EXPECT_EQ(t.source().range.end.column, 1u + strlen(params.input));
     }
 
-    {
-        auto& t = list[1];
+    const size_t num_placeholders = list[0].NumPlaceholders();
+    EXPECT_GT(num_placeholders, 0u);
+    ASSERT_EQ(list.size(), 2u + num_placeholders);
+
+    for (size_t i = 0; i < num_placeholders; i++) {
+        auto& t = list[1 + i];
         EXPECT_TRUE(t.Is(Token::Type::kPlaceholder));
         EXPECT_EQ(t.source().range.begin.line, 1u);
-        EXPECT_EQ(t.source().range.begin.column, 2u);
+        EXPECT_EQ(t.source().range.begin.column, 2u + i);
         EXPECT_EQ(t.source().range.end.line, 1u);
         EXPECT_EQ(t.source().range.end.column, 1u + strlen(params.input));
     }
 
     {
-        auto& t = list[2];
-        EXPECT_EQ(t.source().range.begin.column, 1 + std::string(params.input).size());
+        auto& t = list.back();
+        EXPECT_TRUE(t.Is(Token::Type::kEOF));
+        EXPECT_EQ(t.source().range.begin.column, 2u + num_placeholders);
     }
 }
 INSTANTIATE_TEST_SUITE_P(LexerTest,
@@ -1029,7 +1033,8 @@ INSTANTIATE_TEST_SUITE_P(LexerTest,
                          testing::Values(TokenData{"&&", Token::Type::kAndAnd},
                                          TokenData{">=", Token::Type::kGreaterThanEqual},
                                          TokenData{"--", Token::Type::kMinusMinus},
-                                         TokenData{">>", Token::Type::kShiftRight}));
+                                         TokenData{">>", Token::Type::kShiftRight},
+                                         TokenData{">>=", Token::Type::kShiftRightEqual}));
 
 using KeywordTest = testing::TestWithParam<TokenData>;
 TEST_P(KeywordTest, Parses) {
diff --git a/src/tint/reader/wgsl/token.h b/src/tint/reader/wgsl/token.h
index 2aeffa9267..1c8531b13e 100644
--- a/src/tint/reader/wgsl/token.h
+++ b/src/tint/reader/wgsl/token.h
@@ -379,10 +379,20 @@ class Token {
         return type_ == Type::kVec2 || type_ == Type::kVec3 || type_ == Type::kVec4;
     }
 
-    /// @returns true if the token can be split during parse into component tokens
-    bool IsSplittable() const {
-        return Is(Type::kShiftRight) || Is(Type::kGreaterThanEqual) || Is(Type::kAndAnd) ||
-               Is(Type::kMinusMinus);
+    /// @returns the number of placeholder tokens required to follow the token, in order to provide
+    /// space for token splitting.
+    size_t NumPlaceholders() const {
+        switch (type_) {
+            case Type::kShiftRightEqual:
+                return 2;
+            case Type::kShiftRight:
+            case Type::kGreaterThanEqual:
+            case Type::kAndAnd:
+            case Type::kMinusMinus:
+                return 1;
+            default:
+                return 0;
+        }
     }
 
     /// @returns true if the token is a binary operator
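
Note: the placeholder slots exist so that a later pass can split a maximal-munch token in place (presumably for cases such as closing nested ">" in WGSL types) without inserting into, and shifting, the token vector. The sketch below is a minimal standalone illustration of that idea using hypothetical names, not Tint's real Token/Lexer classes; per the updated SplittablePunctuationTest, lexing ">>=" now yields four tokens: kShiftRightEqual, two kPlaceholder entries, and kEOF.

// Standalone sketch (hypothetical names, not Tint code) of why ">>=" needs two
// trailing placeholders: reserving the slots at lex time lets a later pass
// rewrite the token into ">", ">", "=" in place without shifting the vector.
#include <cstddef>
#include <iostream>
#include <vector>

enum class Type { kGreaterThan, kEqual, kShiftRightEqual, kPlaceholder, kEOF };

struct Tok {
    Type type;
    size_t column;  // 1-based column of the first character
};

// Mirrors the NumPlaceholders() rule added in token.h: a three-character
// splittable token needs two spare slots (two-character ones need one).
size_t NumPlaceholders(Type t) {
    return t == Type::kShiftRightEqual ? 2 : 0;
}

int main() {
    // Lex ">>=" with maximal munch, then reserve placeholders as Lexer::Lex() does.
    std::vector<Tok> toks = {{Type::kShiftRightEqual, 1}};
    for (size_t i = 0; i < NumPlaceholders(Type::kShiftRightEqual); i++) {
        toks.push_back({Type::kPlaceholder, 2 + i});
    }
    toks.push_back({Type::kEOF, 4});  // same layout the updated test expects

    // If the parser later decides the token must be split, it can overwrite in place.
    toks[0] = {Type::kGreaterThan, 1};
    toks[1] = {Type::kGreaterThan, 2};
    toks[2] = {Type::kEqual, 3};

    std::cout << "token count unchanged after split: " << toks.size() << "\n";  // prints 4
}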