Add File & Range information to tint::Source

This is the first step in improving the error messages produced while parsing.

The `line` and `column` information of `Source` has been moved to `Source::Location`.

`Source::Range` has been added, containing a `Location` interval that allows error messages to highlight the full region of the error.

The `File` information provides an optional file path, and pre-splits the content into lines. These lines can be used to print the full line containing an error.

This CL contains a few temporary changes that help split up this work and ease integration with Tint.

Bug: tint:282
Change-Id: I7aa501b0a9631f286e8e93fd7396bdbe38175727
Reviewed-on: https://dawn-review.googlesource.com/c/tint/+/31420
Reviewed-by: dan sinclair <dsinclair@chromium.org>
Reviewed-by: David Neto <dneto@google.com>
Commit-Queue: David Neto <dneto@google.com>
This commit is contained in:
Ben Clayton
2020-10-30 20:44:53 +00:00
committed by Commit Bot service account
parent 17e0deaeba
commit 5bee67fced
47 changed files with 372 additions and 198 deletions

View File

@@ -429,7 +429,7 @@ class StructuredTraverser {
/// @param src a source record
/// @returns true if |src| is a non-default Source
bool HasSource(const Source& src) {
return src.line != 0 || src.column != 0;
return src.range.begin.line > 0 || src.range.begin.column != 0;
}
} // namespace

View File

@@ -474,12 +474,12 @@ bool ParserImpl::ParseInternalModule() {
}
void ParserImpl::RegisterLineNumbers() {
Source instruction_number{0, 0};
Source::Location instruction_number{0, 0};
// Has there been an OpLine since the last OpNoLine or start of the module?
bool in_op_line_scope = false;
// The source location provided by the most recent OpLine instruction.
Source op_line_source{0, 0};
Source::Location op_line_source{0, 0};
const bool run_on_debug_insts = true;
module_->ForEachInst(
[this, &in_op_line_scope, &op_line_source,
@@ -515,7 +515,7 @@ Source ParserImpl::GetSourceForInst(
if (where == inst_source_.end()) {
return {};
}
return where->second;
return Source{where->second};
}
bool ParserImpl::ParseInternalModuleExceptFunctions() {

View File

@@ -454,7 +454,8 @@ class ParserImpl : Reader {
// is in effect for the instruction, map the instruction to its position
// in the SPIR-V module, counting by instructions, where the first
// instruction is line 1.
std::unordered_map<const spvtools::opt::Instruction*, Source> inst_source_;
std::unordered_map<const spvtools::opt::Instruction*, Source::Location>
inst_source_;
// The set of IDs that are imports of the GLSL.std.450 extended instruction
// sets.

View File

@@ -139,14 +139,14 @@ TEST_F(SpvParserTest, Impl_Source_NoOpLine) {
EXPECT_TRUE(p->error().empty());
// Use instruction counting.
auto s5 = p->GetSourceForResultIdForTest(5);
EXPECT_EQ(7u, s5.line);
EXPECT_EQ(0u, s5.column);
EXPECT_EQ(7u, s5.range.begin.line);
EXPECT_EQ(0u, s5.range.begin.column);
auto s60 = p->GetSourceForResultIdForTest(60);
EXPECT_EQ(8u, s60.line);
EXPECT_EQ(0u, s60.column);
EXPECT_EQ(8u, s60.range.begin.line);
EXPECT_EQ(0u, s60.range.begin.column);
auto s1 = p->GetSourceForResultIdForTest(1);
EXPECT_EQ(10u, s1.line);
EXPECT_EQ(0u, s1.column);
EXPECT_EQ(10u, s1.range.begin.line);
EXPECT_EQ(0u, s1.range.begin.column);
}
TEST_F(SpvParserTest, Impl_Source_WithOpLine_WithOpNoLine) {
@@ -172,15 +172,15 @@ TEST_F(SpvParserTest, Impl_Source_WithOpLine_WithOpNoLine) {
EXPECT_TRUE(p->error().empty());
// Use the information from the OpLine that is still in scope.
auto s5 = p->GetSourceForResultIdForTest(5);
EXPECT_EQ(42u, s5.line);
EXPECT_EQ(53u, s5.column);
EXPECT_EQ(42u, s5.range.begin.line);
EXPECT_EQ(53u, s5.range.begin.column);
auto s60 = p->GetSourceForResultIdForTest(60);
EXPECT_EQ(42u, s60.line);
EXPECT_EQ(53u, s60.column);
EXPECT_EQ(42u, s60.range.begin.line);
EXPECT_EQ(53u, s60.range.begin.column);
// After OpNoLine, revert back to instruction counting.
auto s1 = p->GetSourceForResultIdForTest(1);
EXPECT_EQ(13u, s1.line);
EXPECT_EQ(0u, s1.column);
EXPECT_EQ(13u, s1.range.begin.line);
EXPECT_EQ(0u, s1.range.begin.column);
}
TEST_F(SpvParserTest, Impl_Source_InvalidId) {
@@ -201,8 +201,8 @@ TEST_F(SpvParserTest, Impl_Source_InvalidId) {
EXPECT_TRUE(p->Parse());
EXPECT_TRUE(p->error().empty());
auto s99 = p->GetSourceForResultIdForTest(99);
EXPECT_EQ(0u, s99.line);
EXPECT_EQ(0u, s99.column);
EXPECT_EQ(0u, s99.range.begin.line);
EXPECT_EQ(0u, s99.range.begin.column);
}
TEST_F(SpvParserTest, Impl_IsValidIdentifier) {

View File

@@ -32,8 +32,10 @@ bool is_whitespace(char c) {
} // namespace
Lexer::Lexer(const std::string& input)
: input_(input), len_(static_cast<uint32_t>(input.size())) {}
Lexer::Lexer(Source::File const* file)
: file_(file),
len_(static_cast<uint32_t>(file->content.size())),
location_{1, 1} {}
Lexer::~Lexer() = default;
@@ -79,7 +81,11 @@ Token Lexer::next() {
}
Source Lexer::make_source() const {
return Source{line_, column_};
Source src{};
src.file = file_;
src.range.begin = location_;
src.range.end = location_;
return src;
}
bool Lexer::is_eof() const {
@@ -103,24 +109,24 @@ bool Lexer::is_hex(char ch) const {
}
bool Lexer::matches(size_t pos, const std::string& substr) {
if (pos >= input_.size())
if (pos >= len_)
return false;
return input_.substr(pos, substr.size()) == substr;
return file_->content.substr(pos, substr.size()) == substr;
}
void Lexer::skip_whitespace() {
for (;;) {
auto pos = pos_;
while (!is_eof() && is_whitespace(input_[pos_])) {
while (!is_eof() && is_whitespace(file_->content[pos_])) {
if (matches(pos_, "\n")) {
pos_++;
line_++;
column_ = 1;
location_.line++;
location_.column = 1;
continue;
}
pos_++;
column_++;
location_.column++;
}
skip_comments();
@@ -139,7 +145,7 @@ void Lexer::skip_comments() {
while (!is_eof() && !matches(pos_, "\n")) {
pos_++;
column_++;
location_.column++;
}
}
@@ -152,7 +158,7 @@ Token Lexer::try_float() {
if (matches(end, "-")) {
end++;
}
while (end < len_ && is_digit(input_[end])) {
while (end < len_ && is_digit(file_->content[end])) {
end++;
}
@@ -161,7 +167,7 @@ Token Lexer::try_float() {
}
end++;
while (end < len_ && is_digit(input_[end])) {
while (end < len_ && is_digit(file_->content[end])) {
end++;
}
@@ -173,7 +179,7 @@ Token Lexer::try_float() {
}
auto exp_start = end;
while (end < len_ && isdigit(input_[end])) {
while (end < len_ && isdigit(file_->content[end])) {
end++;
}
@@ -182,14 +188,14 @@ Token Lexer::try_float() {
return {};
}
auto str = input_.substr(start, end - start);
auto str = file_->content.substr(start, end - start);
if (str == "." || str == "-.")
return {};
pos_ = end;
column_ += (end - start);
location_.column += (end - start);
auto res = strtod(input_.c_str() + start, nullptr);
auto res = strtod(file_->content.c_str() + start, nullptr);
// This handles if the number is a really small in the exponent
if (res > 0 && res < static_cast<double>(std::numeric_limits<float>::min())) {
return {Token::Type::kError, source, "f32 (" + str + " too small"};
@@ -205,28 +211,31 @@ Token Lexer::try_float() {
return {source, static_cast<float>(res)};
}
Token Lexer::build_token_from_int_if_possible(const Source& source,
Token Lexer::build_token_from_int_if_possible(Source source,
size_t start,
size_t end,
int32_t base) {
auto res = strtoll(input_.c_str() + start, nullptr, base);
auto res = strtoll(file_->content.c_str() + start, nullptr, base);
if (matches(pos_, "u")) {
if (static_cast<uint64_t>(res) >
static_cast<uint64_t>(std::numeric_limits<uint32_t>::max())) {
return {Token::Type::kError, source,
"u32 (" + input_.substr(start, end - start) + ") too large"};
return {
Token::Type::kError, source,
"u32 (" + file_->content.substr(start, end - start) + ") too large"};
}
pos_ += 1;
return {source, static_cast<uint32_t>(res)};
}
if (res < static_cast<int64_t>(std::numeric_limits<int32_t>::min())) {
return {Token::Type::kError, source,
"i32 (" + input_.substr(start, end - start) + ") too small"};
return {
Token::Type::kError, source,
"i32 (" + file_->content.substr(start, end - start) + ") too small"};
}
if (res > static_cast<int64_t>(std::numeric_limits<int32_t>::max())) {
return {Token::Type::kError, source,
"i32 (" + input_.substr(start, end - start) + ") too large"};
return {
Token::Type::kError, source,
"i32 (" + file_->content.substr(start, end - start) + ") too large"};
}
return {source, static_cast<int32_t>(res)};
}
@@ -245,12 +254,12 @@ Token Lexer::try_hex_integer() {
}
end += 2;
while (!is_eof() && is_hex(input_[end])) {
while (!is_eof() && is_hex(file_->content[end])) {
end += 1;
}
pos_ = end;
column_ += (end - start);
location_.column += (end - start);
return build_token_from_int_if_possible(source, start, end, 16);
}
@@ -264,41 +273,41 @@ Token Lexer::try_integer() {
if (matches(end, "-")) {
end++;
}
if (end >= len_ || !is_digit(input_[end])) {
if (end >= len_ || !is_digit(file_->content[end])) {
return {};
}
auto first = end;
while (end < len_ && is_digit(input_[end])) {
while (end < len_ && is_digit(file_->content[end])) {
end++;
}
// If the first digit is a zero this must only be zero as leading zeros
// are not allowed.
if (input_[first] == '0' && (end - first != 1))
if (file_->content[first] == '0' && (end - first != 1))
return {};
pos_ = end;
column_ += (end - start);
location_.column += (end - start);
return build_token_from_int_if_possible(source, start, end, 10);
}
Token Lexer::try_ident() {
// Must begin with an a-zA-Z_
if (!is_alpha(input_[pos_])) {
if (!is_alpha(file_->content[pos_])) {
return {};
}
auto source = make_source();
auto s = pos_;
while (!is_eof() && is_alphanum(input_[pos_])) {
while (!is_eof() && is_alphanum(file_->content[pos_])) {
pos_++;
column_++;
location_.column++;
}
auto str = input_.substr(s, pos_ - s);
auto str = file_->content.substr(s, pos_ - s);
auto t = check_reserved(source, str);
if (!t.IsUninitialized()) {
return t;
@@ -325,10 +334,10 @@ Token Lexer::try_string() {
}
auto end = pos_;
pos_++;
column_ += (pos_ - start) + 1;
location_.column += (pos_ - start) + 1;
return {Token::Type::kStringLiteral, source,
input_.substr(start, end - start)};
file_->content.substr(start, end - start)};
}
Token Lexer::try_punctuation() {
@@ -338,131 +347,131 @@ Token Lexer::try_punctuation() {
if (matches(pos_, "[[")) {
type = Token::Type::kAttrLeft;
pos_ += 2;
column_ += 2;
location_.column += 2;
} else if (matches(pos_, "]]")) {
type = Token::Type::kAttrRight;
pos_ += 2;
column_ += 2;
location_.column += 2;
} else if (matches(pos_, "(")) {
type = Token::Type::kParenLeft;
pos_ += 1;
column_ += 1;
location_.column += 1;
} else if (matches(pos_, ")")) {
type = Token::Type::kParenRight;
pos_ += 1;
column_ += 1;
location_.column += 1;
} else if (matches(pos_, "[")) {
type = Token::Type::kBracketLeft;
pos_ += 1;
column_ += 1;
location_.column += 1;
} else if (matches(pos_, "]")) {
type = Token::Type::kBracketRight;
pos_ += 1;
column_ += 1;
location_.column += 1;
} else if (matches(pos_, "{")) {
type = Token::Type::kBraceLeft;
pos_ += 1;
column_ += 1;
location_.column += 1;
} else if (matches(pos_, "}")) {
type = Token::Type::kBraceRight;
pos_ += 1;
column_ += 1;
location_.column += 1;
} else if (matches(pos_, "&&")) {
type = Token::Type::kAndAnd;
pos_ += 2;
column_ += 2;
location_.column += 2;
} else if (matches(pos_, "&")) {
type = Token::Type::kAnd;
pos_ += 1;
column_ += 1;
location_.column += 1;
} else if (matches(pos_, "/")) {
type = Token::Type::kForwardSlash;
pos_ += 1;
column_ += 1;
location_.column += 1;
} else if (matches(pos_, "!=")) {
type = Token::Type::kNotEqual;
pos_ += 2;
column_ += 2;
location_.column += 2;
} else if (matches(pos_, "!")) {
type = Token::Type::kBang;
pos_ += 1;
column_ += 1;
location_.column += 1;
} else if (matches(pos_, "::")) {
type = Token::Type::kNamespace;
pos_ += 2;
column_ += 2;
location_.column += 2;
} else if (matches(pos_, ":")) {
type = Token::Type::kColon;
pos_ += 1;
column_ += 1;
location_.column += 1;
} else if (matches(pos_, ",")) {
type = Token::Type::kComma;
pos_ += 1;
column_ += 1;
location_.column += 1;
} else if (matches(pos_, "==")) {
type = Token::Type::kEqualEqual;
pos_ += 2;
column_ += 2;
location_.column += 2;
} else if (matches(pos_, "=")) {
type = Token::Type::kEqual;
pos_ += 1;
column_ += 1;
location_.column += 1;
} else if (matches(pos_, ">=")) {
type = Token::Type::kGreaterThanEqual;
pos_ += 2;
column_ += 2;
location_.column += 2;
} else if (matches(pos_, ">")) {
type = Token::Type::kGreaterThan;
pos_ += 1;
column_ += 1;
location_.column += 1;
} else if (matches(pos_, "<=")) {
type = Token::Type::kLessThanEqual;
pos_ += 2;
column_ += 2;
location_.column += 2;
} else if (matches(pos_, "<")) {
type = Token::Type::kLessThan;
pos_ += 1;
column_ += 1;
location_.column += 1;
} else if (matches(pos_, "%")) {
type = Token::Type::kMod;
pos_ += 1;
column_ += 1;
location_.column += 1;
} else if (matches(pos_, "->")) {
type = Token::Type::kArrow;
pos_ += 2;
column_ += 2;
location_.column += 2;
} else if (matches(pos_, "-")) {
type = Token::Type::kMinus;
pos_ += 1;
column_ += 1;
location_.column += 1;
} else if (matches(pos_, ".")) {
type = Token::Type::kPeriod;
pos_ += 1;
column_ += 1;
location_.column += 1;
} else if (matches(pos_, "+")) {
type = Token::Type::kPlus;
pos_ += 1;
column_ += 1;
location_.column += 1;
} else if (matches(pos_, "||")) {
type = Token::Type::kOrOr;
pos_ += 2;
column_ += 2;
location_.column += 2;
} else if (matches(pos_, "|")) {
type = Token::Type::kOr;
pos_ += 1;
column_ += 1;
location_.column += 1;
} else if (matches(pos_, ";")) {
type = Token::Type::kSemicolon;
pos_ += 1;
column_ += 1;
location_.column += 1;
} else if (matches(pos_, "*")) {
type = Token::Type::kStar;
pos_ += 1;
column_ += 1;
location_.column += 1;
} else if (matches(pos_, "^")) {
type = Token::Type::kXor;
pos_ += 1;
column_ += 1;
location_.column += 1;
}
return {type, source};

View File

@@ -28,8 +28,8 @@ namespace wgsl {
class Lexer {
public:
/// Creates a new Lexer
/// @param input the input to parse
explicit Lexer(const std::string& input);
/// @param file the input file to parse
explicit Lexer(Source::File const* file);
~Lexer();
/// Returns the next token in the input stream
@@ -40,7 +40,7 @@ class Lexer {
void skip_whitespace();
void skip_comments();
Token build_token_from_int_if_possible(const Source& source,
Token build_token_from_int_if_possible(Source source,
size_t start,
size_t end,
int32_t base);
@@ -63,15 +63,13 @@ class Lexer {
bool matches(size_t pos, const std::string& substr);
/// The source to parse
std::string input_;
Source::File const* file_;
/// The length of the input
uint32_t len_ = 0;
/// The current position within the input
uint32_t pos_ = 0;
/// The current line within the input
uint32_t line_ = 1;
/// The current column within the input
uint32_t column_ = 1;
/// The current location within the input
Source::Location location_;
};
} // namespace wgsl

View File

@@ -26,13 +26,15 @@ namespace {
using LexerTest = testing::Test;
TEST_F(LexerTest, Empty) {
Lexer l("");
Source::File file("test.wgsl", "");
Lexer l(&file);
auto t = l.next();
EXPECT_TRUE(t.IsEof());
}
TEST_F(LexerTest, Skips_Whitespace) {
Lexer l("\t\r\n\t ident\t\n\t \r ");
Source::File file("test.wgsl", "\t\r\n\t ident\t\n\t \r ");
Lexer l(&file);
auto t = l.next();
EXPECT_TRUE(t.IsIdentifier());
@@ -45,10 +47,11 @@ TEST_F(LexerTest, Skips_Whitespace) {
}
TEST_F(LexerTest, Skips_Comments) {
Lexer l(R"(#starts with comment
Source::File file("test.wgsl", R"(#starts with comment
ident1 #ends with comment
# blank line
ident2)");
Lexer l(&file);
auto t = l.next();
EXPECT_TRUE(t.IsIdentifier());
@@ -67,7 +70,8 @@ ident1 #ends with comment
}
TEST_F(LexerTest, StringTest_Parse) {
Lexer l(R"(id "this is string content" id2)");
Source::File file("test.wgsl", R"(id "this is string content" id2)");
Lexer l(&file);
auto t = l.next();
EXPECT_TRUE(t.IsIdentifier());
@@ -89,7 +93,8 @@ TEST_F(LexerTest, StringTest_Parse) {
}
TEST_F(LexerTest, StringTest_Unterminated) {
Lexer l(R"(id "this is string content)");
Source::File file("test.wgsl", R"(id "this is string content)");
Lexer l(&file);
auto t = l.next();
EXPECT_TRUE(t.IsIdentifier());
@@ -116,7 +121,8 @@ inline std::ostream& operator<<(std::ostream& out, FloatData data) {
using FloatTest = testing::TestWithParam<FloatData>;
TEST_P(FloatTest, Parse) {
auto params = GetParam();
Lexer l(std::string(params.input));
Source::File file("test.wgsl", params.input);
Lexer l(&file);
auto t = l.next();
EXPECT_TRUE(t.IsFloatLiteral());
@@ -149,7 +155,8 @@ INSTANTIATE_TEST_SUITE_P(LexerTest,
using FloatTest_Invalid = testing::TestWithParam<const char*>;
TEST_P(FloatTest_Invalid, Handles) {
Lexer l(GetParam());
Source::File file("test.wgsl", GetParam());
Lexer l(&file);
auto t = l.next();
EXPECT_FALSE(t.IsFloatLiteral());
@@ -166,7 +173,8 @@ INSTANTIATE_TEST_SUITE_P(LexerTest,
using IdentifierTest = testing::TestWithParam<const char*>;
TEST_P(IdentifierTest, Parse) {
Lexer l(GetParam());
Source::File file("test.wgsl", GetParam());
Lexer l(&file);
auto t = l.next();
EXPECT_TRUE(t.IsIdentifier());
@@ -180,7 +188,8 @@ INSTANTIATE_TEST_SUITE_P(
testing::Values("test01", "_test_", "test_", "_test", "_01", "_test01"));
TEST_F(LexerTest, IdentifierTest_DoesNotStartWithNumber) {
Lexer l("01test");
Source::File file("test.wgsl", "01test");
Lexer l(&file);
auto t = l.next();
EXPECT_FALSE(t.IsIdentifier());
@@ -198,7 +207,8 @@ inline std::ostream& operator<<(std::ostream& out, HexSignedIntData data) {
using IntegerTest_HexSigned = testing::TestWithParam<HexSignedIntData>;
TEST_P(IntegerTest_HexSigned, Matches) {
auto params = GetParam();
Lexer l(std::string(params.input));
Source::File file("test.wgsl", params.input);
Lexer l(&file);
auto t = l.next();
EXPECT_TRUE(t.IsSintLiteral());
@@ -218,14 +228,16 @@ INSTANTIATE_TEST_SUITE_P(
HexSignedIntData{"0x7FFFFFFF", std::numeric_limits<int32_t>::max()}));
TEST_F(LexerTest, IntegerTest_HexSignedTooLarge) {
Lexer l("0x80000000");
Source::File file("test.wgsl", "0x80000000");
Lexer l(&file);
auto t = l.next();
ASSERT_TRUE(t.IsError());
EXPECT_EQ(t.to_str(), "i32 (0x80000000) too large");
}
TEST_F(LexerTest, IntegerTest_HexSignedTooSmall) {
Lexer l("-0x8000000F");
Source::File file("test.wgsl", "-0x8000000F");
Lexer l(&file);
auto t = l.next();
ASSERT_TRUE(t.IsError());
EXPECT_EQ(t.to_str(), "i32 (-0x8000000F) too small");
@@ -242,7 +254,8 @@ inline std::ostream& operator<<(std::ostream& out, HexUnsignedIntData data) {
using IntegerTest_HexUnsigned = testing::TestWithParam<HexUnsignedIntData>;
TEST_P(IntegerTest_HexUnsigned, Matches) {
auto params = GetParam();
Lexer l(std::string(params.input));
Source::File file("test.wgsl", params.input);
Lexer l(&file);
auto t = l.next();
EXPECT_TRUE(t.IsUintLiteral());
@@ -265,7 +278,8 @@ INSTANTIATE_TEST_SUITE_P(
std::numeric_limits<uint32_t>::max()}));
TEST_F(LexerTest, IntegerTest_HexUnsignedTooLarge) {
Lexer l("0xffffffffffu");
Source::File file("test.wgsl", "0xffffffffffu");
Lexer l(&file);
auto t = l.next();
ASSERT_TRUE(t.IsError());
EXPECT_EQ(t.to_str(), "u32 (0xffffffffff) too large");
@@ -282,7 +296,8 @@ inline std::ostream& operator<<(std::ostream& out, UnsignedIntData data) {
using IntegerTest_Unsigned = testing::TestWithParam<UnsignedIntData>;
TEST_P(IntegerTest_Unsigned, Matches) {
auto params = GetParam();
Lexer l(params.input);
Source::File file("test.wgsl", params.input);
Lexer l(&file);
auto t = l.next();
EXPECT_TRUE(t.IsUintLiteral());
@@ -308,7 +323,8 @@ inline std::ostream& operator<<(std::ostream& out, SignedIntData data) {
using IntegerTest_Signed = testing::TestWithParam<SignedIntData>;
TEST_P(IntegerTest_Signed, Matches) {
auto params = GetParam();
Lexer l(params.input);
Source::File file("test.wgsl", params.input);
Lexer l(&file);
auto t = l.next();
EXPECT_TRUE(t.IsSintLiteral());
@@ -328,7 +344,8 @@ INSTANTIATE_TEST_SUITE_P(
using IntegerTest_Invalid = testing::TestWithParam<const char*>;
TEST_P(IntegerTest_Invalid, Parses) {
Lexer l(GetParam());
Source::File file("test.wgsl", GetParam());
Lexer l(&file);
auto t = l.next();
EXPECT_FALSE(t.IsSintLiteral());
@@ -349,7 +366,8 @@ inline std::ostream& operator<<(std::ostream& out, TokenData data) {
using PunctuationTest = testing::TestWithParam<TokenData>;
TEST_P(PunctuationTest, Parses) {
auto params = GetParam();
Lexer l(params.input);
Source::File file("test.wgsl", params.input);
Lexer l(&file);
auto t = l.next();
EXPECT_TRUE(t.Is(params.type));
@@ -398,7 +416,8 @@ INSTANTIATE_TEST_SUITE_P(
using KeywordTest = testing::TestWithParam<TokenData>;
TEST_P(KeywordTest, Parses) {
auto params = GetParam();
Lexer l(params.input);
Source::File file("test.wgsl", params.input);
Lexer l(&file);
auto t = l.next();
EXPECT_TRUE(t.Is(params.type)) << params.input;
@@ -547,7 +566,8 @@ INSTANTIATE_TEST_SUITE_P(
using KeywordTest_Reserved = testing::TestWithParam<const char*>;
TEST_P(KeywordTest_Reserved, Parses) {
auto* keyword = GetParam();
Lexer l(keyword);
Source::File file("test.wgsl", keyword);
Lexer l(&file);
auto t = l.next();
EXPECT_TRUE(t.IsReservedKeyword());

View File

@@ -20,8 +20,14 @@ namespace tint {
namespace reader {
namespace wgsl {
Parser::Parser(Context* ctx, const std::string& input)
: Reader(ctx), impl_(std::make_unique<ParserImpl>(ctx, input)) {}
Parser::Parser(Context* ctx, Source::File const* file)
: Reader(ctx), impl_(std::make_unique<ParserImpl>(ctx, file, false)) {}
Parser::Parser(Context* ctx, const std::string& content)
: Reader(ctx),
impl_(std::make_unique<ParserImpl>(ctx,
new Source::File("", content),
true)) {}
Parser::~Parser() = default;

View File

@@ -19,6 +19,7 @@
#include <string>
#include "src/reader/reader.h"
#include "src/source.h"
namespace tint {
namespace reader {
@@ -29,10 +30,18 @@ class ParserImpl;
/// Parser for WGSL source data
class Parser : public Reader {
public:
/// Creates a new parser
/// Creates a new parser from the given file.
/// @param ctx the non-null context object
/// @param input the input string to parse
Parser(Context* ctx, const std::string& input);
/// @param file the input source file to parse
Parser(Context* ctx, Source::File const* file);
/// Creates a new parser from the given file content.
/// @param ctx the non-null context object
/// @param content the input string to parse
/// TODO(bclayton): Remove this constructor.
/// It purely exists to break up changes into bite sized pieces.
Parser(Context* ctx, const std::string& content);
~Parser() override;
/// Run the parser

View File

@@ -119,10 +119,17 @@ bool IsFunctionDecoration(Token t) {
} // namespace
ParserImpl::ParserImpl(Context* ctx, const std::string& input)
: ctx_(*ctx), lexer_(std::make_unique<Lexer>(input)) {}
ParserImpl::ParserImpl(Context* ctx, Source::File const* file, bool owns_file)
: ctx_(*ctx),
lexer_(std::make_unique<Lexer>(file)),
file_(file),
owns_file_(owns_file) {}
ParserImpl::~ParserImpl() = default;
ParserImpl::~ParserImpl() {
if (owns_file_) {
delete file_;
}
}
void ParserImpl::set_error(const Token& t, const std::string& err) {
auto prefix =

View File

@@ -76,17 +76,21 @@ struct ForHeader {
/// ParserImpl for WGSL source data
class ParserImpl {
public:
/// Creates a new parser
/// Creates a new parser using the given file
/// @param ctx the non-null context object
/// @param input the input string to parse
ParserImpl(Context* ctx, const std::string& input);
/// @param file the input source file to parse
/// @param owns_file if true, the file will be deleted on parser destruction.
/// TODO(bclayton): Remove owns_file.
/// It purely exists to break up changes into bite sized pieces.
ParserImpl(Context* ctx, Source::File const* file, bool owns_file = false);
~ParserImpl();
/// Run the parser
/// @returns true if the parse was successful, false otherwise.
bool Parse();
/// @returns true if an error was encountered
/// @returns true if an error was encountered.
bool has_error() const { return error_.size() > 0; }
/// @returns the parser error string
const std::string& error() const { return error_; }
@@ -411,6 +415,9 @@ class ParserImpl {
std::deque<Token> token_queue_;
std::unordered_map<std::string, ast::type::Type*> registered_constructs_;
ast::Module module_;
Source::File const* file_;
bool owns_file_;
};
} // namespace wgsl

View File

@@ -27,6 +27,7 @@ void ParserImplTest::SetUp() {
void ParserImplTest::TearDown() {
impl_ = nullptr;
files_.clear();
}
} // namespace wgsl

View File

@@ -43,7 +43,9 @@ class ParserImplTest : public testing::Test {
/// @param str the string to parse
/// @returns the parser implementation
ParserImpl* parser(const std::string& str) {
impl_ = std::make_unique<ParserImpl>(&ctx_, str);
auto file = std::make_unique<Source::File>("test.wgsl", str);
impl_ = std::make_unique<ParserImpl>(&ctx_, file.get());
files_.emplace_back(std::move(file));
return impl_.get();
}
@@ -51,6 +53,7 @@ class ParserImplTest : public testing::Test {
TypeManager* tm() { return &(ctx_.type_mgr()); }
private:
std::vector<std::unique_ptr<Source::File>> files_;
std::unique_ptr<ParserImpl> impl_;
Context ctx_;
};
@@ -67,13 +70,18 @@ class ParserImplTestWithParam : public testing::TestWithParam<T> {
void SetUp() override { ctx_.Reset(); }
/// Tears down the test helper
void TearDown() override { impl_ = nullptr; }
void TearDown() override {
impl_ = nullptr;
files_.clear();
}
/// Retrieves the parser from the helper
/// @param str the string to parse
/// @returns the parser implementation
ParserImpl* parser(const std::string& str) {
impl_ = std::make_unique<ParserImpl>(&ctx_, str);
auto file = std::make_unique<Source::File>("test.wgsl", str);
impl_ = std::make_unique<ParserImpl>(&ctx_, file.get());
files_.emplace_back(std::move(file));
return impl_.get();
}
@@ -81,6 +89,7 @@ class ParserImplTestWithParam : public testing::TestWithParam<T> {
TypeManager* tm() { return &(ctx_.type_mgr()); }
private:
std::vector<std::unique_ptr<Source::File>> files_;
std::unique_ptr<ParserImpl> impl_;
Context ctx_;
};

View File

@@ -29,8 +29,8 @@ TEST_F(ParserImplTest, VariableDecl_Parses) {
ASSERT_NE(var, nullptr);
ASSERT_EQ(var->name(), "my_var");
ASSERT_NE(var->type(), nullptr);
ASSERT_EQ(var->source().line, 1u);
ASSERT_EQ(var->source().column, 1u);
ASSERT_EQ(var->source().range.begin.line, 1u);
ASSERT_EQ(var->source().range.begin.column, 1u);
ASSERT_TRUE(var->type()->IsF32());
}

View File

@@ -26,14 +26,14 @@ using ParserTest = testing::Test;
TEST_F(ParserTest, Empty) {
Context ctx;
Parser p(&ctx, "");
Source::File file("test.wgsl", "");
Parser p(&ctx, &file);
ASSERT_TRUE(p.Parse()) << p.error();
}
TEST_F(ParserTest, Parses) {
Context ctx;
Parser p(&ctx, R"(
Source::File file("test.wgsl", R"(
[[location(0)]] var<out> gl_FragColor : vec4<f32>;
[[stage(vertex)]]
@@ -41,6 +41,7 @@ fn main() -> void {
gl_FragColor = vec4<f32>(.4, .2, .3, 1);
}
)");
Parser p(&ctx, &file);
ASSERT_TRUE(p.Parse()) << p.error();
auto m = p.module();
@@ -50,10 +51,11 @@ fn main() -> void {
TEST_F(ParserTest, HandlesError) {
Context ctx;
Parser p(&ctx, R"(
Source::File file("test.wgsl", R"(
fn main() -> { # missing return type
return;
})");
Parser p(&ctx, &file);
ASSERT_FALSE(p.Parse());
ASSERT_TRUE(p.has_error());

View File

@@ -778,10 +778,12 @@ class Token {
/// @returns true if token is a 'workgroup_size'
bool IsWorkgroupSize() const { return type_ == Type::kWorkgroupSize; }
// TODO(bclayton): Deprecate - use source().range.begin instead
/// @returns the source line of the token
size_t line() const { return source_.line; }
size_t line() const { return source_.range.begin.line; }
/// @returns the source column of the token
size_t column() const { return source_.column; }
size_t column() const { return source_.range.begin.column; }
/// @returns the source information for this token
Source source() const { return source_; }