// line_map->stop();
}
+bool
+Lexer::input_source_is_valid_utf8 ()
+{
+ return raw_input_source->is_valid ();
+}
+
/* TODO: need to optimise somehow to avoid the virtual function call in the
* tight loop. Best idea at the moment is CRTP, but that might make lexer
* implementation annoying when storing the "base class" (i.e. would need
Lexer (Lexer &&other) = default;
Lexer &operator= (Lexer &&other) = default;
+ bool input_source_is_valid_utf8 ();
+
// Returns token n tokens ahead of current position.
const_TokenPtr peek_token (int n) { return token_queue.peek (n); }
// Peeks the current token.
Codepoint next_codepoint ()
{
- uint8_t input = next_byte ();
+ uint32_t input = next_byte ();
- if ((int8_t) input == EOF)
+ if ((int32_t) input == EOF)
return Codepoint::eof ();
else if (input < 128)
{
// 3 bytes or UTF-8 BOM
uint8_t input2 = next_byte ();
// If the second byte is equal to 0xBB then the input is no longer a
- // valid UTF-8 char.
+ // valid UTF-8 char. In that case, we check whether the third byte
+ // completes a UTF-8 BOM (0xEF 0xBB 0xBF).
if (input == 0xEF && input2 == 0xBB)
{
uint8_t input3 = next_byte ();
if (input3 == 0xBF)
+ // Found a UTF-8 BOM; skip it and lex the following codepoint instead.
return next_codepoint ();
else
return {0xFFFE};
}
else
{
- // rust_error_at (get_current_location (),
- // "invalid UTF-8 [SECND] (too long)");
return {0xFFFE};
}
}
{
if (offs >= buffer.size ())
return EOF;
-
- return buffer.at (offs++);
+ return (uint8_t) buffer.at (offs++);
}
public:
Lexer lex (filename, std::move (file_wrap), linemap, dump_lex_opt);
+ if (!lex.input_source_is_valid_utf8 ())
+ {
+ rust_error_at (Linemap::unknown_location (),
+ "cannot read %s; stream did not contain valid UTF-8",
+ filename);
+ return;
+ }
+
Parser<Lexer> parser (lex);
// generate crate from parser