std::unique_ptr<AST::Token> (new AST::Token (l_tok)));
}
+void
+MacroInvocLexer::split_current_token (std::vector<TokenPtr> new_tokens)
+{
+  rust_assert (new_tokens.size () > 0);
+
+  // Erase the token being split, then insert every replacement token at the
+  // same position.  The iterator is recomputed on each insertion because
+  // erase/insert may invalidate iterators into the vector.
+  token_stream.erase (token_stream.begin () + offs);
+
+  for (size_t i = 0; i < new_tokens.size (); i++)
+    {
+      token_stream.insert (token_stream.begin () + offs + i,
+                           std::unique_ptr<AST::Token> (
+                             new AST::Token (new_tokens[i])));
+    }
+}
+
std::vector<std::unique_ptr<AST::Token>>
MacroInvocLexer::get_token_slice (size_t start_idx, size_t end_idx) const
{
// this will only work with "simple" tokens like punctuation.
void split_current_token (TokenId new_left, TokenId new_right);
+  void split_current_token (std::vector<TokenPtr> new_tokens);
+
std::vector<std::unique_ptr<AST::Token>>
get_token_slice (size_t start_idx, size_t end_idx) const;
};
token_stream.insert (current_pos, r_tok);
}
+void
+ProcMacroInvocLexer::split_current_token (std::vector<TokenPtr> new_tokens)
+{
+  rust_assert (new_tokens.size () > 0);
+
+  // Erase the token being split, then insert every replacement token at the
+  // same position, recomputing the iterator on each insertion since
+  // erase/insert may invalidate iterators into the vector.
+  token_stream.erase (token_stream.begin () + offs);
+
+  for (size_t i = 0; i < new_tokens.size (); i++)
+    {
+      token_stream.insert (token_stream.begin () + offs + i, new_tokens[i]);
+    }
+}
+
} // namespace Rust
// closes (i.e. T<U<X>> where >> is wrongly lexed as one token). Note that
// this will only work with "simple" tokens like punctuation.
void split_current_token (TokenId new_left, TokenId new_right);
+
+  void split_current_token (std::vector<TokenPtr> new_tokens);
};
} // namespace Rust
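As a rough sketch of how the two overloads are driven (the token helpers are
the existing gccrs ones used elsewhere in this patch, but these exact call
sites are illustrative only, assuming a lexer `lexer` and a location `loc`):

  // Old two-token overload: split a `>>` wrongly lexed as one token back
  // into two closing angle brackets.
  lexer.split_current_token (RIGHT_ANGLE, RIGHT_ANGLE);

  // New vector overload: replace the current token with an arbitrary
  // sequence, e.g. a mislexed tuple index "1.0".
  lexer.split_current_token ({Token::make_int (loc, "1", CORETYPE_PURE_DECIMAL),
                              Token::make (DOT, loc + 1),
                              Token::make_int (loc + 2, "0",
                                               CORETYPE_PURE_DECIMAL)});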
token_queue.insert (1, std::move (new_right_tok));
}
+void
+Lexer::split_current_token (std::vector<TokenPtr> new_tokens)
+{
+  rust_assert (new_tokens.size () > 0);
+
+  // The first replacement token takes the place of the current token; the
+  // remaining ones are queued immediately after it.
+  token_queue.replace_current_value (new_tokens[0]);
+
+  for (size_t i = 1; i < new_tokens.size (); i++)
+    {
+      token_queue.insert (i, new_tokens[i]);
+    }
+}
+
void
Lexer::start_line (int current_line, int current_column)
{
* this will only work with "simple" tokens like punctuation. */
void split_current_token (TokenId new_left, TokenId new_right);
+  void split_current_token (std::vector<TokenPtr> new_tokens);
+
Linemap *get_line_map () { return line_map; }
std::string get_filename () { return std::string (input.get_filename ()); }
std::move (outer_attrs),
restrictions);
}
+  else if (next_tok->get_id () == FLOAT_LITERAL)
+    {
+      // The lexer has misidentified a tuple index as a float literal, e.g.
+      // in `(x, (y, z)).1.0` the `1.0` has been lexed as a single float
+      // literal token.  Split it into three separate tokens: the first
+      // tuple index, the dot and the second tuple index.
+      auto current_loc = next_tok->get_locus ();
+      auto str = next_tok->get_str ();
+      auto dot_pos = str.find (".");
+      auto prefix = str.substr (0, dot_pos);
+      auto suffix = str.substr (dot_pos + 1);
+      lexer.split_current_token (
+        {Token::make_int (current_loc, std::move (prefix),
+                          CORETYPE_PURE_DECIMAL),
+         Token::make (DOT, current_loc + 1),
+         Token::make_int (current_loc + 2, std::move (suffix),
+                          CORETYPE_PURE_DECIMAL)});
+      return parse_tuple_index_expr (tok, std::move (left),
+                                     std::move (outer_attrs),
+                                     restrictions);
+    }
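The string arithmetic above can be checked in isolation; a minimal,
self-contained sketch (not part of the patch) of how a mislexed "1.0" is
divided into its two tuple indexes:

  #include <cassert>
  #include <string>

  int
  main ()
  {
    std::string str = "1.0";                // text of the mislexed FLOAT_LITERAL
    auto dot_pos = str.find (".");
    auto prefix = str.substr (0, dot_pos);  // "1" - first tuple index
    auto suffix = str.substr (dot_pos + 1); // "0" - second tuple index
    assert (prefix == "1" && suffix == "0");
    return 0;
  }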
else if (next_tok->get_id () == IDENTIFIER
&& lexer.peek_token (1)->get_id () != LEFT_PAREN
&& lexer.peek_token (1)->get_id () != SCOPE_RESOLUTION)