block_open
    =
-    | ( {SP}* @:"{%-" {SP}* )
-    | @:"{%" {SP}*
+    | ( {SP}* block_open_symbol "-" {SP}* )
+    | block_open_symbol {SP}*
+    ;
+
+block_open_symbol
+    =
+    "{%"
    ;

block_close
    =
-    | ( @:"-%}" {SP}* )
-    | @:"%}"
+    | ( "-" block_close_symbol {SP}* )
+    | block_close_symbol
+    ;
+
+block_close_symbol
+    =
+    "%}"
    ;

line_block_expression

line_block_end
    =
-    line_block_open "end" name:IDENTIFIER "\n"
+    line_block_open "end" name:IDENTIFIER ("\n" | $)
    ;

line_block_open
    =
-    "\n" {SP}* @:"# " {SP}*
+    "\n" {SP}* line_block_open_symbol {SP}*
+    ;
+
+line_block_open_symbol
+    =
+    "#"
    ;

line_block_parameters
    =
    variable_open type:`variable` name:conditional_expression variable_close
    ;
+
variable_open
    =
-    | ( {SP}* @:"{{-" {SP}* )
-    | ( @:"{{" {SP}* )
+    | ( {SP}* variable_open_symbol "-" {SP}* )
+    | ( variable_open_symbol {SP}* )
+    ;
+
+variable_open_symbol
+    =
+    "{{"
    ;
+
variable_close
    =
-    | ( {SP}* @:"-}}" {SP}* )
-    | ( {SP}* @:"}}" )
+    | ( {SP}* "-" variable_close_symbol {SP}* )
+    | ( {SP}* variable_close_symbol )
+    ;
+
+variable_close_symbol
+    =
+    "}}"
    ;

variable_identifier
    comment_open comment:comment_content comment_close
    ;

comment_open
+    =
+    comment_open_symbol
+    ;
+
+comment_open_symbol
    =
    "{#"
    ;

comment_close
+    =
+    comment_close_symbol
+    ;
+
+comment_close_symbol
    =
    "#}"
    ;

line_comment_open
    =
-    {SP}* "## "
+    {SP}* line_comment_open_symbol
+    ;
+
+line_comment_open_symbol
+    =
+    "##"
    ;

line_comment_content
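The point of splitting each delimiter into its own *_symbol rule is that tatsu lets a later rule replace an earlier one via the @override decorator, so custom delimiters can be appended to the base grammar instead of edited into it. A minimal sketch of the mechanism; the toy grammar below is invented for illustration and is not part of grammar.ebnf:

    import tatsu

    # A toy grammar laid out like the rules above: the delimiters live
    # in dedicated symbol rules.
    BASE = r'''
        start = variable_open name:/\w+/ variable_close $ ;
        variable_open = variable_open_symbol ;
        variable_close = variable_close_symbol ;
        variable_open_symbol = "{{" ;
        variable_close_symbol = "}}" ;
    '''

    # Appending @override rules swaps the delimiters without touching BASE.
    EXTENSION = '''
        @override
        variable_open_symbol = "<<" ;

        @override
        variable_close_symbol = ">>" ;
    '''

    model = tatsu.compile(BASE + EXTENSION)
    print(model.parse('<<user>>', start='start'))  # parsed with << >> delimiters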
# for direct template usage we have up to ten living environments
_spontaneous_environments = LRUCache(10)
+# up to ten compiled tatsu grammars, keyed by the final grammar text
+_grammar_cache = LRUCache(10)
def get_spontaneous_environment(cls, *args):
"""Internal parsing function used by `parse` and `compile`."""
return Parser(self, source, name, filename).parse()
+ def get_grammar(self):
+ import tatsu
+
+ grammar_extensions = ''
+
+ with open('grammar.ebnf', 'r') as grammar_file:
+ base_grammar = grammar_file.read()
+
+ if self.block_start_string:
+ grammar_extensions += '''
+ @override
+ block_open_symbol = %r;
+ ''' % (self.block_start_string)
+
+ if self.block_end_string:
+ grammar_extensions += '''
+ @override
+ block_close_symbol = %r;
+ ''' % (self.block_end_string)
+
+ if self.variable_start_string:
+ grammar_extensions += '''
+ @override
+ variable_open_symbol = %r;
+ ''' % (self.variable_start_string)
+
+ if self.variable_end_string:
+ grammar_extensions += '''
+ @override
+ variable_close_symbol = %r;
+ ''' % (self.variable_end_string)
+
+ if self.comment_start_string:
+ grammar_extensions += '''
+ @override
+ comment_open_symbol = %r;
+ ''' % (self.comment_start_string)
+
+ if self.comment_end_string:
+ grammar_extensions += '''
+ @override
+ comment_close_symbol = %r;
+ ''' % (self.comment_end_string)
+
+ if self.line_statement_prefix:
+ grammar_extensions += '''
+ @override
+ line_block_open_symbol = %r;
+ ''' % (self.line_statement_prefix)
+
+ if self.line_comment_prefix:
+ grammar_extensions += '''
+ @override
+ line_comment_open_symbol = %r;
+ ''' % (self.line_comment_prefix)
+
+ final_grammar = base_grammar + grammar_extensions
+
+ if final_grammar not in _grammar_cache:
+ _grammar_cache[final_grammar] = tatsu.compile(final_grammar)
+
+ return _grammar_cache[final_grammar]
+
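Two properties of get_grammar are worth calling out; both follow from the code above rather than from tatsu's documentation. First, %r renders each configured delimiter as a quoted Python literal, which tatsu accepts as a token, so an environment created with block_start_string='<%' appends roughly:

    @override
    block_open_symbol = '<%';

Second, because the cache key is the final grammar text, environments with identical delimiter settings share a single compiled grammar. A hypothetical check (the Environment arguments are the standard Jinja2 options):

    a = Environment(block_start_string='<%', block_end_string='%>')
    b = Environment(block_start_string='<%', block_end_string='%>')
    assert a.get_grammar() is b.get_grammar()  # both served from _grammar_cache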
    def lex(self, source, name=None, filename=None):
        """Lex the given sourcecode and return a generator that yields
        tokens as tuples in the form ``(lineno, token_type, value)``.
from .lexer import describe_token
from .lexer import describe_token_expr
-import tatsu
-
-with open('grammar.ebnf', 'r') as grammar_file:
-    grammar = tatsu.compile(grammar_file.read())
-
_statement_keywords = frozenset(
[
    def __init__(self, environment, source, name=None, filename=None, state=None):
        self.environment = environment
        self.source = source
+        self.grammar = environment.get_grammar()
        self.stream = environment._tokenize(source, name, filename, state)
        self.name = name
        self.filename = filename
        from .new_parser import JinjaSemantics, parse_template
        result = parse_template(
-            grammar.parse(
+            self.grammar.parse(
                self.source.rstrip('\n'),
                whitespace='',
                parseinfo=True,
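With these changes the parser no longer depends on a grammar compiled once at import time: each Parser instance picks up the grammar matching its environment's delimiters, and identical configurations hit the cache. A hypothetical end-to-end sketch, assuming grammar.ebnf sits in the working directory as get_grammar expects:

    env = Environment(variable_start_string='[[', variable_end_string=']]')
    parser = Parser(env, '[[ user ]]')
    # parser.grammar is the compiled tatsu model for this delimiter set
    result = parser.parse()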