'ja': 'ja_JP', 'km': 'km_KH', 'ko': 'ko_KR', 'lt': 'lt_LT', 'lv': 'lv_LV',
'mk': 'mk_MK', 'nl': 'nl_NL', 'nn': 'nn_NO', 'no': 'nb_NO', 'pl': 'pl_PL',
'pt': 'pt_PT', 'ro': 'ro_RO', 'ru': 'ru_RU', 'sk': 'sk_SK', 'sl': 'sl_SI',
- 'sv': 'sv_SE', 'th': 'th_TH', 'tr': 'tr_TR', 'uk': 'uk_UA'
+ 'sv': 'sv_SE', 'th': 'th_TH', 'tr': 'tr_TR', 'uk': 'uk_UA',
}
def parse_locale(
identifier: str,
- sep: str = '_'
+ sep: str = '_',
) -> tuple[str, str | None, str | None, str | None] | tuple[str, str | None, str | None, str | None, str | None]:
"""Parse a locale identifier into a tuple of the form ``(language,
territory, script, variant, modifier)``.
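A minimal usage sketch (illustrative only, not part of the diff; it assumes Babel's documented parsing behaviour, where the ``modifier`` element is present only when the identifier carries one, e.g. ``@euro``):

>>> parse_locale('zh_Hans_CN')
('zh', 'CN', 'Hans', None)
>>> parse_locale('it_IT@euro')
('it', 'IT', None, None, 'euro')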
return city_name
return region_format % (fallback_format % {
'0': city_name,
- '1': territory_name
+ '1': territory_name,
})
('day', 3600 * 24),
('hour', 3600),
('minute', 60),
- ('second', 1)
+ ('second', 1),
)
self,
value: datetime.date | datetime.time,
locale: Locale | str,
- reference_date: datetime.date | None = None
+ reference_date: datetime.date | None = None,
) -> None:
assert isinstance(value, (datetime.date, datetime.datetime, datetime.time))
if isinstance(value, (datetime.datetime, datetime.time)) and value.tzinfo is None:
'm': [1, 2], # minute
's': [1, 2], 'S': None, 'A': None, # second
'z': [1, 2, 3, 4], 'Z': [1, 2, 3, 4, 5], 'O': [1, 4], 'v': [1, 4], # zone
- 'V': [1, 2, 3, 4], 'x': [1, 2, 3, 4, 5], 'X': [1, 2, 3, 4, 5] # zone
+ 'V': [1, 2, 3, 4], 'x': [1, 2, 3, 4, 5], 'X': [1, 2, 3, 4, 5], # zone
}
#: The pattern characters declared in the Date Field Symbol Table
if style not in locale.list_patterns:
raise ValueError(
f'Locale {locale} does not support list formatting style {style!r} '
- f'(supported are {sorted(locale.list_patterns)})'
+ f'(supported are {sorted(locale.list_patterns)})',
)
patterns = locale.list_patterns[style]
if tzinfo is None:
raise LookupError(
f"Can not find timezone {tzenv}. \n"
- "Timezone names are generally in the form `Continent/City`."
+ "Timezone names are generally in the form `Continent/City`.",
)
return tzinfo
if not isinstance(message.string, (list, tuple)):
fuzzy = True
message.string = tuple(
- [message.string] + ([''] * (len(message.id) - 1))
+ [message.string] + ([''] * (len(message.id) - 1)),
)
elif len(message.string) != self.num_plurals:
fuzzy = True
_string_format_compatibilities = [
{'i', 'd', 'u'},
{'x', 'X'},
- {'f', 'F', 'g', 'G'}
+ {'f', 'F', 'g', 'G'},
]
elif not _compatible(typechar, type_map[name]):
raise TranslationError(
f'incompatible format for placeholder {name!r}: '
- f'{typechar!r} and {type_map[name]!r} are not compatible'
+ f'{typechar!r} and {type_map[name]!r} are not compatible',
)
'dngettext': (2, 3),
'N_': None,
'pgettext': ((1, 'c'), 2),
- 'npgettext': ((1, 'c'), 2, 3)
+ 'npgettext': ((1, 'c'), 2, 3),
}
DEFAULT_MAPPING: list[tuple[str, str]] = [('**.py', 'python')]
keywords=keywords,
comment_tags=comment_tags,
options=options,
- strip_comment_tags=strip_comment_tags
+ strip_comment_tags=strip_comment_tags,
):
yield (filename, *message_tuple)
filename = (getattr(fileobj, "name", None) or "(unknown)")
sys.stderr.write(
f"{filename}:{lineno}: warning: Empty msgid. It is reserved by GNU gettext: gettext(\"\") "
- f"returns the header entry with meta information, not the empty string.\n"
+ f"returns the header entry with meta information, not the empty string.\n",
)
return
builtin = {
'ignore': extract_nothing,
'python': extract_python,
- 'javascript': extract_javascript
+ 'javascript': extract_javascript,
}
func = builtin.get(method)
jsx=options.get("jsx", True),
template_string=options.get("template_string", True),
dotted=dotted,
- lineno=lineno
+ lineno=lineno,
):
if ( # Turn keyword`foo` expressions into keyword("foo") calls:
funcname and # have a keyword...
('use-fuzzy', 'f',
'also include fuzzy translations'),
('statistics', None,
- 'print statistics about translations')
+ 'print statistics about translations'),
]
boolean_options = ['use-fuzzy', 'statistics']
percentage = translated * 100 // len(catalog)
self.log.info(
'%d of %d messages (%d%%) translated in %s',
- translated, len(catalog), percentage, po_file
+ translated, len(catalog), percentage, po_file,
)
if catalog.fuzzy and not self.use_fuzzy:
for message, errors in catalog_errors:
for error in errors:
self.log.error(
- 'error: %s:%d: %s', po_file, message.lineno, error
+ 'error: %s:%d: %s', po_file, message.lineno, error,
)
self.log.info('compiling catalog %s to %s', po_file, mo_file)
]
boolean_options = [
'no-default-keywords', 'no-location', 'omit-header', 'no-wrap',
- 'sort-output', 'sort-by-file', 'strip-comments'
+ 'sort-output', 'sort-by-file', 'strip-comments',
]
as_args = 'input-paths'
multiple_value_options = (
'strip-comments': ('--strip-comment-tags',),
}
option_choices = {
- 'add-location': ('full', 'file', 'never',),
+ 'add-location': ('full', 'file', 'never'),
}
def initialize_options(self):
self.input_paths = self.input_dirs
else:
raise OptionError(
- 'input-dirs and input-paths are mutually exclusive'
+ 'input-dirs and input-paths are mutually exclusive',
)
keywords = {} if self.no_default_keywords else DEFAULT_KEYWORDS.copy()
if not self.keywords:
raise OptionError(
- 'you must specify new keywords if you disable the default ones'
+ 'you must specify new keywords if you disable the default ones',
)
if not self.output_file:
raise OptionError('no output file specified')
if self.no_wrap and self.width:
raise OptionError(
- "'--no-wrap' and '--width' are mutually exclusive"
+ "'--no-wrap' and '--width' are mutually exclusive",
)
if not self.no_wrap and not self.width:
self.width = 76
if self.sort_output and self.sort_by_file:
raise OptionError(
- "'--sort-output' and '--sort-by-file' are mutually exclusive"
+ "'--sort-output' and '--sort-by-file' are mutually exclusive",
)
if self.input_paths:
extracted = check_and_call_extract_file(
path, method_map, options_map,
callback, self.keywords, self.add_comments,
- self.strip_comments, current_dir
+ self.strip_comments, current_dir,
)
else:
extracted = extract_from_dir(
def run(self):
self.log.info(
- 'creating catalog %s based on %s', self.output_file, self.input_file
+ 'creating catalog %s based on %s', self.output_file, self.input_file,
)
with open(self.input_file, 'rb') as infile:
if not self.locale:
raise OptionError(
'you must specify the locale for '
- 'the init-missing option to work'
+ 'the init-missing option to work',
)
try:
check_status[filename] = False
continue
self.log.info(
- 'creating catalog %s based on %s', filename, self.input_file
+ 'creating catalog %s based on %s', filename, self.input_file,
)
with open(self.input_file, 'rb') as infile:
'compile': 'compile message catalogs to MO files',
'extract': 'extract messages from source files and generate a POT file',
'init': 'create new message catalogs from a POT file',
- 'update': 'update existing message catalogs from a POT file'
+ 'update': 'update existing message catalogs from a POT file',
}
command_classes = {
parser = optparse.OptionParser(
usage=self.usage % (cmdname, ''),
- description=self.commands[cmdname]
+ description=self.commands[cmdname],
)
as_args = getattr(cmdclass, "as_args", ())
for long, short, help in cmdclass.user_options:
'+', '-', '*', '%', '!=', '==', '<', '>', '<=', '>=', '=',
'+=', '-=', '*=', '%=', '<<', '>>', '>>>', '<<=', '>>=',
'>>>=', '&', '&=', '|', '|=', '&&', '||', '^', '^=', '(', ')',
- '[', ']', '{', '}', '!', '--', '++', '~', ',', ';', '.', ':'
+ '[', ']', '{', '}', '!', '--', '++', '~', ',', ';', '.', ':',
], key=len, reverse=True)
escapes: dict[str, str] = {'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t'}
('string', re.compile(r'''(
'(?:[^'\\]*(?:\\.[^'\\]*)*)' |
"(?:[^"\\]*(?:\\.[^"\\]*)*)"
- )''', re.VERBOSE | re.DOTALL))
+ )''', re.VERBOSE | re.DOTALL)),
]
len(messages), # number of entries
7 * 4, # start of key index
7 * 4 + len(messages) * 8, # start of value index
- 0, 0 # size and offset of hash table
+ 0, 0, # size and offset of hash table
) + array.array.tobytes(array.array("i", offsets)) + ids + strs)
6,
'(n==1 ? 0 : n%10==1 && n%100!=11 && n%100!=71 && n%100!=91 ? 1 : n%10==2 && n%100!=12 && n%100!=72 && '
'n%100!=92 ? 2 : (n%10==3 || n%10==4 || n%10==9) && n%100!=13 && n%100!=14 && n%100!=19 && n%100!=73 && '
- 'n%100!=74 && n%100!=79 && n%100!=93 && n%100!=94 && n%100!=99 ? 3 : n%1000000==0 ? 4 : 5)'
+ 'n%100!=74 && n%100!=79 && n%100!=93 && n%100!=94 && n%100!=99 ? 3 : n%1000000==0 ? 4 : 5)',
),
# Bosnian
'bs': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
)
if len(message.previous_id) > 1:
_write_comment('msgid_plural %s' % _normalize(
- message.previous_id[1]
+ message.previous_id[1],
), prefix='|')
_write_message(message)
if not ignore_obsolete:
for message in _sort_messages(
catalog.obsolete.values(),
- sort_by=sort_by
+ sort_by=sort_by,
):
for comment in message.user_comments:
_write_comment(comment)
assert name == "message_extractors"
if not isinstance(value, dict):
raise SetupError(
- 'the value of the "message_extractors" parameter must be a dictionary'
+ 'the value of the "message_extractors" parameter must be a dictionary',
)
*,
format_type: Literal["short"] = "short",
locale: Locale | str | None = LC_NUMERIC,
- fraction_digits: int = 0
+ fraction_digits: int = 0,
) -> str:
"""Format a number as a currency value in compact form.
self._quantize_value(value, locale, frac_prec, group_separator),
get_exponential_symbol(locale),
exp_sign, # type: ignore # exp_sign is always defined here
- self._format_int(str(exp), self.exp_prec[0], self.exp_prec[1], locale) # type: ignore # exp is always defined here
+ self._format_int(str(exp), self.exp_prec[0], self.exp_prec[1], locale), # type: ignore # exp is always defined here
])
# Is it a significant digits pattern?
('word', re.compile(fr'\b(and|or|is|(?:with)?in|not|mod|[{"".join(_VARS)}])\b')),
('value', re.compile(r'\d+')),
('symbol', re.compile(r'%|,|!=|=')),
- ('ellipsis', re.compile(r'\.{2,3}|\u2026', re.UNICODE)) # U+2026: ELLIPSIS
+ ('ellipsis', re.compile(r'\.{2,3}|\u2026', re.UNICODE)), # U+2026: ELLIPSIS
]
self._func,
enable_cache=self._is_cache_enabled,
*self._args, # noqa: B026
- **self._kwargs
+ **self._kwargs,
)
def __deepcopy__(self, memo: Any) -> LazyProxy:
deepcopy(self._func, memo),
enable_cache=deepcopy(self._is_cache_enabled, memo),
*deepcopy(self._args, memo), # noqa: B026
- **deepcopy(self._kwargs, memo)
+ **deepcopy(self._kwargs, memo),
)
formatted_numerator = numerator_value
elif numerator_unit: # Numerator has unit
formatted_numerator = format_unit(
- numerator_value, numerator_unit, length=length, format=format, locale=locale
+ numerator_value, numerator_unit, length=length, format=format, locale=locale,
)
else: # Unitless numerator
formatted_numerator = format_decimal(numerator_value, format=format, locale=locale)
class TextWrapper(textwrap.TextWrapper):
wordsep_re = re.compile(
r'(\s+|' # any whitespace
- r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))' # em-dash
+ r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))', # em-dash
)
'index': ['sidebar-about.html', 'localtoc.html', 'sidebar-links.html',
'searchbox.html'],
'**': ['sidebar-logo.html', 'localtoc.html', 'relations.html',
- 'searchbox.html']
+ 'searchbox.html'],
}
# Additional templates that should be rendered to pages, maps page names to
select = [
"B",
"C",
+ "COM",
"E",
"F",
"I",
# Make sure we're using Babel source, and not some previously installed version
CHECKOUT_ROOT = os.path.abspath(os.path.join(
os.path.dirname(__file__),
- '..'
+ '..',
))
BABEL_PACKAGE_ROOT = os.path.join(CHECKOUT_ROOT, "babel")
sys.path.insert(0, CHECKOUT_ROOT)
'eraAbbr': 'abbreviated',
'eraNames': 'wide',
'eraNarrow': 'narrow',
- 'timeFormats': 'time_formats'
+ 'timeFormats': 'time_formats',
}
log = logging.getLogger("import_cldr")
parser = OptionParser(usage='%prog path/to/cldr')
parser.add_option(
'-f', '--force', dest='force', action='store_true', default=False,
- help='force import even if destination file seems up to date'
+ help='force import even if destination file seems up to date',
)
parser.add_option(
'-j', '--json', dest='dump_json', action='store_true', default=False,
- help='also export debugging JSON dumps of locale data'
+ help='also export debugging JSON dumps of locale data',
)
parser.add_option(
'-q', '--quiet', dest='quiet', action='store_true', default=bool(os.environ.get('BABEL_CLDR_QUIET')),
srcdir=args[0],
destdir=BABEL_PACKAGE_ROOT,
force=bool(options.force),
- dump_json=bool(options.dump_json)
+ dump_json=bool(options.dump_json),
)
locale_id = '_'.join(filter(None, [
language,
- territory != '001' and territory or None
+ territory != '001' and territory or None,
]))
data['locale_id'] = locale_id
if unsupported_number_systems_string:
log.warning(
f"{locale_id}: unsupported number systems were ignored: "
- f"{unsupported_number_systems_string}"
+ f"{unsupported_number_systems_string}",
)
write_datafile(data_filename, data, dump_json=dump_json)
elif elem.tag == 'alias':
ctxts[width_type] = Alias(
_translate_alias(['months', ctxt_type, width_type],
- elem.attrib['path'])
+ elem.attrib['path']),
)
elif elem.tag == 'alias':
ctxts[width_type] = Alias(
_translate_alias(['days', ctxt_type, width_type],
- elem.attrib['path'])
+ elem.attrib['path']),
)
elif elem.tag == 'alias':
eras[width_type] = Alias(
_translate_alias(['eras', width_type],
- elem.attrib['path'])
+ elem.attrib['path']),
)
continue
try:
date_formats[type] = dates.parse_pattern(
- str(elem.findtext('dateFormat/pattern'))
+ str(elem.findtext('dateFormat/pattern')),
)
except ValueError as e:
log.error(e)
elif elem.tag == 'alias':
date_formats = Alias(_translate_alias(
- ['date_formats'], elem.attrib['path'])
+ ['date_formats'], elem.attrib['path']),
)
continue
try:
time_formats[type] = dates.parse_pattern(
- str(elem.findtext('timeFormat/pattern'))
+ str(elem.findtext('timeFormat/pattern')),
)
except ValueError as e:
log.error(e)
elif elem.tag == 'alias':
time_formats = Alias(_translate_alias(
- ['time_formats'], elem.attrib['path'])
+ ['time_formats'], elem.attrib['path']),
)
log.error(e)
elif elem.tag == 'alias':
datetime_formats = Alias(_translate_alias(
- ['datetime_formats'], elem.attrib['path'])
+ ['datetime_formats'], elem.attrib['path']),
)
elif elem.tag == 'availableFormats':
for datetime_skeleton in elem.findall('dateFormatItem'):
if child.tag == 'alias':
currency_formats[type] = Alias(
_translate_alias(['currency_formats', elem.attrib['type']],
- child.attrib['path'])
+ child.attrib['path']),
)
elif child.tag == 'pattern':
pattern_type = child.attrib.get('type')
ignore = babel.messages.extract:extract_nothing
python = babel.messages.extract:extract_python
javascript = babel.messages.extract:extract_javascript
- """
+ """,
)
assert messages == [
(3, '_', 'Page arg 1', []),
(3, '_', 'Page arg 2', []),
- (8, '_', 'log entry', [])
+ (8, '_', 'log entry', []),
]
def test_multiline(self):
'project': [
('**/ignored/**.*', 'ignore', None),
('**.py', 'python', None),
- ]
+ ],
}
self.cmd.copyright_holder = 'FooBar, Inc.'
self.cmd.msgid_bugs_address = 'bugs.address@email.tld'
'project': [
('**/ignored/**.*', 'ignore', None),
('**.py', 'python', None),
- ]
+ ],
}
self.cmd.output_file = 'project/i18n/temp.pot'
self.cmd.add_location = 'file'
self.cli.run(sys.argv + ['init',
'-l', 'fi',
'-o', po_file,
- '-i', tmpl_file
+ '-i', tmpl_file,
])
with open(po_file) as infp:
catalog = read_po(infp)
self.cli.run(sys.argv + ['init',
'-l', 'fi',
'-o', po_file,
- '-i', tmpl_file
+ '-i', tmpl_file,
])
with open(po_file) as infp:
catalog = read_po(infp)
self.cli.run(sys.argv + ['init',
'-l', 'fi_FI',
'-o', po_file,
- '-i', tmpl_file
+ '-i', tmpl_file,
])
# Update the catalog file
self.cli.run(sys.argv + ['init',
'-l', 'fi_FI',
'-o', po_file,
- '-i', tmpl_file
+ '-i', tmpl_file,
])
# Update the catalog file
None: (1,),
2: (2,),
3: ((2, 'c'), 3),
- }
+ },
}
# (Both of those invocation styles should be equivalent, so there is no parametrization from here on out)
cmdinst = configure_cli_command(
- f"extract -F babel-django.cfg --add-comments Translators: -o django232.pot {kwarg_text} ."
+ f"extract -F babel-django.cfg --add-comments Translators: -o django232.pot {kwarg_text} .",
)
assert isinstance(cmdinst, ExtractMessages)
assert set(cmdinst.keywords.keys()) == {'_', 'dgettext', 'dngettext',
assert messages == [
(5, ('bunny', 'bunnies'), [], None),
(8, 'Rabbit', [], None),
- (10, ('Page', 'Pages'), [], None)
+ (10, ('Page', 'Pages'), [], None),
]
def test_dotted_keyword_extract():
buf = BytesIO(b"msg1 = com.corporate.i18n.formatMessage('Insert coin to continue')")
messages = list(
- extract.extract('javascript', buf, {"com.corporate.i18n.formatMessage": None}, [], {})
+ extract.extract('javascript', buf, {"com.corporate.i18n.formatMessage": None}, [], {}),
)
assert messages == [(1, 'Insert coin to continue', [], None)]
def test_template_string_standard_usage():
buf = BytesIO(b"msg1 = gettext(`Very template, wow`)")
messages = list(
- extract.extract('javascript', buf, {"gettext": None}, [], {})
+ extract.extract('javascript', buf, {"gettext": None}, [], {}),
)
assert messages == [(1, 'Very template, wow', [], None)]
def test_template_string_tag_usage():
buf = BytesIO(b"function() { if(foo) msg1 = i18n`Tag template, wow`; }")
messages = list(
- extract.extract('javascript', buf, {"i18n": None}, [], {})
+ extract.extract('javascript', buf, {"i18n": None}, [], {}),
)
assert messages == [(1, 'Tag template, wow', [], None)]
def test_inside_template_string():
buf = BytesIO(b"const msg = `${gettext('Hello')} ${user.name}`")
messages = list(
- extract.extract('javascript', buf, {"gettext": None}, [], {'parse_template_string': True})
+ extract.extract('javascript', buf, {"gettext": None}, [], {'parse_template_string': True}),
)
assert messages == [(1, 'Hello', [], None)]
}`
""")
messages = list(
- extract.extract('javascript', buf, {"gettext": None}, [], {'parse_template_string': True})
+ extract.extract('javascript', buf, {"gettext": None}, [], {'parse_template_string': True}),
)
assert messages == [(1, 'Username', [], None), (3, 'Hello', [], None), (5, 'Are you having a nice day?', [], None), (8, 'Howdy', [], None), (10, 'Are you doing ok?', [], None)]
def test_inside_nested_template_string():
buf = BytesIO(b"const msg = `${gettext('Greetings!')} ${ evening ? `${user.name}: ${gettext('This is a lovely evening.')}` : `${gettext('The day is really nice!')} ${user.name}`}`")
messages = list(
- extract.extract('javascript', buf, {"gettext": None}, [], {'parse_template_string': True})
+ extract.extract('javascript', buf, {"gettext": None}, [], {'parse_template_string': True}),
)
assert messages == [(1, 'Greetings!', [], None), (1, 'This is a lovely evening.', [], None), (1, 'The day is really nice!', [], None)]
('name', 'foo.bar', 1),
('operator', '(', 1),
('name', 'quux', 1),
- ('operator', ')', 1)
+ ('operator', ')', 1),
]
def test_template_string():
assert list(jslexer.tokenize("gettext `foo\"bar\"p`", template_string=True)) == [
('name', 'gettext', 1),
- ('template_string', '`foo"bar"p`', 1)
+ ('template_string', '`foo"bar"p`', 1),
]
('operator', '}', 7),
('jsx_tag', '/>', 7),
('jsx_tag', '</comp2', 8),
- ('operator', '>', 8)
+ ('operator', '>', 8),
]
def test_wrap_locations_with_hyphens(self):
catalog = Catalog()
catalog.add('foo', locations=[
- ('doupy/templates/base/navmenu.inc.html.py', 60)
+ ('doupy/templates/base/navmenu.inc.html.py', 60),
])
catalog.add('foo', locations=[
- ('doupy/templates/job-offers/helpers.html', 22)
+ ('doupy/templates/job-offers/helpers.html', 22),
])
buf = BytesIO()
pofile.write_po(buf, catalog, omit_header=True)
full = dates.format_datetime(
dt, 'full',
tzinfo=timezone_getter('Europe/Paris'),
- locale='fr_FR'
+ locale='fr_FR',
)
assert full == (
'dimanche 1 avril 2007, 17:30:00 heure '
custom = dates.format_datetime(
dt, "yyyy.MM.dd G 'at' HH:mm:ss zzz",
tzinfo=timezone_getter('US/Eastern'),
- locale='en'
+ locale='en',
)
assert custom == '2007.04.01 AD at 11:30:00 EDT'
alias = localedata.Alias('x')
d1 = {
'x': {'a': 1, 'b': 2, 'c': 3},
- 'y': alias
+ 'y': alias,
}
d2 = {
'x': {'a': 1, 'b': 12, 'd': 14},
- 'y': {'b': 22, 'e': 25}
+ 'y': {'b': 22, 'e': 25},
}
localedata.merge(d1, d2)
assert d1 == {'x': {'a': 1, 'b': 12, 'c': 3, 'd': 14}, 'y': (alias, {'b': 22, 'e': 25})}
'currency': 'USD',
'from': date(1792, 1, 1),
'to': None,
- 'tender': True
+ 'tender': True,
}]
assert numbers.get_territory_currencies('LS', date(2013, 1, 1)) == ['ZAR', 'LSL']
WELL_FORMED_TOKEN_TESTS = (
- ('', []),
- ('n = 1', [('value', '1'), ('symbol', '='), ('word', 'n'), ]),
- ('n = 1 @integer 1', [('value', '1'), ('symbol', '='), ('word', 'n'), ]),
- ('n is 1', [('value', '1'), ('word', 'is'), ('word', 'n'), ]),
- ('n % 100 = 3..10', [('value', '10'), ('ellipsis', '..'), ('value', '3'),
- ('symbol', '='), ('value', '100'), ('symbol', '%'),
- ('word', 'n'), ]),
+ ("", []),
+ (
+ "n = 1",
+ [
+ ("value", "1"),
+ ("symbol", "="),
+ ("word", "n"),
+ ],
+ ),
+ (
+ "n = 1 @integer 1",
+ [
+ ("value", "1"),
+ ("symbol", "="),
+ ("word", "n"),
+ ],
+ ),
+ (
+ "n is 1",
+ [
+ ("value", "1"),
+ ("word", "is"),
+ ("word", "n"),
+ ],
+ ),
+ (
+ "n % 100 = 3..10",
+ [
+ ("value", "10"),
+ ("ellipsis", ".."),
+ ("value", "3"),
+ ("symbol", "="),
+ ("value", "100"),
+ ("symbol", "%"),
+ ("word", "n"),
+ ],
+ ),
)
('relation', ('in',
('mod', (self.n,
plural.value_node(100))),
- (make_range_list((1, 19)))))))
+ (make_range_list((1, 19))))))),
))