from tornado.escape import to_unicode as u
LOCALE_NAMES = {
- "af_ZA": {"name_en": u("Afrikaans"), "name": u("Afrikaans")},
- "am_ET": {"name_en": u("Amharic"), "name": u("አማርኛ")},
- "ar_AR": {"name_en": u("Arabic"), "name": u("العربية")},
- "bg_BG": {"name_en": u("Bulgarian"), "name": u("Български")},
- "bn_IN": {"name_en": u("Bengali"), "name": u("বাংলা")},
- "bs_BA": {"name_en": u("Bosnian"), "name": u("Bosanski")},
- "ca_ES": {"name_en": u("Catalan"), "name": u("Català")},
- "cs_CZ": {"name_en": u("Czech"), "name": u("Čeština")},
- "cy_GB": {"name_en": u("Welsh"), "name": u("Cymraeg")},
- "da_DK": {"name_en": u("Danish"), "name": u("Dansk")},
- "de_DE": {"name_en": u("German"), "name": u("Deutsch")},
- "el_GR": {"name_en": u("Greek"), "name": u("Ελληνικά")},
- "en_GB": {"name_en": u("English (UK)"), "name": u("English (UK)")},
- "en_US": {"name_en": u("English (US)"), "name": u("English (US)")},
- "es_ES": {"name_en": u("Spanish (Spain)"), "name": u("Español (España)")},
- "es_LA": {"name_en": u("Spanish"), "name": u("Español")},
- "et_EE": {"name_en": u("Estonian"), "name": u("Eesti")},
- "eu_ES": {"name_en": u("Basque"), "name": u("Euskara")},
- "fa_IR": {"name_en": u("Persian"), "name": u("فارسی")},
- "fi_FI": {"name_en": u("Finnish"), "name": u("Suomi")},
- "fr_CA": {"name_en": u("French (Canada)"), "name": u("Français (Canada)")},
- "fr_FR": {"name_en": u("French"), "name": u("Français")},
- "ga_IE": {"name_en": u("Irish"), "name": u("Gaeilge")},
- "gl_ES": {"name_en": u("Galician"), "name": u("Galego")},
- "he_IL": {"name_en": u("Hebrew"), "name": u("עברית")},
- "hi_IN": {"name_en": u("Hindi"), "name": u("हिन्दी")},
- "hr_HR": {"name_en": u("Croatian"), "name": u("Hrvatski")},
- "hu_HU": {"name_en": u("Hungarian"), "name": u("Magyar")},
- "id_ID": {"name_en": u("Indonesian"), "name": u("Bahasa Indonesia")},
- "is_IS": {"name_en": u("Icelandic"), "name": u("Íslenska")},
- "it_IT": {"name_en": u("Italian"), "name": u("Italiano")},
- "ja_JP": {"name_en": u("Japanese"), "name": u("日本語")},
- "ko_KR": {"name_en": u("Korean"), "name": u("한국어")},
- "lt_LT": {"name_en": u("Lithuanian"), "name": u("Lietuvių")},
- "lv_LV": {"name_en": u("Latvian"), "name": u("Latviešu")},
- "mk_MK": {"name_en": u("Macedonian"), "name": u("Македонски")},
- "ml_IN": {"name_en": u("Malayalam"), "name": u("മലയാളം")},
- "ms_MY": {"name_en": u("Malay"), "name": u("Bahasa Melayu")},
- "nb_NO": {"name_en": u("Norwegian (bokmal)"), "name": u("Norsk (bokmål)")},
- "nl_NL": {"name_en": u("Dutch"), "name": u("Nederlands")},
- "nn_NO": {"name_en": u("Norwegian (nynorsk)"), "name": u("Norsk (nynorsk)")},
- "pa_IN": {"name_en": u("Punjabi"), "name": u("ਪੰਜਾਬੀ")},
- "pl_PL": {"name_en": u("Polish"), "name": u("Polski")},
- "pt_BR": {"name_en": u("Portuguese (Brazil)"), "name": u("Português (Brasil)")},
- "pt_PT": {"name_en": u("Portuguese (Portugal)"), "name": u("Português (Portugal)")},
- "ro_RO": {"name_en": u("Romanian"), "name": u("Română")},
- "ru_RU": {"name_en": u("Russian"), "name": u("Русский")},
- "sk_SK": {"name_en": u("Slovak"), "name": u("Slovenčina")},
- "sl_SI": {"name_en": u("Slovenian"), "name": u("Slovenščina")},
- "sq_AL": {"name_en": u("Albanian"), "name": u("Shqip")},
- "sr_RS": {"name_en": u("Serbian"), "name": u("Српски")},
- "sv_SE": {"name_en": u("Swedish"), "name": u("Svenska")},
- "sw_KE": {"name_en": u("Swahili"), "name": u("Kiswahili")},
- "ta_IN": {"name_en": u("Tamil"), "name": u("தமிழ்")},
- "te_IN": {"name_en": u("Telugu"), "name": u("తెలుగు")},
- "th_TH": {"name_en": u("Thai"), "name": u("ภาษาไทย")},
- "tl_PH": {"name_en": u("Filipino"), "name": u("Filipino")},
- "tr_TR": {"name_en": u("Turkish"), "name": u("Türkçe")},
- "uk_UA": {"name_en": u("Ukrainian"), "name": u("Українська")},
- "vi_VN": {"name_en": u("Vietnamese"), "name": u("Tiếng Việt")},
- "zh_CN": {"name_en": u("Chinese (Simplified)"), "name": u("中文(简体)")},
- "zh_TW": {"name_en": u("Chinese (Traditional)"), "name": u("中文(繁體)")},
+ "af_ZA": {"name_en": u"Afrikaans", "name": u"Afrikaans"},
+ "am_ET": {"name_en": u"Amharic", "name": u"አማርኛ"},
+ "ar_AR": {"name_en": u"Arabic", "name": u"العربية"},
+ "bg_BG": {"name_en": u"Bulgarian", "name": u"Български"},
+ "bn_IN": {"name_en": u"Bengali", "name": u"বাংলা"},
+ "bs_BA": {"name_en": u"Bosnian", "name": u"Bosanski"},
+ "ca_ES": {"name_en": u"Catalan", "name": u"Català"},
+ "cs_CZ": {"name_en": u"Czech", "name": u"Čeština"},
+ "cy_GB": {"name_en": u"Welsh", "name": u"Cymraeg"},
+ "da_DK": {"name_en": u"Danish", "name": u"Dansk"},
+ "de_DE": {"name_en": u"German", "name": u"Deutsch"},
+ "el_GR": {"name_en": u"Greek", "name": u"Ελληνικά"},
+ "en_GB": {"name_en": u"English (UK)", "name": u"English (UK)"},
+ "en_US": {"name_en": u"English (US)", "name": u"English (US)"},
+ "es_ES": {"name_en": u"Spanish (Spain)", "name": u"Español (España)"},
+ "es_LA": {"name_en": u"Spanish", "name": u"Español"},
+ "et_EE": {"name_en": u"Estonian", "name": u"Eesti"},
+ "eu_ES": {"name_en": u"Basque", "name": u"Euskara"},
+ "fa_IR": {"name_en": u"Persian", "name": u"فارسی"},
+ "fi_FI": {"name_en": u"Finnish", "name": u"Suomi"},
+ "fr_CA": {"name_en": u"French (Canada)", "name": u"Français (Canada)"},
+ "fr_FR": {"name_en": u"French", "name": u"Français"},
+ "ga_IE": {"name_en": u"Irish", "name": u"Gaeilge"},
+ "gl_ES": {"name_en": u"Galician", "name": u"Galego"},
+ "he_IL": {"name_en": u"Hebrew", "name": u"עברית"},
+ "hi_IN": {"name_en": u"Hindi", "name": u"हिन्दी"},
+ "hr_HR": {"name_en": u"Croatian", "name": u"Hrvatski"},
+ "hu_HU": {"name_en": u"Hungarian", "name": u"Magyar"},
+ "id_ID": {"name_en": u"Indonesian", "name": u"Bahasa Indonesia"},
+ "is_IS": {"name_en": u"Icelandic", "name": u"Íslenska"},
+ "it_IT": {"name_en": u"Italian", "name": u"Italiano"},
+ "ja_JP": {"name_en": u"Japanese", "name": u"日本語"},
+ "ko_KR": {"name_en": u"Korean", "name": u"한국어"},
+ "lt_LT": {"name_en": u"Lithuanian", "name": u"Lietuvių"},
+ "lv_LV": {"name_en": u"Latvian", "name": u"Latviešu"},
+ "mk_MK": {"name_en": u"Macedonian", "name": u"Македонски"},
+ "ml_IN": {"name_en": u"Malayalam", "name": u"മലയാളം"},
+ "ms_MY": {"name_en": u"Malay", "name": u"Bahasa Melayu"},
+ "nb_NO": {"name_en": u"Norwegian (bokmal)", "name": u"Norsk (bokmål)"},
+ "nl_NL": {"name_en": u"Dutch", "name": u"Nederlands"},
+ "nn_NO": {"name_en": u"Norwegian (nynorsk)", "name": u"Norsk (nynorsk)"},
+ "pa_IN": {"name_en": u"Punjabi", "name": u"ਪੰਜਾਬੀ"},
+ "pl_PL": {"name_en": u"Polish", "name": u"Polski"},
+ "pt_BR": {"name_en": u"Portuguese (Brazil)", "name": u"Português (Brasil)"},
+ "pt_PT": {"name_en": u"Portuguese (Portugal)", "name": u"Português (Portugal)"},
+ "ro_RO": {"name_en": u"Romanian", "name": u"Română"},
+ "ru_RU": {"name_en": u"Russian", "name": u"Русский"},
+ "sk_SK": {"name_en": u"Slovak", "name": u"Slovenčina"},
+ "sl_SI": {"name_en": u"Slovenian", "name": u"Slovenščina"},
+ "sq_AL": {"name_en": u"Albanian", "name": u"Shqip"},
+ "sr_RS": {"name_en": u"Serbian", "name": u"Српски"},
+ "sv_SE": {"name_en": u"Swedish", "name": u"Svenska"},
+ "sw_KE": {"name_en": u"Swahili", "name": u"Kiswahili"},
+ "ta_IN": {"name_en": u"Tamil", "name": u"தமிழ்"},
+ "te_IN": {"name_en": u"Telugu", "name": u"తెలుగు"},
+ "th_TH": {"name_en": u"Thai", "name": u"ภาษาไทย"},
+ "tl_PH": {"name_en": u"Filipino", "name": u"Filipino"},
+ "tr_TR": {"name_en": u"Turkish", "name": u"Türkçe"},
+ "uk_UA": {"name_en": u"Ukrainian", "name": u"Українська"},
+ "vi_VN": {"name_en": u"Vietnamese", "name": u"Tiếng Việt"},
+ "zh_CN": {"name_en": u"Chinese (Simplified)", "name": u"中文(简体)"},
+ "zh_TW": {"name_en": u"Chinese (Traditional)", "name": u"中文(繁體)"},
}
"""
# Verify the OpenID response via direct request to the OP
args = dict((k, v[-1]) for k, v in self.request.arguments.items())
- args["openid.mode"] = u("check_authentication")
+ args["openid.mode"] = u"check_authentication"
url = self._OPENID_ENDPOINT
if http_client is None:
http_client = self.get_auth_http_client()
ax_ns = None
for name in self.request.arguments:
if name.startswith("openid.ns.") and \
- self.get_argument(name) == u("http://openid.net/srv/ax/1.0"):
+ self.get_argument(name) == u"http://openid.net/srv/ax/1.0":
ax_ns = name[10:]
break
def get_ax_arg(uri):
if not ax_ns:
- return u("")
+ return u""
prefix = "openid." + ax_ns + ".type."
ax_name = None
for name in self.request.arguments.keys():
ax_name = "openid." + ax_ns + ".value." + part
break
if not ax_name:
- return u("")
- return self.get_argument(ax_name, u(""))
+ return u""
+ return self.get_argument(ax_name, u"")
email = get_ax_arg("http://axschema.org/contact/email")
name = get_ax_arg("http://axschema.org/namePerson")
# have a status bar, such as Safari by default)
params += ' title="%s"' % href
- return u('<a href="%s"%s>%s</a>') % (href, params, url)
+ return u'<a href="%s"%s>%s</a>' % (href, params, url)
# First HTML-escape so that our strings are all safe.
# The regex is modified to avoid character entites other than & so
def __init__(self, code, translations):
self.code = code
- self.name = LOCALE_NAMES.get(code, {}).get("name", u("Unknown"))
+ self.name = LOCALE_NAMES.get(code, {}).get("name", u"Unknown")
self.rtl = False
for prefix in ["fa", "ar", "he"]:
if self.code.startswith(prefix):
str_time = "%d:%02d" % (local_date.hour, local_date.minute)
elif self.code == "zh_CN":
str_time = "%s%d:%02d" % (
- (u('\u4e0a\u5348'), u('\u4e0b\u5348'))[local_date.hour >= 12],
+ (u'\u4e0a\u5348', u'\u4e0b\u5348')[local_date.hour >= 12],
local_date.hour % 12 or 12, local_date.minute)
else:
str_time = "%d:%02d %s" % (
return ""
if len(parts) == 1:
return parts[0]
- comma = u(' \u0648 ') if self.code.startswith("fa") else u(", ")
+ comma = u' \u0648 ' if self.code.startswith("fa") else u", "
return _("%(commas)s and %(last)s") % {
"commas": comma.join(parts[:-1]),
"last": parts[len(parts) - 1],
response.rethrow()
parsed = json_decode(response.body)
self.assertEqual(parsed,
- {u('access_token'): {u('key'): u('hjkl'),
- u('screen_name'): u('foo'),
- u('secret'): u('vbnm')},
- u('name'): u('Foo'),
- u('screen_name'): u('foo'),
- u('username'): u('foo')})
+ {u'access_token': {u'key': u'hjkl',
+ u'screen_name': u'foo',
+ u'secret': u'vbnm'},
+ u'name': u'Foo',
+ u'screen_name': u'foo',
+ u'username': u'foo'})
def test_twitter_show_user(self):
response = self.fetch('/twitter/client/show_user?name=somebody')
def test_google_login(self):
response = self.fetch('/client/login')
self.assertDictEqual({
- u('name'): u('Foo'),
- u('email'): u('foo@example.com'),
- u('access_token'): u('fake-access-token'),
+ u'name': u'Foo',
+ u'email': u'foo@example.com',
+ u'access_token': u'fake-access-token',
}, json_decode(response.body))
# (input, linkify_kwargs, expected_output)
("hello http://world.com/!", {},
- u('hello <a href="http://world.com/">http://world.com/</a>!')),
+ u'hello <a href="http://world.com/">http://world.com/</a>!'),
("hello http://world.com/with?param=true&stuff=yes", {},
- u('hello <a href="http://world.com/with?param=true&amp;stuff=yes">http://world.com/with?param=true&amp;stuff=yes</a>')),
+ u'hello <a href="http://world.com/with?param=true&amp;stuff=yes">http://world.com/with?param=true&amp;stuff=yes</a>'),
# an opened paren followed by many chars killed Gruber's regex
("http://url.com/w(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", {},
- u('<a href="http://url.com/w">http://url.com/w</a>(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')),
+ u'<a href="http://url.com/w">http://url.com/w</a>(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'),
# as did too many dots at the end
("http://url.com/withmany.......................................", {},
- u('<a href="http://url.com/withmany">http://url.com/withmany</a>.......................................')),
+ u'<a href="http://url.com/withmany">http://url.com/withmany</a>.......................................'),
("http://url.com/withmany((((((((((((((((((((((((((((((((((a)", {},
- u('<a href="http://url.com/withmany">http://url.com/withmany</a>((((((((((((((((((((((((((((((((((a)')),
+ u'<a href="http://url.com/withmany">http://url.com/withmany</a>((((((((((((((((((((((((((((((((((a)'),
# some examples from http://daringfireball.net/2009/11/liberal_regex_for_matching_urls
# plus a fex extras (such as multiple parentheses).
("http://foo.com/blah_blah", {},
- u('<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>')),
+ u'<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>'),
("http://foo.com/blah_blah/", {},
- u('<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>')),
+ u'<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>'),
("(Something like http://foo.com/blah_blah)", {},
- u('(Something like <a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>)')),
+ u'(Something like <a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>)'),
("http://foo.com/blah_blah_(wikipedia)", {},
- u('<a href="http://foo.com/blah_blah_(wikipedia)">http://foo.com/blah_blah_(wikipedia)</a>')),
+ u'<a href="http://foo.com/blah_blah_(wikipedia)">http://foo.com/blah_blah_(wikipedia)</a>'),
("http://foo.com/blah_(blah)_(wikipedia)_blah", {},
- u('<a href="http://foo.com/blah_(blah)_(wikipedia)_blah">http://foo.com/blah_(blah)_(wikipedia)_blah</a>')),
+ u'<a href="http://foo.com/blah_(blah)_(wikipedia)_blah">http://foo.com/blah_(blah)_(wikipedia)_blah</a>'),
("(Something like http://foo.com/blah_blah_(wikipedia))", {},
- u('(Something like <a href="http://foo.com/blah_blah_(wikipedia)">http://foo.com/blah_blah_(wikipedia)</a>)')),
+ u'(Something like <a href="http://foo.com/blah_blah_(wikipedia)">http://foo.com/blah_blah_(wikipedia)</a>)'),
("http://foo.com/blah_blah.", {},
- u('<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>.')),
+ u'<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>.'),
("http://foo.com/blah_blah/.", {},
- u('<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>.')),
+ u'<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>.'),
("<http://foo.com/blah_blah>", {},
- u('&lt;<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>&gt;')),
+ u'&lt;<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>&gt;'),
("<http://foo.com/blah_blah/>", {},
- u('&lt;<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>&gt;')),
+ u'&lt;<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>&gt;'),
("http://foo.com/blah_blah,", {},
- u('<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>,')),
+ u'<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>,'),
("http://www.example.com/wpstyle/?p=364.", {},
- u('<a href="http://www.example.com/wpstyle/?p=364">http://www.example.com/wpstyle/?p=364</a>.')),
+ u'<a href="http://www.example.com/wpstyle/?p=364">http://www.example.com/wpstyle/?p=364</a>.'),
("rdar://1234",
{"permitted_protocols": ["http", "rdar"]},
- u('<a href="rdar://1234">rdar://1234</a>')),
+ u'<a href="rdar://1234">rdar://1234</a>'),
("rdar:/1234",
{"permitted_protocols": ["rdar"]},
- u('<a href="rdar:/1234">rdar:/1234</a>')),
+ u'<a href="rdar:/1234">rdar:/1234</a>'),
("http://userid:password@example.com:8080", {},
- u('<a href="http://userid:password@example.com:8080">http://userid:password@example.com:8080</a>')),
+ u'<a href="http://userid:password@example.com:8080">http://userid:password@example.com:8080</a>'),
("http://userid@example.com", {},
- u('<a href="http://userid@example.com">http://userid@example.com</a>')),
+ u'<a href="http://userid@example.com">http://userid@example.com</a>'),
("http://userid@example.com:8080", {},
- u('<a href="http://userid@example.com:8080">http://userid@example.com:8080</a>')),
+ u'<a href="http://userid@example.com:8080">http://userid@example.com:8080</a>'),
("http://userid:password@example.com", {},
- u('<a href="http://userid:password@example.com">http://userid:password@example.com</a>')),
+ u'<a href="http://userid:password@example.com">http://userid:password@example.com</a>'),
("message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e",
{"permitted_protocols": ["http", "message"]},
- u('<a href="message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e">message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e</a>')),
+ u'<a href="message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e">message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e</a>'),
- (u("http://\u27a1.ws/\u4a39"), {},
- u('<a href="http://\u27a1.ws/\u4a39">http://\u27a1.ws/\u4a39</a>')),
+ (u"http://\u27a1.ws/\u4a39", {},
+ u'<a href="http://\u27a1.ws/\u4a39">http://\u27a1.ws/\u4a39</a>'),
("<tag>http://example.com</tag>", {},
- u('&lt;tag&gt;<a href="http://example.com">http://example.com</a>&lt;/tag&gt;')),
+ u'&lt;tag&gt;<a href="http://example.com">http://example.com</a>&lt;/tag&gt;'),
("Just a www.example.com link.", {},
- u('Just a <a href="http://www.example.com">www.example.com</a> link.')),
+ u'Just a <a href="http://www.example.com">www.example.com</a> link.'),
("Just a www.example.com link.",
{"require_protocol": True},
- u('Just a www.example.com link.')),
+ u'Just a www.example.com link.'),
("A http://reallylong.com/link/that/exceedsthelenglimit.html",
{"require_protocol": True, "shorten": True},
- u('A <a href="http://reallylong.com/link/that/exceedsthelenglimit.html" title="http://reallylong.com/link/that/exceedsthelenglimit.html">http://reallylong.com/link...</a>')),
+ u'A <a href="http://reallylong.com/link/that/exceedsthelenglimit.html" title="http://reallylong.com/link/that/exceedsthelenglimit.html">http://reallylong.com/link...</a>'),
("A http://reallylongdomainnamethatwillbetoolong.com/hi!",
{"shorten": True},
- u('A <a href="http://reallylongdomainnamethatwillbetoolong.com/hi" title="http://reallylongdomainnamethatwillbetoolong.com/hi">http://reallylongdomainnametha...</a>!')),
+ u'A <a href="http://reallylongdomainnamethatwillbetoolong.com/hi" title="http://reallylongdomainnamethatwillbetoolong.com/hi">http://reallylongdomainnametha...</a>!'),
("A file:///passwords.txt and http://web.com link", {},
- u('A file:///passwords.txt and <a href="http://web.com">http://web.com</a> link')),
+ u'A file:///passwords.txt and <a href="http://web.com">http://web.com</a> link'),
("A file:///passwords.txt and http://web.com link",
{"permitted_protocols": ["file"]},
- u('A <a href="file:///passwords.txt">file:///passwords.txt</a> and http://web.com link')),
+ u'A <a href="file:///passwords.txt">file:///passwords.txt</a> and http://web.com link'),
("www.external-link.com",
{"extra_params": 'rel="nofollow" class="external"'},
- u('<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a>')),
+ u'<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a>'),
("www.external-link.com and www.internal-link.com/blogs extra",
{"extra_params": lambda href: 'class="internal"' if href.startswith("http://www.internal-link.com") else 'rel="nofollow" class="external"'},
- u('<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a> and <a href="http://www.internal-link.com/blogs" class="internal">www.internal-link.com/blogs</a> extra')),
+ u'<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a> and <a href="http://www.internal-link.com/blogs" class="internal">www.internal-link.com/blogs</a> extra'),
("www.external-link.com",
{"extra_params": lambda href: ' rel="nofollow" class="external" '},
- u('<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a>')),
+ u'<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a>'),
]
def test_xhtml_escape(self):
tests = [
("<foo>", "&lt;foo&gt;"),
- (u("<foo>"), u("&lt;foo&gt;")),
+ (u"<foo>", u"&lt;foo&gt;"),
(b"<foo>", b"&lt;foo&gt;"),
("<>&\"'", "&lt;&gt;&amp;&quot;&#39;"),
("&amp;", "&amp;amp;"),
- (u("<\u00e9>"), u("&lt;\u00e9&gt;")),
+ (u"<\u00e9>", u"&lt;\u00e9&gt;"),
(b"<\xc3\xa9>", b"&lt;\xc3\xa9&gt;"),
]
for unescaped, escaped in tests:
('foo&#32;bar', 'foo bar'),
('foo&#x20;bar', 'foo bar'),
('foo&#X20;bar', 'foo bar'),
- ('foo&#xabc;bar', u('foo\u0abcbar')),
+ ('foo&#xabc;bar', u'foo\u0abcbar'),
('foo&#xyz;bar', 'foo&#xyz;bar'), # invalid encoding
('foo&#;bar', 'foo&#;bar'), # invalid encoding
('foo&#x;bar', 'foo&#x;bar'), # invalid encoding
(u('\u00e9').encode('latin1'), '%E9'),
# unicode strings become utf8
- (u('\u00e9'), '%C3%A9'),
+ (u'\u00e9', '%C3%A9'),
]
for unescaped, escaped in tests:
self.assertEqual(url_escape(unescaped), escaped)
def test_url_unescape_unicode(self):
tests = [
- ('%C3%A9', u('\u00e9'), 'utf8'),
- ('%C3%A9', u('\u00c3\u00a9'), 'latin1'),
- ('%C3%A9', utf8(u('\u00e9')), None),
+ ('%C3%A9', u'\u00e9', 'utf8'),
+ ('%C3%A9', u'\u00c3\u00a9', 'latin1'),
+ ('%C3%A9', utf8(u'\u00e9'), None),
]
for escaped, unescaped, encoding in tests:
# input strings to url_unescape should only contain ascii
# On python2 the escape methods should generally return the same
# type as their argument
self.assertEqual(type(xhtml_escape("foo")), str)
- self.assertEqual(type(xhtml_escape(u("foo"))), unicode_type)
+ self.assertEqual(type(xhtml_escape(u"foo")), unicode_type)
def test_json_decode(self):
# json_decode accepts both bytes and unicode, but strings it returns
# are always unicode.
- self.assertEqual(json_decode(b'"foo"'), u("foo"))
- self.assertEqual(json_decode(u('"foo"')), u("foo"))
+ self.assertEqual(json_decode(b'"foo"'), u"foo")
+ self.assertEqual(json_decode(u'"foo"'), u"foo")
# Non-ascii bytes are interpreted as utf8
- self.assertEqual(json_decode(utf8(u('"\u00e9"'))), u("\u00e9"))
+ self.assertEqual(json_decode(utf8(u'"\u00e9"')), u"\u00e9")
def test_json_encode(self):
# json deals with strings, not bytes. On python 2 byte strings will
# convert automatically if they are utf8; on python 3 byte strings
# are not allowed.
- self.assertEqual(json_decode(json_encode(u("\u00e9"))), u("\u00e9"))
+ self.assertEqual(json_decode(json_encode(u"\u00e9")), u"\u00e9")
if bytes is str:
- self.assertEqual(json_decode(json_encode(utf8(u("\u00e9")))), u("\u00e9"))
+ self.assertEqual(json_decode(json_encode(utf8(u"\u00e9"))), u"\u00e9")
self.assertRaises(UnicodeDecodeError, json_encode, b"\xe9")
def test_squeeze(self):
- self.assertEqual(squeeze(u('sequences of whitespace chars')), u('sequences of whitespace chars'))
+ self.assertEqual(squeeze(u'sequences of whitespace chars'), u'sequences of whitespace chars')
def test_recursive_unicode(self):
tests = {
'tuple': (b"foo", b"bar"),
'bytes': b"foo"
}
- self.assertEqual(recursive_unicode(tests['dict']), {u("foo"): u("bar")})
- self.assertEqual(recursive_unicode(tests['list']), [u("foo"), u("bar")])
- self.assertEqual(recursive_unicode(tests['tuple']), (u("foo"), u("bar")))
- self.assertEqual(recursive_unicode(tests['bytes']), u("foo"))
+ self.assertEqual(recursive_unicode(tests['dict']), {u"foo": u"bar"})
+ self.assertEqual(recursive_unicode(tests['list']), [u"foo", u"bar"])
+ self.assertEqual(recursive_unicode(tests['tuple']), (u"foo", u"bar"))
+ self.assertEqual(recursive_unicode(tests['bytes']), u"foo")
response.body)
def test_body_encoding(self):
- unicode_body = u("\xe9")
+ unicode_body = u"\xe9"
byte_body = binascii.a2b_hex(b"e9")
# unicode string in body gets converted to utf8
# break anything
response = self.fetch("/echopost", method="POST", body=byte_body,
headers={"Content-Type": "application/blah"},
- user_agent=u("foo"))
+ user_agent=u"foo")
self.assertEqual(response.headers["Content-Length"], "1")
self.assertEqual(response.body, byte_body)
# in a plain dictionary or an HTTPHeaders object.
# Keys must always be the native str type.
# All combinations should have the same results on the wire.
- for value in [u("MyUserAgent"), b"MyUserAgent"]:
+ for value in [u"MyUserAgent", b"MyUserAgent"]:
for container in [dict, HTTPHeaders]:
headers = container()
headers['User-Agent'] = value
b"",
]))
data = json_decode(response)
- self.assertEqual(u("\u00e9"), data["header"])
- self.assertEqual(u("\u00e1"), data["argument"])
- self.assertEqual(u("\u00f3"), data["filename"])
- self.assertEqual(u("\u00fa"), data["filebody"])
+ self.assertEqual(u"\u00e9", data["header"])
+ self.assertEqual(u"\u00e1", data["argument"])
+ self.assertEqual(u"\u00f3", data["filename"])
+ self.assertEqual(u"\u00fa", data["filebody"])
def test_newlines(self):
# We support both CRLF and bare LF as line separators.
def test_query_string_encoding(self):
response = self.fetch("/echo?foo=%C3%A9")
data = json_decode(response.body)
- self.assertEqual(data, {u("foo"): [u("\u00e9")]})
+ self.assertEqual(data, {u"foo": [u"\u00e9"]})
def test_empty_query_string(self):
response = self.fetch("/echo?foo=&foo=")
data = json_decode(response.body)
- self.assertEqual(data, {u("foo"): [u(""), u("")]})
+ self.assertEqual(data, {u"foo": [u"", u""]})
def test_empty_post_parameters(self):
response = self.fetch("/echo", method="POST", body="foo=&bar=")
data = json_decode(response.body)
- self.assertEqual(data, {u("foo"): [u("")], u("bar"): [u("")]})
+ self.assertEqual(data, {u"foo": [u""], u"bar": [u""]})
def test_types(self):
headers = {"Cookie": "foo=bar"}
""".replace(b"\n", b"\r\n"))
read_stream_body(self.stream, self.stop)
headers, response = self.wait()
- self.assertEqual(json_decode(response), {u('foo'): [u('bar')]})
+ self.assertEqual(json_decode(response), {u'foo': [u'bar']})
class XHeaderTest(HandlerBaseTestCase):
def test_uncompressed(self):
response = self.fetch('/', method='POST', body='foo=bar')
- self.assertEquals(json_decode(response.body), {u('foo'): [u('bar')]})
+ self.assertEquals(json_decode(response.body), {u'foo': [u'bar']})
class GzipTest(GzipBaseTest, AsyncHTTPTestCase):
def test_gzip(self):
response = self.post_gzip('foo=bar')
- self.assertEquals(json_decode(response.body), {u('foo'): [u('bar')]})
+ self.assertEquals(json_decode(response.body), {u'foo': [u'bar']})
class GzipUnsupportedTest(GzipBaseTest, AsyncHTTPTestCase):
# and cpython's unicodeobject.c (which defines the implementation
# of unicode_type.splitlines(), and uses a different list than TR13).
newlines = [
- u('\u001b'), # VERTICAL TAB
- u('\u001c'), # FILE SEPARATOR
- u('\u001d'), # GROUP SEPARATOR
- u('\u001e'), # RECORD SEPARATOR
- u('\u0085'), # NEXT LINE
- u('\u2028'), # LINE SEPARATOR
- u('\u2029'), # PARAGRAPH SEPARATOR
+ u'\u001b', # VERTICAL TAB
+ u'\u001c', # FILE SEPARATOR
+ u'\u001d', # GROUP SEPARATOR
+ u'\u001e', # RECORD SEPARATOR
+ u'\u0085', # NEXT LINE
+ u'\u2028', # LINE SEPARATOR
+ u'\u2029', # PARAGRAPH SEPARATOR
]
for newline in newlines:
# Try the utf8 and latin1 representations of each newline
os.path.join(os.path.dirname(__file__), 'csv_translations'))
locale = tornado.locale.get("fr_FR")
self.assertTrue(isinstance(locale, tornado.locale.CSVLocale))
- self.assertEqual(locale.translate("school"), u("\u00e9cole"))
+ self.assertEqual(locale.translate("school"), u"\u00e9cole")
# tempfile.mkdtemp is not available on app engine.
@skipOnAppEngine
tornado.locale.load_translations(tmpdir)
locale = tornado.locale.get('fr_FR')
self.assertIsInstance(locale, tornado.locale.CSVLocale)
- self.assertEqual(locale.translate("school"), u("\u00e9cole"))
+ self.assertEqual(locale.translate("school"), u"\u00e9cole")
finally:
shutil.rmtree(tmpdir)
"tornado_test")
locale = tornado.locale.get("fr_FR")
self.assertTrue(isinstance(locale, tornado.locale.GettextLocale))
- self.assertEqual(locale.translate("school"), u("\u00e9cole"))
- self.assertEqual(locale.pgettext("law", "right"), u("le droit"))
- self.assertEqual(locale.pgettext("good", "right"), u("le bien"))
- self.assertEqual(locale.pgettext("organization", "club", "clubs", 1), u("le club"))
- self.assertEqual(locale.pgettext("organization", "club", "clubs", 2), u("les clubs"))
- self.assertEqual(locale.pgettext("stick", "club", "clubs", 1), u("le b\xe2ton"))
- self.assertEqual(locale.pgettext("stick", "club", "clubs", 2), u("les b\xe2tons"))
+ self.assertEqual(locale.translate("school"), u"\u00e9cole")
+ self.assertEqual(locale.pgettext("law", "right"), u"le droit")
+ self.assertEqual(locale.pgettext("good", "right"), u"le bien")
+ self.assertEqual(locale.pgettext("organization", "club", "clubs", 1), u"le club")
+ self.assertEqual(locale.pgettext("organization", "club", "clubs", 2), u"les clubs")
+ self.assertEqual(locale.pgettext("stick", "club", "clubs", 1), u"le b\xe2ton")
+ self.assertEqual(locale.pgettext("stick", "club", "clubs", 2), u"les b\xe2tons")
class LocaleDataTest(unittest.TestCase):
def test_non_ascii_name(self):
name = tornado.locale.LOCALE_NAMES['es_LA']['name']
self.assertTrue(isinstance(name, unicode_type))
- self.assertEqual(name, u('Espa\u00f1ol'))
+ self.assertEqual(name, u'Espa\u00f1ol')
self.assertEqual(utf8(name), b'Espa\xc3\xb1ol')
# for testing. (testing with color off fails to expose some potential
# encoding issues from the control characters)
self.formatter._colors = {
- logging.ERROR: u("\u0001"),
+ logging.ERROR: u"\u0001",
}
- self.formatter._normal = u("\u0002")
+ self.formatter._normal = u"\u0002"
# construct a Logger directly to bypass getLogger's caching
self.logger = logging.Logger('LogFormatterTest')
self.logger.propagate = False
if issubclass(bytes, basestring_type):
# on python 2, utf8 byte strings (and by extension ascii byte
# strings) are passed through as-is.
- self.assertEqual(self.get_output(), utf8(u("\u00e9")))
+ self.assertEqual(self.get_output(), utf8(u"\u00e9"))
else:
# on python 3, byte strings always get repr'd even if
# they're ascii-only, so this degenerates into another
# copy of test_bytes_logging.
- self.assertEqual(self.get_output(), utf8(repr(utf8(u("\u00e9")))))
+ self.assertEqual(self.get_output(), utf8(repr(utf8(u"\u00e9"))))
def test_bytes_exception_logging(self):
try:
return logging.FileHandler(filename, encoding="utf8")
def test_unicode_logging(self):
- self.logger.error(u("\u00e9"))
- self.assertEqual(self.get_output(), utf8(u("\u00e9")))
+ self.logger.error(u"\u00e9")
+ self.assertEqual(self.get_output(), utf8(u"\u00e9"))
class EnablePrettyLoggingTest(unittest.TestCase):
# this deadlock.
resolver = ThreadedResolver()
-IOLoop.current().run_sync(lambda: resolver.resolve(u('localhost'), 80))
+IOLoop.current().run_sync(lambda: resolver.resolve(u'localhost', 80))
b"expr {{jquery expr}}")
def test_unicode_template(self):
- template = Template(utf8(u("\u00e9")))
- self.assertEqual(template.generate(), utf8(u("\u00e9")))
+ template = Template(utf8(u"\u00e9"))
+ self.assertEqual(template.generate(), utf8(u"\u00e9"))
def test_unicode_literal_expression(self):
# Unicode literals should be usable in templates. Note that this
if str is unicode_type:
# python 3 needs a different version of this test since
# 2to3 doesn't run on template internals
- template = Template(utf8(u('{{ "\u00e9" }}')))
+ template = Template(utf8(u'{{ "\u00e9" }}'))
else:
- template = Template(utf8(u('{{ u"\u00e9" }}')))
- self.assertEqual(template.generate(), utf8(u("\u00e9")))
+ template = Template(utf8(u'{{ u"\u00e9" }}'))
+ self.assertEqual(template.generate(), utf8(u"\u00e9"))
def test_custom_namespace(self):
loader = DictLoader({"test.html": "{{ inc(5) }}"}, namespace={"inc": lambda x: x + 1})
def test_unicode_apply(self):
def upper(s):
return to_unicode(s).upper()
- template = Template(utf8(u("{% apply upper %}foo \u00e9{% end %}")))
- self.assertEqual(template.generate(upper=upper), utf8(u("FOO \u00c9")))
+ template = Template(utf8(u"{% apply upper %}foo \u00e9{% end %}"))
+ self.assertEqual(template.generate(upper=upper), utf8(u"FOO \u00c9"))
def test_bytes_apply(self):
def upper(s):
return utf8(to_unicode(s).upper())
- template = Template(utf8(u("{% apply upper %}foo \u00e9{% end %}")))
- self.assertEqual(template.generate(upper=upper), utf8(u("FOO \u00c9")))
+ template = Template(utf8(u"{% apply upper %}foo \u00e9{% end %}"))
+ self.assertEqual(template.generate(upper=upper), utf8(u"FOO \u00c9"))
def test_if(self):
template = Template(utf8("{% if x > 4 %}yes{% else %}no{% end %}"))
self.assertEqual(template.generate(), '0')
def test_non_ascii_name(self):
- loader = DictLoader({u("t\u00e9st.html"): "hello"})
- self.assertEqual(loader.load(u("t\u00e9st.html")).generate(), b"hello")
+ loader = DictLoader({u"t\u00e9st.html": "hello"})
+ self.assertEqual(loader.load(u"t\u00e9st.html").generate(), b"hello")
class StackTraceTest(unittest.TestCase):
def test_utf8_in_file(self):
tmpl = self.loader.load("utf8.html")
result = tmpl.generate()
- self.assertEqual(to_unicode(result).strip(), u("H\u00e9llo"))
+ self.assertEqual(to_unicode(result).strip(), u"H\u00e9llo")
class UnicodeLiteralTest(unittest.TestCase):
def test_unicode_escapes(self):
- self.assertEqual(utf8(u('\u00e9')), b'\xc3\xa9')
+ self.assertEqual(utf8(u'\u00e9'), b'\xc3\xa9')
class ExecInTest(unittest.TestCase):
self.assertIs(import_object('tornado.escape.utf8'), utf8)
def test_import_member_unicode(self):
- self.assertIs(import_object(u('tornado.escape.utf8')), utf8)
+ self.assertIs(import_object(u'tornado.escape.utf8'), utf8)
def test_import_module(self):
self.assertIs(import_object('tornado.escape'), tornado.escape)
# The internal implementation of __import__ differs depending on
# whether the thing being imported is a module or not.
# This variant requires a byte string in python 2.
- self.assertIs(import_object(u('tornado.escape')), tornado.escape)
+ self.assertIs(import_object(u'tornado.escape'), tornado.escape)
# Try setting cookies with different argument types
# to ensure that everything gets encoded correctly
self.set_cookie("str", "asdf")
- self.set_cookie("unicode", u("qwer"))
+ self.set_cookie("unicode", u"qwer")
self.set_cookie("bytes", b"zxcv")
class GetCookieHandler(RequestHandler):
def get(self):
# unicode domain and path arguments shouldn't break things
# either (see bug #285)
- self.set_cookie("unicode_args", "blah", domain=u("foo.com"),
- path=u("/foo"))
+ self.set_cookie("unicode_args", "blah", domain=u"foo.com",
+ path=u"/foo")
class SetCookieSpecialCharHandler(RequestHandler):
def get(self):
def test_group_encoding(self):
# Path components and query arguments should be decoded the same way
self.assertEqual(self.fetch_json('/group/%C3%A9?arg=%C3%A9'),
- {u("path"): u("/group/%C3%A9"),
- u("path_args"): [u("\u00e9")],
- u("args"): {u("arg"): [u("\u00e9")]}})
+ {u"path": u"/group/%C3%A9",
+ u"path_args": [u"\u00e9"],
+ u"args": {u"arg": [u"\u00e9"]}})
def test_slashes(self):
# Slashes may be escaped to appear as a single "directory" in the path,
response = self.fetch(req_url)
response.rethrow()
data = json_decode(response.body)
- self.assertEqual(data, {u('path'): [u('unicode'), u('\u00e9')],
- u('query'): [u('unicode'), u('\u00e9')],
+ self.assertEqual(data, {u'path': [u'unicode', u'\u00e9'],
+ u'query': [u'unicode', u'\u00e9'],
})
response = self.fetch("/decode_arg/%C3%A9?foo=%C3%A9")
response.rethrow()
data = json_decode(response.body)
- self.assertEqual(data, {u('path'): [u('bytes'), u('c3a9')],
- u('query'): [u('bytes'), u('c3a9')],
+ self.assertEqual(data, {u'path': [u'bytes', u'c3a9'],
+ u'query': [u'bytes', u'c3a9'],
})
def test_decode_argument_invalid_unicode(self):
response = self.fetch(req_url)
response.rethrow()
data = json_decode(response.body)
- self.assertEqual(data, {u('path'): [u('unicode'), u('1 + 1')],
- u('query'): [u('unicode'), u('1 + 1')],
+ self.assertEqual(data, {u'path': [u'unicode', u'1 + 1'],
+ u'query': [u'unicode', u'1 + 1'],
})
def test_reverse_url(self):
'/decode_arg/42')
self.assertEqual(self.app.reverse_url('decode_arg', b'\xe9'),
'/decode_arg/%E9')
- self.assertEqual(self.app.reverse_url('decode_arg', u('\u00e9')),
+ self.assertEqual(self.app.reverse_url('decode_arg', u'\u00e9'),
'/decode_arg/%C3%A9')
self.assertEqual(self.app.reverse_url('decode_arg', '1 + 1'),
'/decode_arg/1%20%2B%201')
def test_optional_path(self):
self.assertEqual(self.fetch_json("/optional_path/foo"),
- {u("path"): u("foo")})
+ {u"path": u"foo"})
self.assertEqual(self.fetch_json("/optional_path/"),
- {u("path"): None})
+ {u"path": None})
def test_multi_header(self):
response = self.fetch("/multi_header")
self.write(path)
return [("/str/(?P<path>.*)", EchoHandler),
- (u("/unicode/(?P<path>.*)"), EchoHandler)]
+ (u"/unicode/(?P<path>.*)", EchoHandler)]
def test_named_urlspec_groups(self):
response = self.fetch("/str/foo")
@gen_test
def test_unicode_message(self):
ws = yield self.ws_connect('/echo')
- ws.write_message(u('hello \u00e9'))
+ ws.write_message(u'hello \u00e9')
response = yield ws.read_message()
- self.assertEqual(response, u('hello \u00e9'))
+ self.assertEqual(response, u'hello \u00e9')
yield self.close(ws)
@gen_test