Convert all uses of tornado.util.u() to real unicode literals.
author     Ben Darnell <ben@bendarnell.com>
           Sun, 25 Oct 2015 15:25:38 +0000 (11:25 -0400)
committer  Ben Darnell <ben@bendarnell.com>
           Fri, 6 Nov 2015 23:53:11 +0000 (18:53 -0500)
16 files changed:
tornado/_locale_data.py
tornado/auth.py
tornado/escape.py
tornado/locale.py
tornado/test/auth_test.py
tornado/test/escape_test.py
tornado/test/httpclient_test.py
tornado/test/httpserver_test.py
tornado/test/httputil_test.py
tornado/test/locale_test.py
tornado/test/log_test.py
tornado/test/resolve_test_helper.py
tornado/test/template_test.py
tornado/test/util_test.py
tornado/test/web_test.py
tornado/test/websocket_test.py
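
Every hunk below applies the same mechanical rewrite: a call of the form u("...") becomes a native u"..." literal. The snippet below is a minimal sketch, assuming a typical two-branch compatibility shim, of why the two spellings are interchangeable once Python 3.2 support is dropped: Python 3.0-3.2 rejected the u'' prefix, and PEP 414 restored it in Python 3.3, so a real literal works unchanged on both Python 2 and modern Python 3. The helper shown here is illustrative only, not Tornado's exact implementation.

    # A minimal sketch of the kind of compatibility shim this commit removes.
    # The real helper lived in tornado.util; its exact implementation may differ.
    import sys

    if sys.version_info[0] >= 3:
        def u(s):
            # On Python 3 every str is already unicode; pass it through.
            return s
    else:
        def u(s):
            # On Python 2, interpret \uXXXX escapes in an ASCII byte string
            # and return a unicode object.
            return s.decode('unicode_escape')

    # Old spelling, needed while Python 3.2 (which had no u'' prefix) was supported:
    old = u("Espa\u00f1ol")

    # New spelling used throughout this commit; valid on Python 2 and Python 3.3+:
    new = u"Espa\u00f1ol"

    assert old == new

Note that tornado/_locale_data.py is the one exception to this shape: its u was imported as to_unicode (visible in the context line of the first hunk) because that file's strings contain raw UTF-8 names rather than \uXXXX escapes, but the replacement with real literals follows the same pattern.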

tornado/_locale_data.py
index 47c1df618c88804de9496715a3f7c7a110e82ab5..26531282ddffc722c70c25bab09192decae6fdd1 100644 (file)
@@ -29,66 +29,66 @@ from __future__ import absolute_import, division, print_function, with_statement
 from tornado.escape import to_unicode as u
 
 LOCALE_NAMES = {
-    "af_ZA": {"name_en": u("Afrikaans"), "name": u("Afrikaans")},
-    "am_ET": {"name_en": u("Amharic"), "name": u("አማርኛ")},
-    "ar_AR": {"name_en": u("Arabic"), "name": u("العربية")},
-    "bg_BG": {"name_en": u("Bulgarian"), "name": u("Български")},
-    "bn_IN": {"name_en": u("Bengali"), "name": u("বাংলা")},
-    "bs_BA": {"name_en": u("Bosnian"), "name": u("Bosanski")},
-    "ca_ES": {"name_en": u("Catalan"), "name": u("Català")},
-    "cs_CZ": {"name_en": u("Czech"), "name": u("Čeština")},
-    "cy_GB": {"name_en": u("Welsh"), "name": u("Cymraeg")},
-    "da_DK": {"name_en": u("Danish"), "name": u("Dansk")},
-    "de_DE": {"name_en": u("German"), "name": u("Deutsch")},
-    "el_GR": {"name_en": u("Greek"), "name": u("Ελληνικά")},
-    "en_GB": {"name_en": u("English (UK)"), "name": u("English (UK)")},
-    "en_US": {"name_en": u("English (US)"), "name": u("English (US)")},
-    "es_ES": {"name_en": u("Spanish (Spain)"), "name": u("Español (España)")},
-    "es_LA": {"name_en": u("Spanish"), "name": u("Español")},
-    "et_EE": {"name_en": u("Estonian"), "name": u("Eesti")},
-    "eu_ES": {"name_en": u("Basque"), "name": u("Euskara")},
-    "fa_IR": {"name_en": u("Persian"), "name": u("فارسی")},
-    "fi_FI": {"name_en": u("Finnish"), "name": u("Suomi")},
-    "fr_CA": {"name_en": u("French (Canada)"), "name": u("Français (Canada)")},
-    "fr_FR": {"name_en": u("French"), "name": u("Français")},
-    "ga_IE": {"name_en": u("Irish"), "name": u("Gaeilge")},
-    "gl_ES": {"name_en": u("Galician"), "name": u("Galego")},
-    "he_IL": {"name_en": u("Hebrew"), "name": u("עברית")},
-    "hi_IN": {"name_en": u("Hindi"), "name": u("हिन्दी")},
-    "hr_HR": {"name_en": u("Croatian"), "name": u("Hrvatski")},
-    "hu_HU": {"name_en": u("Hungarian"), "name": u("Magyar")},
-    "id_ID": {"name_en": u("Indonesian"), "name": u("Bahasa Indonesia")},
-    "is_IS": {"name_en": u("Icelandic"), "name": u("Íslenska")},
-    "it_IT": {"name_en": u("Italian"), "name": u("Italiano")},
-    "ja_JP": {"name_en": u("Japanese"), "name": u("日本語")},
-    "ko_KR": {"name_en": u("Korean"), "name": u("한국어")},
-    "lt_LT": {"name_en": u("Lithuanian"), "name": u("Lietuvių")},
-    "lv_LV": {"name_en": u("Latvian"), "name": u("Latviešu")},
-    "mk_MK": {"name_en": u("Macedonian"), "name": u("Македонски")},
-    "ml_IN": {"name_en": u("Malayalam"), "name": u("മലയാളം")},
-    "ms_MY": {"name_en": u("Malay"), "name": u("Bahasa Melayu")},
-    "nb_NO": {"name_en": u("Norwegian (bokmal)"), "name": u("Norsk (bokmål)")},
-    "nl_NL": {"name_en": u("Dutch"), "name": u("Nederlands")},
-    "nn_NO": {"name_en": u("Norwegian (nynorsk)"), "name": u("Norsk (nynorsk)")},
-    "pa_IN": {"name_en": u("Punjabi"), "name": u("ਪੰਜਾਬੀ")},
-    "pl_PL": {"name_en": u("Polish"), "name": u("Polski")},
-    "pt_BR": {"name_en": u("Portuguese (Brazil)"), "name": u("Português (Brasil)")},
-    "pt_PT": {"name_en": u("Portuguese (Portugal)"), "name": u("Português (Portugal)")},
-    "ro_RO": {"name_en": u("Romanian"), "name": u("Română")},
-    "ru_RU": {"name_en": u("Russian"), "name": u("Русский")},
-    "sk_SK": {"name_en": u("Slovak"), "name": u("Slovenčina")},
-    "sl_SI": {"name_en": u("Slovenian"), "name": u("Slovenščina")},
-    "sq_AL": {"name_en": u("Albanian"), "name": u("Shqip")},
-    "sr_RS": {"name_en": u("Serbian"), "name": u("Српски")},
-    "sv_SE": {"name_en": u("Swedish"), "name": u("Svenska")},
-    "sw_KE": {"name_en": u("Swahili"), "name": u("Kiswahili")},
-    "ta_IN": {"name_en": u("Tamil"), "name": u("தமிழ்")},
-    "te_IN": {"name_en": u("Telugu"), "name": u("తెలుగు")},
-    "th_TH": {"name_en": u("Thai"), "name": u("ภาษาไทย")},
-    "tl_PH": {"name_en": u("Filipino"), "name": u("Filipino")},
-    "tr_TR": {"name_en": u("Turkish"), "name": u("Türkçe")},
-    "uk_UA": {"name_en": u("Ukraini "), "name": u("Українська")},
-    "vi_VN": {"name_en": u("Vietnamese"), "name": u("Tiếng Việt")},
-    "zh_CN": {"name_en": u("Chinese (Simplified)"), "name": u("中文(简体)")},
-    "zh_TW": {"name_en": u("Chinese (Traditional)"), "name": u("中文(繁體)")},
+    "af_ZA": {"name_en": u"Afrikaans", "name": u"Afrikaans"},
+    "am_ET": {"name_en": u"Amharic", "name": u"አማርኛ"},
+    "ar_AR": {"name_en": u"Arabic", "name": u"العربية"},
+    "bg_BG": {"name_en": u"Bulgarian", "name": u"Български"},
+    "bn_IN": {"name_en": u"Bengali", "name": u"বাংলা"},
+    "bs_BA": {"name_en": u"Bosnian", "name": u"Bosanski"},
+    "ca_ES": {"name_en": u"Catalan", "name": u"Català"},
+    "cs_CZ": {"name_en": u"Czech", "name": u"Čeština"},
+    "cy_GB": {"name_en": u"Welsh", "name": u"Cymraeg"},
+    "da_DK": {"name_en": u"Danish", "name": u"Dansk"},
+    "de_DE": {"name_en": u"German", "name": u"Deutsch"},
+    "el_GR": {"name_en": u"Greek", "name": u"Ελληνικά"},
+    "en_GB": {"name_en": u"English (UK)", "name": u"English (UK)"},
+    "en_US": {"name_en": u"English (US)", "name": u"English (US)"},
+    "es_ES": {"name_en": u"Spanish (Spain)", "name": u"Español (España)"},
+    "es_LA": {"name_en": u"Spanish", "name": u"Español"},
+    "et_EE": {"name_en": u"Estonian", "name": u"Eesti"},
+    "eu_ES": {"name_en": u"Basque", "name": u"Euskara"},
+    "fa_IR": {"name_en": u"Persian", "name": u"فارسی"},
+    "fi_FI": {"name_en": u"Finnish", "name": u"Suomi"},
+    "fr_CA": {"name_en": u"French (Canada)", "name": u"Français (Canada)"},
+    "fr_FR": {"name_en": u"French", "name": u"Français"},
+    "ga_IE": {"name_en": u"Irish", "name": u"Gaeilge"},
+    "gl_ES": {"name_en": u"Galician", "name": u"Galego"},
+    "he_IL": {"name_en": u"Hebrew", "name": u"עברית"},
+    "hi_IN": {"name_en": u"Hindi", "name": u"हिन्दी"},
+    "hr_HR": {"name_en": u"Croatian", "name": u"Hrvatski"},
+    "hu_HU": {"name_en": u"Hungarian", "name": u"Magyar"},
+    "id_ID": {"name_en": u"Indonesian", "name": u"Bahasa Indonesia"},
+    "is_IS": {"name_en": u"Icelandic", "name": u"Íslenska"},
+    "it_IT": {"name_en": u"Italian", "name": u"Italiano"},
+    "ja_JP": {"name_en": u"Japanese", "name": u"日本語"},
+    "ko_KR": {"name_en": u"Korean", "name": u"한국어"},
+    "lt_LT": {"name_en": u"Lithuanian", "name": u"Lietuvių"},
+    "lv_LV": {"name_en": u"Latvian", "name": u"Latviešu"},
+    "mk_MK": {"name_en": u"Macedonian", "name": u"Македонски"},
+    "ml_IN": {"name_en": u"Malayalam", "name": u"മലയാളം"},
+    "ms_MY": {"name_en": u"Malay", "name": u"Bahasa Melayu"},
+    "nb_NO": {"name_en": u"Norwegian (bokmal)", "name": u"Norsk (bokmål)"},
+    "nl_NL": {"name_en": u"Dutch", "name": u"Nederlands"},
+    "nn_NO": {"name_en": u"Norwegian (nynorsk)", "name": u"Norsk (nynorsk)"},
+    "pa_IN": {"name_en": u"Punjabi", "name": u"ਪੰਜਾਬੀ"},
+    "pl_PL": {"name_en": u"Polish", "name": u"Polski"},
+    "pt_BR": {"name_en": u"Portuguese (Brazil)", "name": u"Português (Brasil)"},
+    "pt_PT": {"name_en": u"Portuguese (Portugal)", "name": u"Português (Portugal)"},
+    "ro_RO": {"name_en": u"Romanian", "name": u"Română"},
+    "ru_RU": {"name_en": u"Russian", "name": u"Русский"},
+    "sk_SK": {"name_en": u"Slovak", "name": u"Slovenčina"},
+    "sl_SI": {"name_en": u"Slovenian", "name": u"Slovenščina"},
+    "sq_AL": {"name_en": u"Albanian", "name": u"Shqip"},
+    "sr_RS": {"name_en": u"Serbian", "name": u"Српски"},
+    "sv_SE": {"name_en": u"Swedish", "name": u"Svenska"},
+    "sw_KE": {"name_en": u"Swahili", "name": u"Kiswahili"},
+    "ta_IN": {"name_en": u"Tamil", "name": u"தமிழ்"},
+    "te_IN": {"name_en": u"Telugu", "name": u"తెలుగు"},
+    "th_TH": {"name_en": u"Thai", "name": u"ภาษาไทย"},
+    "tl_PH": {"name_en": u"Filipino", "name": u"Filipino"},
+    "tr_TR": {"name_en": u"Turkish", "name": u"Türkçe"},
+    "uk_UA": {"name_en": u"Ukraini ", "name": u"Українська"},
+    "vi_VN": {"name_en": u"Vietnamese", "name": u"Tiếng Việt"},
+    "zh_CN": {"name_en": u"Chinese (Simplified)", "name": u"中文(简体)"},
+    "zh_TW": {"name_en": u"Chinese (Traditional)", "name": u"中文(繁體)"},
 }
tornado/auth.py
index ff7172aa01bfd5e06aa70086e8c4f6fbabc0f0d7..0f1b3b13d6f9ad679f0ee2ace84fbaef11016b59 100644 (file)
@@ -188,7 +188,7 @@ class OpenIdMixin(object):
         """
         # Verify the OpenID response via direct request to the OP
         args = dict((k, v[-1]) for k, v in self.request.arguments.items())
-        args["openid.mode"] = u("check_authentication")
+        args["openid.mode"] = u"check_authentication"
         url = self._OPENID_ENDPOINT
         if http_client is None:
             http_client = self.get_auth_http_client()
@@ -255,13 +255,13 @@ class OpenIdMixin(object):
         ax_ns = None
         for name in self.request.arguments:
             if name.startswith("openid.ns.") and \
-                    self.get_argument(name) == u("http://openid.net/srv/ax/1.0"):
+                    self.get_argument(name) == u"http://openid.net/srv/ax/1.0":
                 ax_ns = name[10:]
                 break
 
         def get_ax_arg(uri):
             if not ax_ns:
-                return u("")
+                return u""
             prefix = "openid." + ax_ns + ".type."
             ax_name = None
             for name in self.request.arguments.keys():
@@ -270,8 +270,8 @@ class OpenIdMixin(object):
                     ax_name = "openid." + ax_ns + ".value." + part
                     break
             if not ax_name:
-                return u("")
-            return self.get_argument(ax_name, u(""))
+                return u""
+            return self.get_argument(ax_name, u"")
 
         email = get_ax_arg("http://axschema.org/contact/email")
         name = get_ax_arg("http://axschema.org/namePerson")
tornado/escape.py
index 2f04b4683ae7cccecbbb2571d354c4a40eea3a9d..b644a474913df648c8f64db60418607c07e15b30 100644 (file)
@@ -366,7 +366,7 @@ def linkify(text, shorten=False, extra_params="",
                     # have a status bar, such as Safari by default)
                     params += ' title="%s"' % href
 
-        return u('<a href="%s"%s>%s</a>') % (href, params, url)
+        return u'<a href="%s"%s>%s</a>' % (href, params, url)
 
     # First HTML-escape so that our strings are all safe.
     # The regex is modified to avoid character entites other than &amp; so
tornado/locale.py
index 8310c4d4c63de7a13a5236499f5c590c782cfccf..07da26df85b108a7e918451b30aeb8c591494341 100644 (file)
@@ -274,7 +274,7 @@ class Locale(object):
 
     def __init__(self, code, translations):
         self.code = code
-        self.name = LOCALE_NAMES.get(code, {}).get("name", u("Unknown"))
+        self.name = LOCALE_NAMES.get(code, {}).get("name", u"Unknown")
         self.rtl = False
         for prefix in ["fa", "ar", "he"]:
             if self.code.startswith(prefix):
@@ -376,7 +376,7 @@ class Locale(object):
             str_time = "%d:%02d" % (local_date.hour, local_date.minute)
         elif self.code == "zh_CN":
             str_time = "%s%d:%02d" % (
-                (u('\u4e0a\u5348'), u('\u4e0b\u5348'))[local_date.hour >= 12],
+                (u'\u4e0a\u5348', u'\u4e0b\u5348')[local_date.hour >= 12],
                 local_date.hour % 12 or 12, local_date.minute)
         else:
             str_time = "%d:%02d %s" % (
@@ -422,7 +422,7 @@ class Locale(object):
             return ""
         if len(parts) == 1:
             return parts[0]
-        comma = u(' \u0648 ') if self.code.startswith("fa") else u(", ")
+        comma = u' \u0648 ' if self.code.startswith("fa") else u", "
         return _("%(commas)s and %(last)s") % {
             "commas": comma.join(parts[:-1]),
             "last": parts[len(parts) - 1],
tornado/test/auth_test.py
index 59c96b232ff03844b7280daa17de82d74b302ee9..a49b2744451da58f2a91bb8892b84ca40d24a268 100644 (file)
@@ -430,12 +430,12 @@ class AuthTest(AsyncHTTPTestCase):
         response.rethrow()
         parsed = json_decode(response.body)
         self.assertEqual(parsed,
-                         {u('access_token'): {u('key'): u('hjkl'),
-                                              u('screen_name'): u('foo'),
-                                              u('secret'): u('vbnm')},
-                          u('name'): u('Foo'),
-                          u('screen_name'): u('foo'),
-                          u('username'): u('foo')})
+                         {u'access_token': {u'key': u'hjkl',
+                                              u'screen_name': u'foo',
+                                              u'secret': u'vbnm'},
+                          u'name': u'Foo',
+                          u'screen_name': u'foo',
+                          u'username': u'foo'})
 
     def test_twitter_show_user(self):
         response = self.fetch('/twitter/client/show_user?name=somebody')
@@ -539,7 +539,7 @@ class GoogleOAuth2Test(AsyncHTTPTestCase):
     def test_google_login(self):
         response = self.fetch('/client/login')
         self.assertDictEqual({
-            u('name'): u('Foo'),
-            u('email'): u('foo@example.com'),
-            u('access_token'): u('fake-access-token'),
+            u'name': u'Foo',
+            u'email': u'foo@example.com',
+            u'access_token': u'fake-access-token',
         }, json_decode(response.body))
tornado/test/escape_test.py
index 65765b68aa31fd90205d38fe22dcbf3b081ca381..e2c638c287390b8177ef7de04fbb8d30e151fc45 100644 (file)
@@ -12,123 +12,123 @@ linkify_tests = [
     # (input, linkify_kwargs, expected_output)
 
     ("hello http://world.com/!", {},
-     u('hello <a href="http://world.com/">http://world.com/</a>!')),
+     u'hello <a href="http://world.com/">http://world.com/</a>!'),
 
     ("hello http://world.com/with?param=true&stuff=yes", {},
-     u('hello <a href="http://world.com/with?param=true&amp;stuff=yes">http://world.com/with?param=true&amp;stuff=yes</a>')),
+     u'hello <a href="http://world.com/with?param=true&amp;stuff=yes">http://world.com/with?param=true&amp;stuff=yes</a>'),
 
     # an opened paren followed by many chars killed Gruber's regex
     ("http://url.com/w(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", {},
-     u('<a href="http://url.com/w">http://url.com/w</a>(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')),
+     u'<a href="http://url.com/w">http://url.com/w</a>(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'),
 
     # as did too many dots at the end
     ("http://url.com/withmany.......................................", {},
-     u('<a href="http://url.com/withmany">http://url.com/withmany</a>.......................................')),
+     u'<a href="http://url.com/withmany">http://url.com/withmany</a>.......................................'),
 
     ("http://url.com/withmany((((((((((((((((((((((((((((((((((a)", {},
-     u('<a href="http://url.com/withmany">http://url.com/withmany</a>((((((((((((((((((((((((((((((((((a)')),
+     u'<a href="http://url.com/withmany">http://url.com/withmany</a>((((((((((((((((((((((((((((((((((a)'),
 
     # some examples from http://daringfireball.net/2009/11/liberal_regex_for_matching_urls
     # plus a fex extras (such as multiple parentheses).
     ("http://foo.com/blah_blah", {},
-     u('<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>')),
+     u'<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>'),
 
     ("http://foo.com/blah_blah/", {},
-     u('<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>')),
+     u'<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>'),
 
     ("(Something like http://foo.com/blah_blah)", {},
-     u('(Something like <a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>)')),
+     u'(Something like <a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>)'),
 
     ("http://foo.com/blah_blah_(wikipedia)", {},
-     u('<a href="http://foo.com/blah_blah_(wikipedia)">http://foo.com/blah_blah_(wikipedia)</a>')),
+     u'<a href="http://foo.com/blah_blah_(wikipedia)">http://foo.com/blah_blah_(wikipedia)</a>'),
 
     ("http://foo.com/blah_(blah)_(wikipedia)_blah", {},
-     u('<a href="http://foo.com/blah_(blah)_(wikipedia)_blah">http://foo.com/blah_(blah)_(wikipedia)_blah</a>')),
+     u'<a href="http://foo.com/blah_(blah)_(wikipedia)_blah">http://foo.com/blah_(blah)_(wikipedia)_blah</a>'),
 
     ("(Something like http://foo.com/blah_blah_(wikipedia))", {},
-     u('(Something like <a href="http://foo.com/blah_blah_(wikipedia)">http://foo.com/blah_blah_(wikipedia)</a>)')),
+     u'(Something like <a href="http://foo.com/blah_blah_(wikipedia)">http://foo.com/blah_blah_(wikipedia)</a>)'),
 
     ("http://foo.com/blah_blah.", {},
-     u('<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>.')),
+     u'<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>.'),
 
     ("http://foo.com/blah_blah/.", {},
-     u('<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>.')),
+     u'<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>.'),
 
     ("<http://foo.com/blah_blah>", {},
-     u('&lt;<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>&gt;')),
+     u'&lt;<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>&gt;'),
 
     ("<http://foo.com/blah_blah/>", {},
-     u('&lt;<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>&gt;')),
+     u'&lt;<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>&gt;'),
 
     ("http://foo.com/blah_blah,", {},
-     u('<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>,')),
+     u'<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>,'),
 
     ("http://www.example.com/wpstyle/?p=364.", {},
-     u('<a href="http://www.example.com/wpstyle/?p=364">http://www.example.com/wpstyle/?p=364</a>.')),
+     u'<a href="http://www.example.com/wpstyle/?p=364">http://www.example.com/wpstyle/?p=364</a>.'),
 
     ("rdar://1234",
      {"permitted_protocols": ["http", "rdar"]},
-     u('<a href="rdar://1234">rdar://1234</a>')),
+     u'<a href="rdar://1234">rdar://1234</a>'),
 
     ("rdar:/1234",
      {"permitted_protocols": ["rdar"]},
-     u('<a href="rdar:/1234">rdar:/1234</a>')),
+     u'<a href="rdar:/1234">rdar:/1234</a>'),
 
     ("http://userid:password@example.com:8080", {},
-     u('<a href="http://userid:password@example.com:8080">http://userid:password@example.com:8080</a>')),
+     u'<a href="http://userid:password@example.com:8080">http://userid:password@example.com:8080</a>'),
 
     ("http://userid@example.com", {},
-     u('<a href="http://userid@example.com">http://userid@example.com</a>')),
+     u'<a href="http://userid@example.com">http://userid@example.com</a>'),
 
     ("http://userid@example.com:8080", {},
-     u('<a href="http://userid@example.com:8080">http://userid@example.com:8080</a>')),
+     u'<a href="http://userid@example.com:8080">http://userid@example.com:8080</a>'),
 
     ("http://userid:password@example.com", {},
-     u('<a href="http://userid:password@example.com">http://userid:password@example.com</a>')),
+     u'<a href="http://userid:password@example.com">http://userid:password@example.com</a>'),
 
     ("message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e",
      {"permitted_protocols": ["http", "message"]},
-     u('<a href="message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e">message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e</a>')),
+     u'<a href="message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e">message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e</a>'),
 
-    (u("http://\u27a1.ws/\u4a39"), {},
-     u('<a href="http://\u27a1.ws/\u4a39">http://\u27a1.ws/\u4a39</a>')),
+    (u"http://\u27a1.ws/\u4a39", {},
+     u'<a href="http://\u27a1.ws/\u4a39">http://\u27a1.ws/\u4a39</a>'),
 
     ("<tag>http://example.com</tag>", {},
-     u('&lt;tag&gt;<a href="http://example.com">http://example.com</a>&lt;/tag&gt;')),
+     u'&lt;tag&gt;<a href="http://example.com">http://example.com</a>&lt;/tag&gt;'),
 
     ("Just a www.example.com link.", {},
-     u('Just a <a href="http://www.example.com">www.example.com</a> link.')),
+     u'Just a <a href="http://www.example.com">www.example.com</a> link.'),
 
     ("Just a www.example.com link.",
      {"require_protocol": True},
-     u('Just a www.example.com link.')),
+     u'Just a www.example.com link.'),
 
     ("A http://reallylong.com/link/that/exceedsthelenglimit.html",
      {"require_protocol": True, "shorten": True},
-     u('A <a href="http://reallylong.com/link/that/exceedsthelenglimit.html" title="http://reallylong.com/link/that/exceedsthelenglimit.html">http://reallylong.com/link...</a>')),
+     u'A <a href="http://reallylong.com/link/that/exceedsthelenglimit.html" title="http://reallylong.com/link/that/exceedsthelenglimit.html">http://reallylong.com/link...</a>'),
 
     ("A http://reallylongdomainnamethatwillbetoolong.com/hi!",
      {"shorten": True},
-     u('A <a href="http://reallylongdomainnamethatwillbetoolong.com/hi" title="http://reallylongdomainnamethatwillbetoolong.com/hi">http://reallylongdomainnametha...</a>!')),
+     u'A <a href="http://reallylongdomainnamethatwillbetoolong.com/hi" title="http://reallylongdomainnamethatwillbetoolong.com/hi">http://reallylongdomainnametha...</a>!'),
 
     ("A file:///passwords.txt and http://web.com link", {},
-     u('A file:///passwords.txt and <a href="http://web.com">http://web.com</a> link')),
+     u'A file:///passwords.txt and <a href="http://web.com">http://web.com</a> link'),
 
     ("A file:///passwords.txt and http://web.com link",
      {"permitted_protocols": ["file"]},
-     u('A <a href="file:///passwords.txt">file:///passwords.txt</a> and http://web.com link')),
+     u'A <a href="file:///passwords.txt">file:///passwords.txt</a> and http://web.com link'),
 
     ("www.external-link.com",
      {"extra_params": 'rel="nofollow" class="external"'},
-     u('<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a>')),
+     u'<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a>'),
 
     ("www.external-link.com and www.internal-link.com/blogs extra",
      {"extra_params": lambda href: 'class="internal"' if href.startswith("http://www.internal-link.com") else 'rel="nofollow" class="external"'},
-     u('<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a> and <a href="http://www.internal-link.com/blogs" class="internal">www.internal-link.com/blogs</a> extra')),
+     u'<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a> and <a href="http://www.internal-link.com/blogs" class="internal">www.internal-link.com/blogs</a> extra'),
 
     ("www.external-link.com",
      {"extra_params": lambda href: '    rel="nofollow" class="external"  '},
-     u('<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a>')),
+     u'<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a>'),
 ]
 
 
@@ -141,13 +141,13 @@ class EscapeTestCase(unittest.TestCase):
     def test_xhtml_escape(self):
         tests = [
             ("<foo>", "&lt;foo&gt;"),
-            (u("<foo>"), u("&lt;foo&gt;")),
+            (u"<foo>", u"&lt;foo&gt;"),
             (b"<foo>", b"&lt;foo&gt;"),
 
             ("<>&\"'", "&lt;&gt;&amp;&quot;&#39;"),
             ("&amp;", "&amp;amp;"),
 
-            (u("<\u00e9>"), u("&lt;\u00e9&gt;")),
+            (u"<\u00e9>", u"&lt;\u00e9&gt;"),
             (b"<\xc3\xa9>", b"&lt;\xc3\xa9&gt;"),
         ]
         for unescaped, escaped in tests:
@@ -159,7 +159,7 @@ class EscapeTestCase(unittest.TestCase):
             ('foo&#32;bar', 'foo bar'),
             ('foo&#x20;bar', 'foo bar'),
             ('foo&#X20;bar', 'foo bar'),
-            ('foo&#xabc;bar', u('foo\u0abcbar')),
+            ('foo&#xabc;bar', u'foo\u0abcbar'),
             ('foo&#xyz;bar', 'foo&#xyz;bar'),  # invalid encoding
             ('foo&#;bar', 'foo&#;bar'),        # invalid encoding
             ('foo&#x;bar', 'foo&#x;bar'),      # invalid encoding
@@ -174,16 +174,16 @@ class EscapeTestCase(unittest.TestCase):
             (u('\u00e9').encode('latin1'), '%E9'),
 
             # unicode strings become utf8
-            (u('\u00e9'), '%C3%A9'),
+            (u'\u00e9', '%C3%A9'),
         ]
         for unescaped, escaped in tests:
             self.assertEqual(url_escape(unescaped), escaped)
 
     def test_url_unescape_unicode(self):
         tests = [
-            ('%C3%A9', u('\u00e9'), 'utf8'),
-            ('%C3%A9', u('\u00c3\u00a9'), 'latin1'),
-            ('%C3%A9', utf8(u('\u00e9')), None),
+            ('%C3%A9', u'\u00e9', 'utf8'),
+            ('%C3%A9', u'\u00c3\u00a9', 'latin1'),
+            ('%C3%A9', utf8(u'\u00e9'), None),
         ]
         for escaped, unescaped, encoding in tests:
             # input strings to url_unescape should only contain ascii
@@ -209,28 +209,28 @@ class EscapeTestCase(unittest.TestCase):
         # On python2 the escape methods should generally return the same
         # type as their argument
         self.assertEqual(type(xhtml_escape("foo")), str)
-        self.assertEqual(type(xhtml_escape(u("foo"))), unicode_type)
+        self.assertEqual(type(xhtml_escape(u"foo")), unicode_type)
 
     def test_json_decode(self):
         # json_decode accepts both bytes and unicode, but strings it returns
         # are always unicode.
-        self.assertEqual(json_decode(b'"foo"'), u("foo"))
-        self.assertEqual(json_decode(u('"foo"')), u("foo"))
+        self.assertEqual(json_decode(b'"foo"'), u"foo")
+        self.assertEqual(json_decode(u'"foo"'), u"foo")
 
         # Non-ascii bytes are interpreted as utf8
-        self.assertEqual(json_decode(utf8(u('"\u00e9"'))), u("\u00e9"))
+        self.assertEqual(json_decode(utf8(u'"\u00e9"')), u"\u00e9")
 
     def test_json_encode(self):
         # json deals with strings, not bytes.  On python 2 byte strings will
         # convert automatically if they are utf8; on python 3 byte strings
         # are not allowed.
-        self.assertEqual(json_decode(json_encode(u("\u00e9"))), u("\u00e9"))
+        self.assertEqual(json_decode(json_encode(u"\u00e9")), u"\u00e9")
         if bytes is str:
-            self.assertEqual(json_decode(json_encode(utf8(u("\u00e9")))), u("\u00e9"))
+            self.assertEqual(json_decode(json_encode(utf8(u"\u00e9"))), u"\u00e9")
             self.assertRaises(UnicodeDecodeError, json_encode, b"\xe9")
 
     def test_squeeze(self):
-        self.assertEqual(squeeze(u('sequences     of    whitespace   chars')), u('sequences of whitespace chars'))
+        self.assertEqual(squeeze(u'sequences     of    whitespace   chars'), u'sequences of whitespace chars')
 
     def test_recursive_unicode(self):
         tests = {
@@ -239,7 +239,7 @@ class EscapeTestCase(unittest.TestCase):
             'tuple': (b"foo", b"bar"),
             'bytes': b"foo"
         }
-        self.assertEqual(recursive_unicode(tests['dict']), {u("foo"): u("bar")})
-        self.assertEqual(recursive_unicode(tests['list']), [u("foo"), u("bar")])
-        self.assertEqual(recursive_unicode(tests['tuple']), (u("foo"), u("bar")))
-        self.assertEqual(recursive_unicode(tests['bytes']), u("foo"))
+        self.assertEqual(recursive_unicode(tests['dict']), {u"foo": u"bar"})
+        self.assertEqual(recursive_unicode(tests['list']), [u"foo", u"bar"])
+        self.assertEqual(recursive_unicode(tests['tuple']), (u"foo", u"bar"))
+        self.assertEqual(recursive_unicode(tests['bytes']), u"foo")
tornado/test/httpclient_test.py
index e7551c93bb7373cdf505ef58c99dcf3bfffe89b5..939bebb89391572f2ee137575d8f546e85cdf139 100644 (file)
@@ -271,7 +271,7 @@ Transfer-Encoding: chunked
                          response.body)
 
     def test_body_encoding(self):
-        unicode_body = u("\xe9")
+        unicode_body = u"\xe9"
         byte_body = binascii.a2b_hex(b"e9")
 
         # unicode string in body gets converted to utf8
@@ -291,7 +291,7 @@ Transfer-Encoding: chunked
         # break anything
         response = self.fetch("/echopost", method="POST", body=byte_body,
                               headers={"Content-Type": "application/blah"},
-                              user_agent=u("foo"))
+                              user_agent=u"foo")
         self.assertEqual(response.headers["Content-Length"], "1")
         self.assertEqual(response.body, byte_body)
 
@@ -363,7 +363,7 @@ Transfer-Encoding: chunked
         # in a plain dictionary or an HTTPHeaders object.
         # Keys must always be the native str type.
         # All combinations should have the same results on the wire.
-        for value in [u("MyUserAgent"), b"MyUserAgent"]:
+        for value in [u"MyUserAgent", b"MyUserAgent"]:
             for container in [dict, HTTPHeaders]:
                 headers = container()
                 headers['User-Agent'] = value
tornado/test/httpserver_test.py
index 065f5b1fade3c3fe27caecbc7263228476cb1f2f..fd54633be041c178006bee37dff7ed4a86f7722a 100644 (file)
@@ -241,10 +241,10 @@ class HTTPConnectionTest(AsyncHTTPTestCase):
                 b"",
             ]))
         data = json_decode(response)
-        self.assertEqual(u("\u00e9"), data["header"])
-        self.assertEqual(u("\u00e1"), data["argument"])
-        self.assertEqual(u("\u00f3"), data["filename"])
-        self.assertEqual(u("\u00fa"), data["filebody"])
+        self.assertEqual(u"\u00e9", data["header"])
+        self.assertEqual(u"\u00e1", data["argument"])
+        self.assertEqual(u"\u00f3", data["filename"])
+        self.assertEqual(u"\u00fa", data["filebody"])
 
     def test_newlines(self):
         # We support both CRLF and bare LF as line separators.
@@ -340,17 +340,17 @@ class HTTPServerTest(AsyncHTTPTestCase):
     def test_query_string_encoding(self):
         response = self.fetch("/echo?foo=%C3%A9")
         data = json_decode(response.body)
-        self.assertEqual(data, {u("foo"): [u("\u00e9")]})
+        self.assertEqual(data, {u"foo": [u"\u00e9"]})
 
     def test_empty_query_string(self):
         response = self.fetch("/echo?foo=&foo=")
         data = json_decode(response.body)
-        self.assertEqual(data, {u("foo"): [u(""), u("")]})
+        self.assertEqual(data, {u"foo": [u"", u""]})
 
     def test_empty_post_parameters(self):
         response = self.fetch("/echo", method="POST", body="foo=&bar=")
         data = json_decode(response.body)
-        self.assertEqual(data, {u("foo"): [u("")], u("bar"): [u("")]})
+        self.assertEqual(data, {u"foo": [u""], u"bar": [u""]})
 
     def test_types(self):
         headers = {"Cookie": "foo=bar"}
@@ -440,7 +440,7 @@ bar
 """.replace(b"\n", b"\r\n"))
         read_stream_body(self.stream, self.stop)
         headers, response = self.wait()
-        self.assertEqual(json_decode(response), {u('foo'): [u('bar')]})
+        self.assertEqual(json_decode(response), {u'foo': [u'bar']})
 
 
 class XHeaderTest(HandlerBaseTestCase):
@@ -775,7 +775,7 @@ class GzipBaseTest(object):
 
     def test_uncompressed(self):
         response = self.fetch('/', method='POST', body='foo=bar')
-        self.assertEquals(json_decode(response.body), {u('foo'): [u('bar')]})
+        self.assertEquals(json_decode(response.body), {u'foo': [u'bar']})
 
 
 class GzipTest(GzipBaseTest, AsyncHTTPTestCase):
@@ -784,7 +784,7 @@ class GzipTest(GzipBaseTest, AsyncHTTPTestCase):
 
     def test_gzip(self):
         response = self.post_gzip('foo=bar')
-        self.assertEquals(json_decode(response.body), {u('foo'): [u('bar')]})
+        self.assertEquals(json_decode(response.body), {u'foo': [u'bar']})
 
 
 class GzipUnsupportedTest(GzipBaseTest, AsyncHTTPTestCase):
tornado/test/httputil_test.py
index 3f25f3eedae45a8a7be418262622f2ac81bf67a2..7acceae749e5d61222c914ab4152b47075c7c8c3 100644 (file)
@@ -238,13 +238,13 @@ Foo: even
         # and cpython's unicodeobject.c (which defines the implementation
         # of unicode_type.splitlines(), and uses a different list than TR13).
         newlines = [
-            u('\u001b'),  # VERTICAL TAB
-            u('\u001c'),  # FILE SEPARATOR
-            u('\u001d'),  # GROUP SEPARATOR
-            u('\u001e'),  # RECORD SEPARATOR
-            u('\u0085'),  # NEXT LINE
-            u('\u2028'),  # LINE SEPARATOR
-            u('\u2029'),  # PARAGRAPH SEPARATOR
+            u'\u001b',  # VERTICAL TAB
+            u'\u001c',  # FILE SEPARATOR
+            u'\u001d',  # GROUP SEPARATOR
+            u'\u001e',  # RECORD SEPARATOR
+            u'\u0085',  # NEXT LINE
+            u'\u2028',  # LINE SEPARATOR
+            u'\u2029',  # PARAGRAPH SEPARATOR
         ]
         for newline in newlines:
             # Try the utf8 and latin1 representations of each newline
tornado/test/locale_test.py
index e25783861eb824e5bb8a8e7648d8de83fbe32a24..dfa4e4b653c6e22d29f6f7b91f56c16ecb3bfcf8 100644 (file)
@@ -35,7 +35,7 @@ class TranslationLoaderTest(unittest.TestCase):
             os.path.join(os.path.dirname(__file__), 'csv_translations'))
         locale = tornado.locale.get("fr_FR")
         self.assertTrue(isinstance(locale, tornado.locale.CSVLocale))
-        self.assertEqual(locale.translate("school"), u("\u00e9cole"))
+        self.assertEqual(locale.translate("school"), u"\u00e9cole")
 
     # tempfile.mkdtemp is not available on app engine.
     @skipOnAppEngine
@@ -55,7 +55,7 @@ class TranslationLoaderTest(unittest.TestCase):
                 tornado.locale.load_translations(tmpdir)
                 locale = tornado.locale.get('fr_FR')
                 self.assertIsInstance(locale, tornado.locale.CSVLocale)
-                self.assertEqual(locale.translate("school"), u("\u00e9cole"))
+                self.assertEqual(locale.translate("school"), u"\u00e9cole")
             finally:
                 shutil.rmtree(tmpdir)
 
@@ -65,20 +65,20 @@ class TranslationLoaderTest(unittest.TestCase):
             "tornado_test")
         locale = tornado.locale.get("fr_FR")
         self.assertTrue(isinstance(locale, tornado.locale.GettextLocale))
-        self.assertEqual(locale.translate("school"), u("\u00e9cole"))
-        self.assertEqual(locale.pgettext("law", "right"), u("le droit"))
-        self.assertEqual(locale.pgettext("good", "right"), u("le bien"))
-        self.assertEqual(locale.pgettext("organization", "club", "clubs", 1), u("le club"))
-        self.assertEqual(locale.pgettext("organization", "club", "clubs", 2), u("les clubs"))
-        self.assertEqual(locale.pgettext("stick", "club", "clubs", 1), u("le b\xe2ton"))
-        self.assertEqual(locale.pgettext("stick", "club", "clubs", 2), u("les b\xe2tons"))
+        self.assertEqual(locale.translate("school"), u"\u00e9cole")
+        self.assertEqual(locale.pgettext("law", "right"), u"le droit")
+        self.assertEqual(locale.pgettext("good", "right"), u"le bien")
+        self.assertEqual(locale.pgettext("organization", "club", "clubs", 1), u"le club")
+        self.assertEqual(locale.pgettext("organization", "club", "clubs", 2), u"les clubs")
+        self.assertEqual(locale.pgettext("stick", "club", "clubs", 1), u"le b\xe2ton")
+        self.assertEqual(locale.pgettext("stick", "club", "clubs", 2), u"les b\xe2tons")
 
 
 class LocaleDataTest(unittest.TestCase):
     def test_non_ascii_name(self):
         name = tornado.locale.LOCALE_NAMES['es_LA']['name']
         self.assertTrue(isinstance(name, unicode_type))
-        self.assertEqual(name, u('Espa\u00f1ol'))
+        self.assertEqual(name, u'Espa\u00f1ol')
         self.assertEqual(utf8(name), b'Espa\xc3\xb1ol')
 
 
tornado/test/log_test.py
index df493bcdc4ee5e784e103d9aed2a47fca08d7cca..e45c4e730c475a646ae1f85363045d17f1c61427 100644 (file)
@@ -51,9 +51,9 @@ class LogFormatterTest(unittest.TestCase):
         # for testing.  (testing with color off fails to expose some potential
         # encoding issues from the control characters)
         self.formatter._colors = {
-            logging.ERROR: u("\u0001"),
+            logging.ERROR: u"\u0001",
         }
-        self.formatter._normal = u("\u0002")
+        self.formatter._normal = u"\u0002"
         # construct a Logger directly to bypass getLogger's caching
         self.logger = logging.Logger('LogFormatterTest')
         self.logger.propagate = False
@@ -100,12 +100,12 @@ class LogFormatterTest(unittest.TestCase):
         if issubclass(bytes, basestring_type):
             # on python 2, utf8 byte strings (and by extension ascii byte
             # strings) are passed through as-is.
-            self.assertEqual(self.get_output(), utf8(u("\u00e9")))
+            self.assertEqual(self.get_output(), utf8(u"\u00e9"))
         else:
             # on python 3, byte strings always get repr'd even if
             # they're ascii-only, so this degenerates into another
             # copy of test_bytes_logging.
-            self.assertEqual(self.get_output(), utf8(repr(utf8(u("\u00e9")))))
+            self.assertEqual(self.get_output(), utf8(repr(utf8(u"\u00e9"))))
 
     def test_bytes_exception_logging(self):
         try:
@@ -128,8 +128,8 @@ class UnicodeLogFormatterTest(LogFormatterTest):
         return logging.FileHandler(filename, encoding="utf8")
 
     def test_unicode_logging(self):
-        self.logger.error(u("\u00e9"))
-        self.assertEqual(self.get_output(), utf8(u("\u00e9")))
+        self.logger.error(u"\u00e9")
+        self.assertEqual(self.get_output(), utf8(u"\u00e9"))
 
 
 class EnablePrettyLoggingTest(unittest.TestCase):
tornado/test/resolve_test_helper.py
index 59d183830544b8368e1cd873e774771653cb9517..7ee8bfbc6df2c180138d03899ebb8327df038f6a 100644 (file)
@@ -9,4 +9,4 @@ from tornado.util import u
 # this deadlock.
 
 resolver = ThreadedResolver()
-IOLoop.current().run_sync(lambda: resolver.resolve(u('localhost'), 80))
+IOLoop.current().run_sync(lambda: resolver.resolve(u'localhost', 80))
tornado/test/template_test.py
index 031b2f540a3997f17b909d12b114aaed1fa1bb31..c48618e03f6007b676f02006d78f0a189496d2d0 100644 (file)
@@ -71,8 +71,8 @@ class TemplateTest(unittest.TestCase):
                          b"expr {{jquery expr}}")
 
     def test_unicode_template(self):
-        template = Template(utf8(u("\u00e9")))
-        self.assertEqual(template.generate(), utf8(u("\u00e9")))
+        template = Template(utf8(u"\u00e9"))
+        self.assertEqual(template.generate(), utf8(u"\u00e9"))
 
     def test_unicode_literal_expression(self):
         # Unicode literals should be usable in templates.  Note that this
@@ -82,10 +82,10 @@ class TemplateTest(unittest.TestCase):
         if str is unicode_type:
             # python 3 needs a different version of this test since
             # 2to3 doesn't run on template internals
-            template = Template(utf8(u('{{ "\u00e9" }}')))
+            template = Template(utf8(u'{{ "\u00e9" }}'))
         else:
-            template = Template(utf8(u('{{ u"\u00e9" }}')))
-        self.assertEqual(template.generate(), utf8(u("\u00e9")))
+            template = Template(utf8(u'{{ u"\u00e9" }}'))
+        self.assertEqual(template.generate(), utf8(u"\u00e9"))
 
     def test_custom_namespace(self):
         loader = DictLoader({"test.html": "{{ inc(5) }}"}, namespace={"inc": lambda x: x + 1})
@@ -100,14 +100,14 @@ class TemplateTest(unittest.TestCase):
     def test_unicode_apply(self):
         def upper(s):
             return to_unicode(s).upper()
-        template = Template(utf8(u("{% apply upper %}foo \u00e9{% end %}")))
-        self.assertEqual(template.generate(upper=upper), utf8(u("FOO \u00c9")))
+        template = Template(utf8(u"{% apply upper %}foo \u00e9{% end %}"))
+        self.assertEqual(template.generate(upper=upper), utf8(u"FOO \u00c9"))
 
     def test_bytes_apply(self):
         def upper(s):
             return utf8(to_unicode(s).upper())
-        template = Template(utf8(u("{% apply upper %}foo \u00e9{% end %}")))
-        self.assertEqual(template.generate(upper=upper), utf8(u("FOO \u00c9")))
+        template = Template(utf8(u"{% apply upper %}foo \u00e9{% end %}"))
+        self.assertEqual(template.generate(upper=upper), utf8(u"FOO \u00c9"))
 
     def test_if(self):
         template = Template(utf8("{% if x > 4 %}yes{% else %}no{% end %}"))
@@ -174,8 +174,8 @@ try{% set y = 1/x %}
         self.assertEqual(template.generate(), '0')
 
     def test_non_ascii_name(self):
-        loader = DictLoader({u("t\u00e9st.html"): "hello"})
-        self.assertEqual(loader.load(u("t\u00e9st.html")).generate(), b"hello")
+        loader = DictLoader({u"t\u00e9st.html": "hello"})
+        self.assertEqual(loader.load(u"t\u00e9st.html").generate(), b"hello")
 
 
 class StackTraceTest(unittest.TestCase):
@@ -482,4 +482,4 @@ class TemplateLoaderTest(unittest.TestCase):
     def test_utf8_in_file(self):
         tmpl = self.loader.load("utf8.html")
         result = tmpl.generate()
-        self.assertEqual(to_unicode(result).strip(), u("H\u00e9llo"))
+        self.assertEqual(to_unicode(result).strip(), u"H\u00e9llo")
tornado/test/util_test.py
index 0936c89ad1792eda892e805a3bafdfbd8caa7914..353efbbce154cd7cd417ced0ade86d5622f90182 100644 (file)
@@ -133,7 +133,7 @@ class ConfigurableTest(unittest.TestCase):
 
 class UnicodeLiteralTest(unittest.TestCase):
     def test_unicode_escapes(self):
-        self.assertEqual(utf8(u('\u00e9')), b'\xc3\xa9')
+        self.assertEqual(utf8(u'\u00e9'), b'\xc3\xa9')
 
 
 class ExecInTest(unittest.TestCase):
@@ -189,7 +189,7 @@ class ImportObjectTest(unittest.TestCase):
         self.assertIs(import_object('tornado.escape.utf8'), utf8)
 
     def test_import_member_unicode(self):
-        self.assertIs(import_object(u('tornado.escape.utf8')), utf8)
+        self.assertIs(import_object(u'tornado.escape.utf8'), utf8)
 
     def test_import_module(self):
         self.assertIs(import_object('tornado.escape'), tornado.escape)
@@ -198,4 +198,4 @@ class ImportObjectTest(unittest.TestCase):
         # The internal implementation of __import__ differs depending on
         # whether the thing being imported is a module or not.
         # This variant requires a byte string in python 2.
-        self.assertIs(import_object(u('tornado.escape')), tornado.escape)
+        self.assertIs(import_object(u'tornado.escape'), tornado.escape)
tornado/test/web_test.py
index 36312f975a1d747b6f7eca18579696abf31793ac..c9e2c3a8724699fbaf4822f1e2aa51427fda3f19 100644 (file)
@@ -188,7 +188,7 @@ class CookieTest(WebTestCase):
                 # Try setting cookies with different argument types
                 # to ensure that everything gets encoded correctly
                 self.set_cookie("str", "asdf")
-                self.set_cookie("unicode", u("qwer"))
+                self.set_cookie("unicode", u"qwer")
                 self.set_cookie("bytes", b"zxcv")
 
         class GetCookieHandler(RequestHandler):
@@ -199,8 +199,8 @@ class CookieTest(WebTestCase):
             def get(self):
                 # unicode domain and path arguments shouldn't break things
                 # either (see bug #285)
-                self.set_cookie("unicode_args", "blah", domain=u("foo.com"),
-                                path=u("/foo"))
+                self.set_cookie("unicode_args", "blah", domain=u"foo.com",
+                                path=u"/foo")
 
         class SetCookieSpecialCharHandler(RequestHandler):
             def get(self):
@@ -434,9 +434,9 @@ class RequestEncodingTest(WebTestCase):
     def test_group_encoding(self):
         # Path components and query arguments should be decoded the same way
         self.assertEqual(self.fetch_json('/group/%C3%A9?arg=%C3%A9'),
-                         {u("path"): u("/group/%C3%A9"),
-                          u("path_args"): [u("\u00e9")],
-                          u("args"): {u("arg"): [u("\u00e9")]}})
+                         {u"path": u"/group/%C3%A9",
+                          u"path_args": [u"\u00e9"],
+                          u"args": {u"arg": [u"\u00e9"]}})
 
     def test_slashes(self):
         # Slashes may be escaped to appear as a single "directory" in the path,
@@ -690,15 +690,15 @@ class WSGISafeWebTest(WebTestCase):
             response = self.fetch(req_url)
             response.rethrow()
             data = json_decode(response.body)
-            self.assertEqual(data, {u('path'): [u('unicode'), u('\u00e9')],
-                                    u('query'): [u('unicode'), u('\u00e9')],
+            self.assertEqual(data, {u'path': [u'unicode', u'\u00e9'],
+                                    u'query': [u'unicode', u'\u00e9'],
                                     })
 
         response = self.fetch("/decode_arg/%C3%A9?foo=%C3%A9")
         response.rethrow()
         data = json_decode(response.body)
-        self.assertEqual(data, {u('path'): [u('bytes'), u('c3a9')],
-                                u('query'): [u('bytes'), u('c3a9')],
+        self.assertEqual(data, {u'path': [u'bytes', u'c3a9'],
+                                u'query': [u'bytes', u'c3a9'],
                                 })
 
     def test_decode_argument_invalid_unicode(self):
@@ -717,8 +717,8 @@ class WSGISafeWebTest(WebTestCase):
             response = self.fetch(req_url)
             response.rethrow()
             data = json_decode(response.body)
-            self.assertEqual(data, {u('path'): [u('unicode'), u('1 + 1')],
-                                    u('query'): [u('unicode'), u('1 + 1')],
+            self.assertEqual(data, {u'path': [u'unicode', u'1 + 1'],
+                                    u'query': [u'unicode', u'1 + 1'],
                                     })
 
     def test_reverse_url(self):
@@ -728,7 +728,7 @@ class WSGISafeWebTest(WebTestCase):
                          '/decode_arg/42')
         self.assertEqual(self.app.reverse_url('decode_arg', b'\xe9'),
                          '/decode_arg/%E9')
-        self.assertEqual(self.app.reverse_url('decode_arg', u('\u00e9')),
+        self.assertEqual(self.app.reverse_url('decode_arg', u'\u00e9'),
                          '/decode_arg/%C3%A9')
         self.assertEqual(self.app.reverse_url('decode_arg', '1 + 1'),
                          '/decode_arg/1%20%2B%201')
@@ -765,9 +765,9 @@ js_embed()
 
     def test_optional_path(self):
         self.assertEqual(self.fetch_json("/optional_path/foo"),
-                         {u("path"): u("foo")})
+                         {u"path": u"foo"})
         self.assertEqual(self.fetch_json("/optional_path/"),
-                         {u("path"): None})
+                         {u"path": None})
 
     def test_multi_header(self):
         response = self.fetch("/multi_header")
@@ -1370,7 +1370,7 @@ class NamedURLSpecGroupsTest(WebTestCase):
                 self.write(path)
 
         return [("/str/(?P<path>.*)", EchoHandler),
-                (u("/unicode/(?P<path>.*)"), EchoHandler)]
+                (u"/unicode/(?P<path>.*)", EchoHandler)]
 
     def test_named_urlspec_groups(self):
         response = self.fetch("/str/foo")
tornado/test/websocket_test.py
index 7b47214df71c03ef168c0e2f132ecac37b908989..86f53e620975946e85b4168abef5b1a9ac099c59 100644 (file)
@@ -159,9 +159,9 @@ class WebSocketTest(WebSocketBaseTestCase):
     @gen_test
     def test_unicode_message(self):
         ws = yield self.ws_connect('/echo')
-        ws.write_message(u('hello \u00e9'))
+        ws.write_message(u'hello \u00e9')
         response = yield ws.read_message()
-        self.assertEqual(response, u('hello \u00e9'))
+        self.assertEqual(response, u'hello \u00e9')
         yield self.close(ws)
 
     @gen_test