self.write("".join(chr(i % 256) * 1024 for i in range(512)))
class FinishOnCloseHandler(RequestHandler):
+ def initialize(self, cleanup_event):
+ self.cleanup_event = cleanup_event
+
@gen.coroutine
def get(self):
self.flush()
- never_finish = Event()
- yield never_finish.wait()
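+ # block until the test sets cleanup_event; the request itself is finished
+ # from on_connection_close when the client disconnects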
+ yield self.cleanup_event.wait()
def on_connection_close(self):
# This is not very realistic, but finishing the request
# from the close callback has the right timing to reproduce
# some errors seen in the wild.
self.finish("closed")
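+ # shared with FinishOnCloseHandler; set after the connection is closed so
+ # the handler's hanging coroutine can exit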
+ self.cleanup_event = Event()
return Application(
[
("/", HelloHandler),
("/large", LargeHandler),
- ("/finish_on_close", FinishOnCloseHandler),
+ (
+ "/finish_on_close",
+ FinishOnCloseHandler,
+ dict(cleanup_event=self.cleanup_event),
+ ),
]
)
self.stream.write(b"GET /finish_on_close HTTP/1.1\r\n\r\n")
yield self.read_headers()
self.close()
+ # Let the hanging coroutine clean up after itself
+ self.cleanup_event.set()
@gen_test
def test_keepalive_chunked(self):
@gen.coroutine
def get(self):
logging.debug("queuing trigger")
- self.queue.append(self.finish)
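+ # queue an Event setter instead of self.finish so the handler's own
+ # coroutine resumes and finishes normally when triggered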
+ event = Event()
+ self.queue.append(event.set)
if self.get_argument("wake", "true") == "true":
self.wake_callback()
- never_finish = Event()
- yield never_finish.wait()
-
-
-class HangHandler(RequestHandler):
- @gen.coroutine
- def get(self):
- never_finish = Event()
- yield never_finish.wait()
+ yield event.wait()
class ContentLengthHandler(RequestHandler):
),
url("/chunk", ChunkHandler),
url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
- url("/hang", HangHandler),
url("/hello", HelloWorldHandler),
url("/content_length", ContentLengthHandler),
url("/head", HeadHandler),
self.fetch("/trigger?wake=false", request_timeout=timeout, raise_error=True)
# trigger the hanging request to let it clean up after itself
self.triggers.popleft()()
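+ # run the event loop once so the awakened coroutine can exit before teardown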
+ self.io_loop.run_sync(lambda: gen.sleep(0))
@skipIfNoIPv6
def test_ipv6(self):
@gen.coroutine
def get(self):
self.test.on_handler_waiting()
- never_finish = Event()
- yield never_finish.wait()
+ yield self.test.cleanup_event.wait()
def on_connection_close(self):
self.test.on_connection_close()
class ConnectionCloseTest(WebTestCase):
def get_handlers(self):
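+ # set at the end of test_connection_close so the handler's coroutine can exit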
+ self.cleanup_event = Event()
return [("/", ConnectionCloseHandler, dict(test=self))]
def test_connection_close(self):
self.stream = IOStream(s)
self.stream.write(b"GET / HTTP/1.0\r\n\r\n")
self.wait()
+ # Let the hanging coroutine clean up after itself
+ self.cleanup_event.set()
+ self.io_loop.run_sync(lambda: gen.sleep(0))
def on_handler_waiting(self):
logging.debug("handler waiting")
break
return match
- @gen.coroutine
- def _execute(
+ async def _execute(
self, transforms: List["OutputTransform"], *args: bytes, **kwargs: bytes
- ) -> Generator[Any, Any, None]:
+ ) -> None:
"""Executes this request with the given output transforms."""
self._transforms = transforms
try:
result = self.prepare()
if result is not None:
- result = yield result
+ result = await result
if self._prepared_future is not None:
# Tell the Application we've finished with prepare()
# and are ready for the body to arrive.
# result; the data has been passed to self.data_received
# instead.
try:
- yield self.request._body_future
+ await self.request._body_future
except iostream.StreamClosedError:
return
method = getattr(self, self.request.method.lower())
result = method(*self.path_args, **self.path_kwargs)
if result is not None:
- result = yield result
+ result = await result
if self._auto_finish and not self._finished:
self.finish()
except Exception as e:
# except handler, and we cannot easily access the IOLoop here to
# call add_future (because of the requirement to remain compatible
# with WSGI)
- self.handler._execute(transforms, *self.path_args, **self.path_kwargs)
+ fut = gen.convert_yielded(
+ self.handler._execute(transforms, *self.path_args, **self.path_kwargs)
+ )
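+ # retrieve the result in the callback so any exception from _execute is
+ # re-raised and logged by the IOLoop instead of being silently dropped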
+ fut.add_done_callback(lambda f: f.result())
# If we are streaming the request body, then execute() is finished
# when the handler has prepared to receive the body. If not,
# it doesn't matter when execute() finishes (so we return None)