self._io_loop = IOLoop()
self._async_client = AsyncHTTPClient(self._io_loop)
self._response = None
+ self._closed = False
def __del__(self):
- self._async_client.close()
+ self.close()
+
+ def close(self):
+ """Closes the HTTPClient, freeing any resources used."""
+ if not self._closed:
+ self._async_client.close()
+ self._io_loop.close()
+ self._closed = True
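
The explicit close() lets callers release the client's private IOLoop and AsyncHTTPClient deterministically instead of relying on __del__. A minimal usage sketch (the URL is illustrative; it follows the command-line demo later in this patch):

    from tornado.httpclient import HTTPClient

    client = HTTPClient()
    try:
        response = client.fetch("http://www.example.com/")
        print response.body
    finally:
        # Frees the client's IOLoop and AsyncHTTPClient instead of
        # waiting for garbage collection to run __del__.
        client.close()
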
def fetch(self, request, **kwargs):
"""Executes a request, returning an `HTTPResponse`.
create and destroy http clients. No other methods may be called
on the AsyncHTTPClient after close().
"""
- if self._async_clients[self.io_loop] is self:
+ if self._async_clients.get(self.io_loop) is self:
del self._async_clients[self.io_loop]
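
Using .get() guards against the cache entry for this IOLoop having already been replaced: close() only drops the shared instance from _async_clients when it is still the registered one. A hedged sketch of the intended per-IOLoop instance behaviour (constructor arguments follow HTTPClient.__init__ above):

    from tornado.httpclient import AsyncHTTPClient
    from tornado.ioloop import IOLoop

    io_loop = IOLoop()
    client = AsyncHTTPClient(io_loop)   # cached as the shared instance for io_loop
    client.close()                      # removes this instance from _async_clients
    # Constructing again now yields a fresh client rather than the closed one.
    client = AsyncHTTPClient(io_loop)
    client.close()
    io_loop.close()
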
def fetch(self, request, callback, **kwargs):
print response.headers
if options.print_body:
print response.body
+ client.close()
if __name__ == "__main__":
main()
"""Returns true if the singleton instance has been created."""
return hasattr(cls, "_instance")
+ def close(self, all_fds=False):
+ """Closes the IOLoop, freeing any resources used.
+
+ If ``all_fds`` is true, all file descriptors registered on the
+ IOLoop will be closed (not just the ones created by the IOLoop itself).
+ """
+ if all_fds:
+ for fd in self._handlers.keys()[:]:
+ if fd in (self._waker_reader.fileno(),
+ self._waker_writer.fileno()):
+ # Close these through the file objects that wrap them,
+ # or else the destructor will try to close them later
+ # and log a warning
+ continue
+ try:
+ os.close(fd)
+ except Exception:
+ logging.debug("error closing fd %d", fd, exc_info=True)
+ self._waker_reader.close()
+ self._waker_writer.close()
+ self._impl.close()
+
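For code that owns a private IOLoop (as HTTPClient does above), close() completes the create/start/stop lifecycle by releasing the poller and the waker pipe. A minimal sketch, assuming the caller registers and removes its own socket handler:

    import socket
    from tornado.ioloop import IOLoop

    sock = socket.socket()
    io_loop = IOLoop()

    def handler(fd, events):
        io_loop.stop()

    io_loop.add_handler(sock.fileno(), handler, IOLoop.READ)
    # ... io_loop.start() would run here until handler() stops the loop ...
    io_loop.remove_handler(sock.fileno())
    sock.close()
    # Frees the epoll/kqueue descriptor and the waker pipe; all_fds=True
    # would also have closed sock.fileno() if it were still registered.
    io_loop.close()
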
def add_handler(self, fd, handler, events):
"""Registers the given handler to receive the given events for fd."""
self._handlers[fd] = stack_context.wrap(handler)
def fileno(self):
return self._epoll_fd
+ def close(self):
+ os.close(self._epoll_fd)
+
def register(self, fd, events):
epoll.epoll_ctl(self._epoll_fd, self._EPOLL_CTL_ADD, fd, events)
def fileno(self):
return self._kqueue.fileno()
+ def close(self):
+ self._kqueue.close()
+
def register(self, fd, events):
self._control(fd, events, select.KQ_EV_ADD)
self._active[fd] = events
self.error_fds = set()
self.fd_sets = (self.read_fds, self.write_fds, self.error_fds)
+ def close(self):
+ pass  # the select() backend holds no kernel-side object, so there is nothing to free
+
def register(self, fd, events):
if events & IOLoop.READ: self.read_fds.add(fd)
if events & IOLoop.WRITE: self.write_fds.add(fd)
# This avoids leaks, especially when tests are run repeatedly
# in the same process with autoreload (because curl does not
# set FD_CLOEXEC on its file descriptors)
- for fd in self.io_loop._handlers.keys()[:]:
- if (fd == self.io_loop._waker_reader.fileno() or
- fd == self.io_loop._waker_writer.fileno()):
- # Close these through the file objects that wrap
- # them, or else the destructor will try to close
- # them later and log a warning
- continue
- try:
- os.close(fd)
- except:
- logging.debug("error closing fd %d", fd, exc_info=True)
- self.io_loop._waker_reader.close()
- self.io_loop._waker_writer.close()
+ self.io_loop.close(all_fds=True)
super(AsyncTestCase, self).tearDown()
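
With the per-fd cleanup folded into IOLoop.close(all_fds=True), a test only needs to make sure its IOLoop is closed in tearDown. A hedged sketch of a standalone test using the same pattern (the test body is illustrative only):

    import unittest
    from tornado.ioloop import IOLoop

    class IOLoopCloseTest(unittest.TestCase):
        def setUp(self):
            self.io_loop = IOLoop()

        def tearDown(self):
            # Close every registered fd, not just the ones the IOLoop
            # created, mirroring AsyncTestCase.tearDown above.
            self.io_loop.close(all_fds=True)

        def test_create_and_close(self):
            self.assertTrue(isinstance(self.io_loop, IOLoop))
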
def get_new_ioloop(self):