-#!/usr/bin/env python
+#!/usr/bin/env python3
import time
from datetime import timedelta
-try:
-    from HTMLParser import HTMLParser
-    from urlparse import urljoin, urldefrag
-except ImportError:
-    from html.parser import HTMLParser
-    from urllib.parse import urljoin, urldefrag
+from html.parser import HTMLParser
+from urllib.parse import urljoin, urldefrag
-from tornado import httpclient, gen, ioloop, queues
+from tornado import gen, httpclient, ioloop, queues
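+# Root URL for the crawl and the number of concurrent worker coroutines.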
base_url = 'http://www.tornadoweb.org/en/stable/'
concurrency = 10
-@gen.coroutine
-def get_links_from_url(url):
+async def get_links_from_url(url):
"""Download the page at `url` and parse it for links.
Returned links have had the fragment after `#` removed, and have been made
absolute so, e.g. the URL 'gen.html#tornado.gen.coroutine' becomes
'http://www.tornadoweb.org/en/stable/gen.html'.
"""
-    try:
-        response = yield httpclient.AsyncHTTPClient().fetch(url)
-        print('fetched %s' % url)
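+    # fetch() errors now propagate to the caller; worker() catches and logs them.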
+    response = await httpclient.AsyncHTTPClient().fetch(url)
+    print('fetched %s' % url)
-        html = response.body if isinstance(response.body, str) \
-            else response.body.decode(errors='ignore')
-        urls = [urljoin(url, remove_fragment(new_url))
-                for new_url in get_links(html)]
-    except Exception as e:
-        print('Exception: %s %s' % (e, url))
-        raise gen.Return([])
-
-    raise gen.Return(urls)
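+    # Under Python 3, response.body is bytes; decode it before parsing for links.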
+    html = response.body.decode(errors='ignore')
+    return [urljoin(url, remove_fragment(new_url))
+            for new_url in get_links(html)]
def remove_fragment(url):
    pure_url, frag = urldefrag(url)
    return pure_url

def get_links(html):
    class URLSeeker(HTMLParser):
        def __init__(self):
            HTMLParser.__init__(self)
            self.urls = []

        def handle_starttag(self, tag, attrs):
            href = dict(attrs).get('href')
            if href and tag == 'a':
                self.urls.append(href)

    url_seeker = URLSeeker()
    url_seeker.feed(html)
    return url_seeker.urls
-@gen.coroutine
-def main():
+async def main():
    q = queues.Queue()
    start = time.time()
    fetching, fetched = set(), set()
-    @gen.coroutine
-    def fetch_url():
-        current_url = yield q.get()
-        try:
-            if current_url in fetching:
-                return
-
-            print('fetching %s' % current_url)
-            fetching.add(current_url)
-            urls = yield get_links_from_url(current_url)
-            fetched.add(current_url)
+    async def fetch_url(current_url):
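+        # Skip URLs we have already started fetching.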
+        if current_url in fetching:
+            return
-            for new_url in urls:
-                # Only follow links beneath the base URL
-                if new_url.startswith(base_url):
-                    yield q.put(new_url)
+        print('fetching %s' % current_url)
+        fetching.add(current_url)
+        urls = await get_links_from_url(current_url)
+        fetched.add(current_url)
-        finally:
-            q.task_done()
+        for new_url in urls:
+            # Only follow links beneath the base URL
+            if new_url.startswith(base_url):
+                await q.put(new_url)
-    @gen.coroutine
-    def worker():
-        while True:
-            yield fetch_url()
+    async def worker():
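+        # Pull URLs from the queue until a None sentinel signals shutdown.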
+        async for url in q:
+            if url is None:
+                return
+            try:
+                await fetch_url(url)
+            except Exception as e:
+                print('Exception: %s %s' % (e, url))
+            finally:
+                q.task_done()
-    q.put(base_url)
+    await q.put(base_url)
    # Start workers, then wait for the work queue to be empty.
-    for _ in range(concurrency):
-        worker()
-    yield q.join(timeout=timedelta(seconds=300))
+    workers = gen.multi([worker() for _ in range(concurrency)])
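+    # Wait until every queued URL has been marked task_done(), up to the 300s timeout.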
+    await q.join(timeout=timedelta(seconds=300))
    assert fetching == fetched
    print('Done in %d seconds, fetched %s URLs.' % (
        time.time() - start, len(fetched)))
+    # Signal all the workers to exit.
+    for _ in range(concurrency):
+        await q.put(None)
+    await workers
+
if __name__ == '__main__':
    io_loop = ioloop.IOLoop.current()
    io_loop.run_sync(main)