import time
import asyncio
import aiohttp
from bs4 import BeautifulSoup as bs
BASE_URL = "http://www.biqudu.com"
TITLE2URL = dict()
CONTENT = list()


# Shared by every fetch() call so the 5-request concurrency cap actually applies.
SEM = asyncio.Semaphore(5)


async def fetch(url, callback=None, **kwargs):
    """Download `url`; pass the page to `callback` if given, else return the text."""
    headers = {"User-Agent": "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)"}
    async with SEM:
        async with aiohttp.ClientSession() as session:
            async with session.get(url, headers=headers) as res:
                page = await res.text()
    if callback:
        callback(page, **kwargs)
    else:
        return page


def parse_url(page):
    """Build the chapter title -> URL map from the book's index page."""
    soup = bs(page, "lxml")
    dd_a_doc = soup.select("dd > a")
    for a_doc in dd_a_doc:
        article_page_url = a_doc["href"]
        article_title = a_doc.get_text()
        if article_page_url:
            TITLE2URL[article_title] = article_page_url


def parse_body(page, **kwargs):
    """Extract one chapter's text and append it to CONTENT."""
    title = kwargs.get("title", "")
    print("{}".format(title))
    soup = bs(page, "lxml")
    content_doc = soup.find("div", id="content")
    # Drop the injected "readx();" ad call and turn the non-breaking-space
    # paragraph indentation into line breaks
    content_text = content_doc.get_text().replace("readx();", "").replace("\xa0" * 4, "\r\n")
    content = "%s\n%s\n\n" % (title, content_text)
    CONTENT.append(content)


def main():
    t0 = time.time()
    loop = asyncio.get_event_loop()
    # Fetch the index page first to fill TITLE2URL
    loop.run_until_complete(fetch(BASE_URL + "/43_43074/", callback=parse_url))
    # Build tasks for the first 500 chapters only, so no coroutine goes un-awaited
    tasks = [fetch(BASE_URL + page_url, callback=parse_body, title=title)
             for title, page_url in list(TITLE2URL.items())[:500]]
    loop.run_until_complete(asyncio.gather(*tasks))
    loop.close()
    elapsed = time.time() - t0
    print("cost {}".format(elapsed))
if name = "_ _ main__":
main()