import requests
import re
from bs4 import BeautifulSoup
import os
url = "https://www.biqugee.com/book/56078/"
resp = requests.get(url)
# print(resp.text)
soup = BeautifulSoup(resp.text, 'html.parser').find('div', {'id': 'info'})
# print(soup)
so = soup.find('h1')
title = so.text
filename = 'e://{}'.format(title)
# print(soup)
isExists = os.path.exists(filename)
if not isExists:
os.mkdir(filename)
print('目录创建成功!')
else:
print('目录已经存在!')
# so = soup.find_all("p")
# for i in so:
# print(i.text)
soup = BeautifulSoup(resp.text, 'html.parser').find("div", {'id': 'list'})
a_list = soup.find_all('a')
for a in a_list:
href = a.get('href')
href = "https://www.biqugee.com" + href
# print(href)
resp = requests.get(href)
# print(resp.text)
soup = BeautifulSoup(resp.text, 'html.parser').find('div', {'class', 'bookname'})
bookname = soup.find('h1')
# print(bookname.string)
bookname = bookname.string
bookname02 = 'e://{}//{}.txt' .format(title, bookname)
# print(bookname)
soup = BeautifulSoup(resp.text, 'html.parser').find('div', {'id': 'content'})
file = soup.text.replace('\xa0' * 8, '\n\n')
file = re.sub(r'\n\s*\n', '\n', file)
# print(file)
with open(bookname02, 'a', encoding='utf-8') as f:
f.write(file)
f.close()
print(bookname+" "+'下载成功!')