(I updated some software and carelessly failed to save the original file...)
After learning BeautifulSoup (BS), I've found it really is stronger than regular expressions in some respects... but in certain cases the two work even better combined. Crawling novels is one such case: with BS you can simply select the text tags directly. Without further ado, here is the code.
```python
#!/usr/bin/python
# -*- coding: utf8 -*-
from bs4 import BeautifulSoup
import urllib2
import re

def get_menu(url):
    user_agent = "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:39.0) Gecko/20100101 Firefox/39.0"
    headers = {'User-Agent': user_agent}
    req = urllib2.Request(url, headers=headers)
    page = urllib2.urlopen(req).read()
    soup = BeautifulSoup(page, 'html.parser')
    novel = soup.find_all('title')[0].text.split('_')[0]  # extract the novel's name
    menu = []
    all_text = soup.find_all('a', target="_blank")  # blocks holding chapter names and link addresses
    regex = re.compile(ur'\u7b2c.+\u7ae0')  # Chinese regex for "第...章" (Chapter N); drops unrelated links
    for title in all_text:
        if re.findall(regex, title.text):
            name = title.text
            x = [name, title['href']]
            menu.append(x)  # append [chapter name, link address] to the menu list
    return menu, novel

def get_chapter(name, url):
    html = urllib2.urlopen(url).read()
    soup = BeautifulSoup(html, 'html.parser')
    div = soup.find('div', class_='read-content')
    content = div.find_all('p')  # extract the novel's body text
    return content

if __name__ == "__main__":
    user_agent = "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:39.0) Gecko/20100101 Firefox/39.0"
    headers = {'User-Agent': user_agent}
    req = urllib2.Request("http://www.zhulang.com/", headers=headers)
    page = urllib2.urlopen(req).read()
    pattern = re.compile('<h3><a href="(.*?)" title=', re.S)  # regex pulls each novel's index URL from the homepage
    result = re.findall(pattern, page)
    print result
    for k in result:
        menu, novel = get_menu(k)
        # print novel, str(len(menu))  # print the fetched novel name and chapter count
        for item in menu:
            chapter = get_chapter(item[0], item[1])
            print item[0]  # current chapter name
            f = open('novel2.txt', 'a')  # open once per chapter rather than once per paragraph
            for p in chapter:
                print p.get_text()
                f.write(p.get_text().encode('UTF-8') + '\n')
            f.close()
```
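To make the BS + regex combination clearer on its own, here is a minimal, self-contained sketch of just the filtering step. The HTML snippet, URLs, and variable names are made up for illustration: BS narrows the candidates by tag and attribute, and the regex then weeds out non-chapter links (navigation, ads) that share the same markup.

```python
# -*- coding: utf8 -*-
import re
from bs4 import BeautifulSoup

# Hypothetical HTML standing in for a real chapter-index page
html = u'''
<a target="_blank" href="/c/1.html">第一章 风起</a>
<a target="_blank" href="/c/2.html">第二章 云涌</a>
<a target="_blank" href="/about.html">关于我们</a>
'''

soup = BeautifulSoup(html, 'html.parser')
regex = re.compile(u'\u7b2c.+\u7ae0')  # matches "第...章" (Chapter N)

# Keep only <a target="_blank"> tags whose text looks like a chapter title
chapters = [(a.text, a['href'])
            for a in soup.find_all('a', target='_blank')
            if regex.search(a.text)]

for name, href in chapters:
    print name, href  # the "关于我们" (About Us) link is filtered out
```

The point is the division of labor: BS handles structure (which tags to look at), while the regex handles content (which of those tags actually name a chapter).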