This is the first hands-on project from the course at http://www.jikexueyuan.com/course/777.html. The code provided on the site is quite dated, and between Python version changes and changes to the page's HTML it had accumulated a lot of bugs. To deepen my understanding of the code, I made some simple fixes and, along the way, got a firmer grasp of the material.
Reference:
http://blog.youkuaiyun.com/sinchb/article/details/8351685
# -*- coding: utf-8 -*-
import requests
import re

class spider(object):
    def __init__(self):
        print(u'Starting to crawl...')

    # getsource fetches the raw HTML of a page
    def getsource(self, url):
        html = requests.get(url)
        return html.text

    # changepage generates the links for the different page numbers
    def changepage(self, url, total_page):
        now_page = int(re.search(r'pageNum=(\d+)', url).group(1))
        page_group = []
        for i in range(now_page, total_page + 1):
            # re.sub's 4th positional argument is count, not flags (re.S is not needed here anyway)
            link = re.sub(r'pageNum=\d+', 'pageNum=%s' % i, url)
            page_group.append(link)
        return page_group

    # geteveryclass grabs the HTML block for each course on a page
    def geteveryclass(self, source):
        everyclass = re.findall('(<li id=.*?>.*?</li>)', source, re.S)
        return everyclass

    # getinfo extracts the fields we need from one course block
    def getinfo(self, eachclass):
        info = {}
        info['title'] = re.search('<h2 class=".*?"><a href=".*?" target="_blank" jktag=".*?">(.*?)</a>', eachclass, re.S).group(1)
        info['content'] = re.search('<p style=.*?>(.*?)</p>', eachclass, re.S).group(1)
        tmp = re.findall('<em>(.*?)</em>', eachclass, re.S)
        info['classtime'] = tmp[0]
        info['classlevel'] = tmp[1]  # big change here: findall returns the matched strings directly, so no group() is needed
        info['learnnum'] = re.search('<em class="learn-number">(.*?)</em>', eachclass, re.S).group(1)
        return info

    # saveinfo appends the results to info.txt
    def saveinfo(self, classinfo):
        f = open('info.txt', 'a', encoding='utf-8')
        for each in classinfo:
            each['title'] = each['title'].strip('\n\t')
            each['content'] = each['content'].strip('\n\t')  # strip() removes leading/trailing tabs and newlines
            each['classtime'] = each['classtime'].strip('\n\t')
            f.writelines('title:' + each['title'] + '\n')
            f.writelines('content:' + each['content'] + '\n')
            f.writelines('classtime:' + each['classtime'] + '\n')
            f.writelines('classlevel:' + each['classlevel'] + '\n')
            f.writelines('learnnum:' + each['learnnum'] + '\n\n')
        f.close()

if __name__ == '__main__':
    classinfo = []
    url = 'http://www.jikexueyuan.com/course/?pageNum=1'
    jikespider = spider()
    all_links = jikespider.changepage(url, 20)
    for link in all_links:
        print(u'Processing page: ' + link)
        html = jikespider.getsource(link)
        everyclass = jikespider.geteveryclass(html)
        for each in everyclass:
            info = jikespider.getinfo(each)
            classinfo.append(info)
    # write everything out once, after all pages have been collected
    jikespider.saveinfo(classinfo)
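
To make the regex part easier to experiment with, here is a minimal standalone sketch of the extraction logic in getinfo(). The sample_block snippet below is made up purely for illustration (the real jikexueyuan.com markup has changed over the years and may not match these patterns anymore), so treat it as a way to see how re.search / re.findall behave with re.S, not as the site's actual HTML.

# -*- coding: utf-8 -*-
# Standalone sketch: run the getinfo()-style patterns against a made-up course block.
import re

# hypothetical course block, for illustration only
sample_block = '''
<li id="lesson-100">
    <h2 class="lesson-info-h2"><a href="/course/100.html" target="_blank" jktag="demo">Intro to Python</a></h2>
    <p style="height:40px;">A short course description used to demo the regex extraction.</p>
    <em>2h 30min</em>
    <em>Beginner</em>
    <em class="learn-number">12345</em>
</li>
'''

# re.S lets '.' also match newlines, so a pattern can span several lines of HTML
title = re.search('<a href=".*?" target="_blank" jktag=".*?">(.*?)</a>', sample_block, re.S).group(1)
content = re.search('<p style=.*?>(.*?)</p>', sample_block, re.S).group(1)
# re.findall returns a list of the captured strings directly, no group() needed
tmp = re.findall('<em>(.*?)</em>', sample_block, re.S)

print(title)            # Intro to Python
print(content)          # A short course description used to demo the regex extraction.
print(tmp[0], tmp[1])   # 2h 30min Beginner

Pasting a block returned by geteveryclass() in place of sample_block is a quick way to check whether the patterns still match the live page.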