Crawling the images from every post in a Baidu Tieba forum, using 校花吧 as an example
Approach
1. Get the forum's front-page URL; for the following pages, find the pattern in the URL (see the sketch after this list)
2. Get the URLs of all posts on one page
[post 1 link, post 2 link, …]
3. Send a request to each post URL and extract all of the image URLs
[image 1 link, image 2 link, …]
4. Send a request to each image URL and write the response to a local file in 'wb' mode
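To see the URL pattern from step 1 concretely: the front page takes kw (the forum name) and pn (the post offset) as query parameters, and each page holds 50 posts, so page i starts at pn = (i - 1) * 50. A minimal sketch that just prints the first few page URLs, using the 校花 forum from the title as the assumed kw value (any forum name works):

    import requests

    baseurl = 'http://tieba.baidu.com/f?'
    headers = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)'}

    # Page i maps to pn = (i - 1) * 50: page 1 -> pn=0, page 2 -> pn=50, ...
    for page in range(1, 4):
        params = {'kw': '校花', 'pn': str((page - 1) * 50)}
        res = requests.get(baseurl, params=params, headers=headers)
        print(res.url)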
import requests
from lxml import etree

class BaiduTiebaSpider(object):
    def __init__(self):
        self.baseurl = 'http://tieba.baidu.com/f?'
        self.headers = {"User-Agent": "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)"}

    # Get the post links on one forum page
    def getPageUrl(self, params):
        res = requests.get(self.baseurl, params=params, headers=self.headers)
        res.encoding = 'utf-8'
        html = res.text
        parseHtml = etree.HTML(html)
        # XPath returns the list of post hrefs; 'cleafix' is spelled
        # exactly as it appears in Tieba's page source
        tList = parseHtml.xpath('//div[@class="t_con cleafix"]/div/div/div/a/@href')
        for t in tList:
            # The hrefs are relative, so join them onto the site root
            tLink = 'http://tieba.baidu.com' + t
            self.getImgUrl(tLink)

    # Get the image links inside one post
    def getImgUrl(self, tLink):
        # Request the post page and get its HTML
        res = requests.get(tLink, headers=self.headers)
        res.encoding = 'utf-8'
        html = res.text
        # Create the parse object
        parseHtml = etree.HTML(html)
        # XPath for the user-uploaded images in the post body
        imgList = parseHtml.xpath('//div[@class="d_post_content j_d_post_content clearfix"]/img[@class="BDE_Image"]/@src')
        for imgLink in imgList:
            self.writeImg(imgLink)

    # Save one image to a local file
    def writeImg(self, imgLink):
        res = requests.get(imgLink, headers=self.headers)
        # Image data is binary, so read res.content directly;
        # setting an encoding would only matter for res.text
        data = res.content
        # Use the last 13 characters of the URL as the filename
        filename = imgLink[-13:]
        with open(filename, 'wb') as f:
            f.write(data)
        print('%s downloaded successfully' % filename)

    # Main routine: read the forum name and page range, then crawl
    def workOn(self):
        name = input('Enter the name of the tieba to crawl: ')
        begin = int(input('Enter the start page: '))
        end = int(input('Enter the end page: '))
        for i in range(begin, end + 1):
            # Each page holds 50 posts, so page i starts at offset (i - 1) * 50
            pn = (i - 1) * 50
            # Define the query parameters
            params = {
                'kw': name,
                'pn': str(pn)
            }
            self.getPageUrl(params)

if __name__ == '__main__':
    spider = BaiduTiebaSpider()
    spider.workOn()
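Two spots in this script are worth flagging as fragile. The XPath class names ('t_con cleafix', spelled exactly as in Tieba's page source, and 'BDE_Image') are tied to the site's markup at the time of writing and will need updating if the HTML changes. Likewise, filename = imgLink[-13:] assumes Baidu image URLs end in a fixed-length name; a slightly more robust sketch is to take everything after the last slash:

    filename = imgLink.split('/')[-1]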