I've been learning a bit of Python.
So I wrote a simple crawler. It has no exception handling and isn't wrapped in a class (it really is very basic...), so it will have to do for now. Please install Python 3.x and set up the environment variables, then save the code below as crawler.py and create a folder named data in the same directory.
Run python crawler.py in a terminal and the crawl starts. The data ends up in the data folder, one file per article, named after the article's date.
Each result file contains the Chinese and English text along with the title, date, and source, so the output can be used to build a parallel corpus.
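For reference, each saved file follows the simple tag layout that article2file writes out; the title, source, date, and paragraphs shown below are made-up placeholders, not real output:

<title>Example Article Title</title>
<source>VOA</source>
<date>2013-04-11</date>
<english>
First English paragraph of the article.
Second English paragraph of the article.
</english>
<chinese>
(the corresponding Chinese paragraphs, one per line)
</chinese>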
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# author:xizer00
# date:2013/04/11
# crawls hxen.com

import urllib.request as request
import re
import socket

#initial URL
init_url = r'http://hxen.com/interpretation/bilingualnews/'
#regex for the total number of articles
numofarticle_regex = rb'<b>(\d+?)</b> '
#regex for the total number of index pages
numofpage_regex = rb'<b>\d+?/(\d+?)</b>'
#regex for the content-page URLs listed on the current index page
contenturls_regex = rb'<td height="25"> <font face="Wingdings">v</font> <a href="([a-zA-Z0-9-\s\.\/]+)" target=_blank>'
#regex for the article title on a content page
title_regex = rb'<b><h1>([\s\S]*?)</h1></b>'
#title_regex = rb'<b><h1>([\s\S]*)</h1></b>'
#regexes for the article body on a content page
content1_regex = rb'<p>([\s\S]*?)</p>'
#content1_regex = rb'<p>([\s\S]*)<p>'
content2_regex = rb'<p>([\s\w(),.`~!@#$%^&*/\\;:{}\[\]]*?)<script'
#content2_regex = rb'<p>([\s\S]*)<script'
#article source
source_regex = rb"Source: <a href='[\w:\/.\\_\s]*' target=_blank>([\s\S]*?)</a>"
#publication date
date_regex = rb'([\d-]+) '
#leftover markup to strip out
junk1_regex = rb'&[\w]*?;'
junk2_regex = rb'<br[\s]*/>'
junk3_regex = rb'<a href=http://www.hxen.com/englishlistening/voaenglish/ target=_blank class=infotextkey>VOA</a>'

def crawler(url):
    #print general info about the article list
    getInfo(url)
    #collect the URLs of all index pages
    pageurls = getAllPageUrls(url)
    #walk every index page, pull out its content-page URLs and scrape them
    #contents = []
    for pageurl in pageurls:
        contenturls = getContentUrls(pageurl)
        for contenturl in contenturls:
            if contenturl == 'http://hxen.com/interpretation/bilingualnews/20111112/160028.html':
                break
            else:
                content = getContentData(contenturl)
                article2file(content)
                #print(content)  # probably buggy
    print('Crawl finished!')

def getPage(url):
    page = None
    try:
        page = request.urlopen(url)
        code = page.getcode()
        if code < 200 or code >= 300:
            print('unexpected HTTP status code: {0}'.format(code))
    except Exception as e:
        if isinstance(e, request.HTTPError):
            print('http error: {0}'.format(e.code))
        elif isinstance(e, request.URLError) and isinstance(e.reason, socket.timeout):
            print('url error: socket timeout {0}'.format(e.__str__()))
        else:
            print('misc error: ' + e.__str__())
    return page

#print the total number of articles
def getInfo(url):
    page = getPage(url)
    rawdata = page.read()
    numofarticles = re.findall(numofarticle_regex, rawdata)[0]
    numofpages = re.findall(numofpage_regex, rawdata)[0]
    print('Total number of articles: %d' % int(numofarticles))
    #print('Total number of index pages: %d' % int(numofpages))
    return

#build the URLs of all index pages
def getAllPageUrls(url):
    page = getPage(url)
    rawdata = page.read()
    numofpages = re.findall(numofpage_regex, rawdata)[0]
    inumofpages = int(numofpages)
    pageurls = ['http://hxen.com/interpretation/bilingualnews/index.html']
    for x in range(2, inumofpages + 1):
        pageurls.append(r'http://hxen.com/interpretation/bilingualnews/index_%d.html' % x)
    print('Index page URLs:')
    for x in pageurls:
        print(x)
    return pageurls

#extract the content-page URLs from one index page
def getContentUrls(url):
    page = getPage(url)
    rawdata = page.read()
    rawcontenturls = re.findall(contenturls_regex, rawdata)
    contenturls = []
    for url in rawcontenturls:
        contenturls.append(r'http://hxen.com%s' % url.decode('gbk'))
    print('Content page URLs found:')
    for url in contenturls:
        print(url)
    return contenturls

#scrape the data from one content page
def getContentData(url):
    print("\nCrawling page: %s\n" % url)
    page = getPage(url)
    rawdata = page.read()
    #article title
    title = re.findall(title_regex, rawdata)
    print('Title: %s' % title[0].decode('gbk'))
    #article source
    source = re.findall(source_regex, rawdata)
    print('Source: %s' % source[0].decode('gbk'))
    #publication date
    date = re.findall(date_regex, rawdata)
    print('Date: %s' % date[0].decode('gbk'))
    #English text
    encontent1 = re.findall(content1_regex, rawdata)
    #trailing part of the English text
    encontent2 = re.findall(content2_regex, rawdata)
    #merge the two parts
    if encontent2:
        encontent1.append(encontent2[0])
    print('English')
    for sentence in encontent1:
        print("%s" % sentence.decode('gbk'))
    #for sentence in encontent2:
    #    print("%s" % sentence.decode('gbk'))
    #the Chinese text lives on the second page of the article
    url = url.replace('.html', '_2.html')
    page = getPage(url)
    rawdata = page.read()
    chcontent1 = re.findall(content1_regex, rawdata)
    #trailing part of the Chinese text (apparently not needed)
    #chcontent2 = re.findall(content2_regex, rawdata)
    print('Chinese')
    for sentence in chcontent1:
        print("%s" % sentence.decode('gbk'))
    return {'title': title, 'source': source, 'date': date, 'english': encontent1, 'chinese': chcontent1}

#write one article to a file in ./data, named after its date
def article2file(content):
    #only write the file when every field was actually extracted
    if content['title'] != [] and content['source'] != [] and content['date'] != [] and content['english'] != [] and content['chinese'] != []:
        fp = open('./data/%s.txt' % content['date'][0].decode('gbk'), 'wb+')
        fp.write(b'<title>' + content['title'][0] + b'</title>\r\n')
        fp.write(b'<source>' + content['source'][0] + b'</source>\r\n')
        fp.write(b'<date>' + content['date'][0] + b'</date>\r\n')
        fp.write(b'<english>\r\n')
        for e in content['english']:
            #strip leftover HTML entities, <br/> tags and the VOA link
            e = re.sub(junk1_regex, b'', e)
            e = re.sub(junk2_regex, b'', e)
            e = re.sub(junk3_regex, b'', e)
            fp.write(e + b'\r\n')
        fp.write(b'</english>\r\n')
        fp.write(b'<chinese>\r\n')
        for c in content['chinese']:
            c = re.sub(junk1_regex, b'', c)
            c = re.sub(junk2_regex, b'', c)
            c = re.sub(junk3_regex, b'', c)
            fp.write(c + b'\r\n')
        fp.write(b'</chinese>\r\n')
        fp.close()
    return

if __name__ == "__main__":
    print("Starting the crawler....\n")
    crawler(init_url)
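
As a side note, here is a minimal sketch (not part of the crawler itself) of how one of the saved files might be read back when building the parallel corpus. The helper name load_article is just for illustration; it assumes the tag layout written by article2file above and GBK-encoded bytes:

import re

def load_article(path):
    #read the raw bytes written by article2file and decode them as GBK,
    #skipping any bytes that fail to decode
    with open(path, 'rb') as fp:
        text = fp.read().decode('gbk', errors='ignore')
    title = re.search(r'<title>(.*?)</title>', text, re.S)
    english = re.search(r'<english>\r?\n(.*?)</english>', text, re.S)
    chinese = re.search(r'<chinese>\r?\n(.*?)</chinese>', text, re.S)
    return {
        'title': title.group(1) if title else '',
        'english': english.group(1).splitlines() if english else [],
        'chinese': chinese.group(1).splitlines() if chinese else [],
    }

#example usage: pair up the English and Chinese paragraphs of one article
#(this assumes they align one to one, which may not hold for every article)
#article = load_article('./data/2013-04-11.txt')
#for en, ch in zip(article['english'], article['chinese']):
#    print(en, '|||', ch)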
Below is a screenshot of the crawler running: