本文实例讲述了Python 实现的微信爬虫。分享给大家供大家参考,具体如下:
单线程版:
import urllib.request
import urllib.parse
import urllib.error
import re,time
# Global request setup: every urllib.request.urlopen() call made by this
# script goes through an opener that sends a desktop-Chrome User-Agent,
# so Sogou does not reject the requests as a bot.
headers = (
    "User-Agent",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 "
    "(KHTML, like Gecko) Chrome/60.0.3107.4 Safari/537.36",
)
operner = urllib.request.build_opener()
operner.addheaders = [headers]
# Install globally so plain urllib.request.urlopen() uses these headers.
urllib.request.install_opener(operner)
# Collected article URLs (filled in later by the crawl loop).
list_url = []
###获取网页内容(代理相关代码已被注释,当前实际未使用代理)
def use_proxy(url):
    """Fetch *url* and return the response body decoded as UTF-8.

    Despite the name, the proxy handler is commented out, so this performs
    a direct request (with a browser User-Agent) rather than a proxied one.

    Returns:
        str: the decoded page content on success, or None on any failure
        (the error is printed rather than raised, so callers must handle
        a None result).
    """
    try:
        headers = ("User-Agent",
                   "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3107.4 Safari/537.36")
        # Rebuilds and reinstalls the opener on every call; redundant with the
        # module-level setup but harmless, and kept so the function also works
        # standalone.
        opener = urllib.request.build_opener()
        opener.addheaders = [headers]
        urllib.request.install_opener(opener)
        data = urllib.request.urlopen(url).read().decode('utf-8')
        return data
    except urllib.error.URLError as e:
        # Bug fix: the original used `elif`, so an HTTPError (which carries
        # both .code and .reason) printed only the status code and never the
        # reason. Two independent checks report everything available.
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
        return None
    except Exception as e:
        print("exception" + str(e))
        # Brief back-off so a caller retrying in a loop does not hammer the
        # server after an unexpected failure.
        time.sleep(1)
        return None
##获取要爬取的url
def get_url(key, pagestart, pageend):
try:
keycode = urllib.parse.quote(key)
for page in range(pagestart, pageend + 1):
url = "http://weixin.sogou.com/weixin?query=%s&_sug_type_=&s_from=input&_sug_=n&type=%d&page=1&ie=utf8" % (
keycode, page)
data1 = use_proxy(url)
#print("data1的内容是", data1)
listurl_pattern = '<h3>.*?("http://.*?)</h3>'
result = re.compile(l