# -*- coding: utf-8 -*-
# Crawler: forging request headers when access is forbidden (403)
import urllib2
import random
url="http://blog.youkuaiyun.com/u013256816"
# my_headers={"Host":"blog.youkuaiyun.com",
# "Referer":"http://blog.youkuaiyun.com/",
# "User-Agent":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.154 Safari/537.36 LBBROWSER",
# "GET":url
# }
# req=urllib2.Request(url,headers=my_headers)
# html=urllib2.urlopen(req)
# print html.read()
#
# print req.header_items()
"""每次用不一样的头部信息
代理IP,假的用户头部信息
"""
myHeader=["Mozilla/4.0 (compatible; MSIE 5.0; Windows NT)",
"Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070309 Firefox/2.0.0.3",
"Opera/8.0 (Macintosh; PPC Mac OS X; U; en)",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.12) Gecko/20080219 Firefox/2.0.0.12 Navigator/9.0.0.6"
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; 360SE)"
" Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; Maxthon/3.0)"
]
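
# The module docstring above also mentions proxy IPs. Below is a minimal
# sketch of that idea, not from the original post; the proxy addresses are
# hypothetical placeholders you would replace with working ones.
myProxies=["10.0.0.1:8080","10.0.0.2:3128"]  # hypothetical proxies
def getContentViaProxy(url,headers,proxies):
    """Route the request through a random HTTP proxy with a fake User-Agent."""
    proxy_handler=urllib2.ProxyHandler({"http":random.choice(proxies)})
    opener=urllib2.build_opener(proxy_handler)
    opener.addheaders=[("User-Agent",random.choice(headers))]
    return opener.open(url).read()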
def getContent(url,headers):
    """Fetch a page that replies 403 Forbidden to the default urllib2
    User-Agent, by sending a randomly chosen browser-like header instead.
    :param url: the page to fetch
    :param headers: list of User-Agent strings to pick from
    :return: the raw page content
    """
    random_header=random.choice(headers)
    print random_header
    req=urllib2.Request(url)
    req.add_header("User-Agent",random_header)
    req.add_header("Host","blog.youkuaiyun.com")
    req.add_header("Referer","http://blog.youkuaiyun.com/")
    content=urllib2.urlopen(req).read()
    return content
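
# A hedged extension, not in the original post: if the server still answers
# 403, retry a few times with a different random User-Agent before giving up.
def getContentWithRetry(url,headers,retries=3):
    for _ in range(retries):
        try:
            return getContent(url,headers)
        except urllib2.HTTPError as e:
            if e.code!=403:
                raise  # only retry on 403 Forbidden
    return None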
print getContent(url,myHeader)
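
# Note: urllib2 only exists in Python 2. On Python 3 the equivalent lives in
# urllib.request; a minimal adaptation of getContent (not from the original
# post) would look like:
# import urllib.request
# req=urllib.request.Request(url,headers={"User-Agent":random.choice(myHeader)})
# print(urllib.request.urlopen(req).read())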