import urllib.request
import ssl
import os
import re
from collections import deque
def writeFileByte(htmlBytes, toPath):
    """Write raw response bytes to *toPath* in binary mode.

    Parameters:
        htmlBytes: bytes — the raw content to persist.
        toPath: str — destination file path (overwritten if it exists).
    """
    with open(toPath, "wb") as out:
        out.write(htmlBytes)
def writeFileStr(htmlBytes, toPath):
    """Write fetched content to *toPath* as text.

    Bug fixed: the original called ``str(htmlBytes)`` on a bytes object,
    which writes the literal ``b'...'`` repr (with escape sequences) into
    the file instead of the page text. Bytes are now decoded as UTF-8,
    with undecodable sequences replaced, before writing.

    Parameters:
        htmlBytes: bytes | str — content to persist; bytes are decoded.
        toPath: str — destination file path (overwritten if it exists).
    """
    if isinstance(htmlBytes, bytes):
        text = htmlBytes.decode("utf-8", errors="replace")
    else:
        text = str(htmlBytes)
    # Explicit encoding so output is portable across platform defaults.
    with open(toPath, "w", encoding="utf-8") as f:
        f.write(text)
def getHtmlBytes(url):
    """Fetch *url* over HTTP(S) and return the raw response body as bytes.

    Sends a desktop-browser User-Agent header (some sites reject the
    default Python UA) and skips TLS certificate verification.

    Bug fixed: the original never closed the HTTP response; the response
    is now used as a context manager so the connection is always released.

    NOTE(security): ``ssl._create_unverified_context`` is a private API and
    disables certificate checking — acceptable for a throwaway crawler,
    but unsafe against untrusted hosts.

    Parameters:
        url: str — the address to fetch.
    Returns:
        bytes — the full response body.
    Raises:
        urllib.error.URLError — on network or HTTP failure.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50"
    }
    req = urllib.request.Request(url, headers=headers)
    context = ssl._create_unverified_context()
    with urllib.request.urlopen(req, context=context) as response:
        return response.read()
# Crawl the page content and save it into files under the storage directory
def qqCrawler(url,toPath):
htmlBytes=getHtmlBytes(url)
writeFileByte(htmlBytes,r"存文件目录\file1.html")
writeFileStr(htmlBytes,r"存文件目录\file2.txt")
htmlStr=str(htmlBytes)
#找qq号
#pat=r'<
python网页信息爬取,爬取QQ号
最新推荐文章于 2023-12-22 11:52:52 发布