# requests library
# pip install requests            (installs the requests library)
# pip install beautifulsoup4 html5lib   (needed for parse_page below)
import requests
from bs4 import BeautifulSoup

data = {'wd': "zhangsan"}
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'
}
response = requests.get(url="http://www.baidu.com/s", headers=headers, params=data)

print(type(response.text))
# print(response.text)
# If the page fetched via response.text comes back garbled, decode response.content manually instead
print(response.content.decode("utf-8"))
print(response.url)
print(response.status_code)
# Default encoding: ISO-8859-1
print(response.encoding)


def parse_page(url):
    response = requests.get(url)
    text = response.content.decode('utf-8')
    soup = BeautifulSoup(text, 'html5lib')
    divContent = soup.select("div.conMidtab")[0]
    tables = divContent.select("table")
    all_data = []
    for table in tables:
        # skip the two header rows of each table
        trs = table.find_all("tr")[2:]
        for tr in trs:
            city = tr.find_all("a")[0].string
            main_temp = tr.find("td", {"width": "86"}).string
            all_data.append({"city": city, "main_temp": int(main_temp)})
    print(all_data)
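
# A minimal usage sketch for parse_page, not part of the original notes.
# The "div.conMidtab" selector suggests a China Weather Network text-forecast page;
# the URL below is an assumption for illustration -- substitute the page you actually scrape.
if __name__ == '__main__':
    parse_page("http://www.weather.com.cn/textFC/hb.shtml")  # hypothetical target URL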