[python]---From Java to Python (03)---Web Crawlers

1. A simple web page

#!/usr/bin/env python 
# -*- coding:utf-8 -*-

import urllib.request

# Open the URL and read the entire response body (bytes)
file = urllib.request.urlopen("https://www.jd.com")
data = file.read()
# dataline = file.readline()  # or read a single line at a time
print(data)

# Save the raw HTML to disk; "wb" because read() returns bytes
fhandle = open("E:/python/1_1.html", "wb")
fhandle.write(data)
fhandle.close()

# urlretrieve() downloads a URL straight to a local file
# filename = urllib.request.urlretrieve("http://edu.51cto.com", filename="E:/python/2.html")
# filename2 = urllib.request.urlretrieve("http://www.jd.com", filename="E:/python/3.html")

print(file.getcode())  # HTTP status code, e.g. 200
print(file.geturl())   # final URL after any redirects
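
Coming from Java's try-with-resources, the more idiomatic Python form is a context manager, so the response and the file are closed automatically even on error. A minimal sketch of the same download:

import urllib.request

# Same download, but `with` closes the response and the file automatically
with urllib.request.urlopen("https://www.jd.com") as response:
    data = response.read()

with open("E:/python/1_1.html", "wb") as fhandle:
    fhandle.write(data)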


2. Simulating a browser

#!/usr/bin/env python 
# -*- coding:utf-8 -*-

import urllib.request

url = "https://blog.youkuaiyun.com/java_zhangshuai/article/details/81749208"
# Pretend to be Chrome: many sites reject the default Python User-Agent
headers = ("User-Agent",
           "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36")
opener = urllib.request.build_opener()
# Every request made through this opener now carries the header
opener.addheaders = [headers]
data = opener.open(url).read()
print(data)
fhandle = open("E:/python/2_1.html", "wb")
fhandle.write(data)
fhandle.close()
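
If the header is only needed for one request, an alternative (not used in the original post) is to pass it straight to Request instead of configuring an opener. A minimal sketch:

import urllib.request

# Minimal sketch: attach the User-Agent to a single Request
url = "https://blog.youkuaiyun.com/java_zhangshuai/article/details/81749208"
req = urllib.request.Request(url, headers={
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 "
                  "(KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36",
})
data = urllib.request.urlopen(req).read()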

3. HTTP requests

#!/usr/bin/env python 
# -*- coding:utf-8 -*-

import urllib.request

keywd = "hello"
# Characters outside the URL-safe set (e.g. Chinese) must be percent-encoded;
# quote() is re-exported here from urllib.parse
keywd = urllib.request.quote(keywd)
url = "http://www.baidu.com/s?wd=" + keywd
req = urllib.request.Request(url)
data = urllib.request.urlopen(req).read()

fhandle = open("E:/python/3_1.html", "wb")
fhandle.write(data)
fhandle.close()
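
When a query carries several parameters, urllib.parse.urlencode builds and percent-encodes the whole query string in one call. A minimal sketch:

import urllib.parse

# Minimal sketch: build a percent-encoded query string from a dict
params = urllib.parse.urlencode({"wd": "你好", "pn": 10})
url = "http://www.baidu.com/s?" + params  # ...s?wd=%E4%BD%A0%E5%A5%BD&pn=10

The POST example below uses the same function to build the form body.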

import urllib.parse

url = "http://www.iqianyue.com/mypost"
data = {"name": "zhangsan", "pass": "zhangsanpass"}
# POST bodies must be bytes: urlencode the dict, then encode it
postdata = urllib.parse.urlencode(data).encode("utf-8")

# Retry up to twice in case the request fails
for x in range(1, 3):
    try:
        req = urllib.request.Request(url, postdata)
        req.add_header("User-Agent",
                       "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36")
        data = urllib.request.urlopen(req).read()

        fhandle = open("E:/python/3_2.html", "wb")
        fhandle.write(data)
        fhandle.close()
        print(len(data))
        break  # success, stop retrying
    except Exception as e:
        print("Exception occurred ---> " + str(e))
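
The broad except above can be narrowed: urllib raises urllib.error.HTTPError for non-2xx responses and urllib.error.URLError for network-level failures. A minimal sketch:

import urllib.error
import urllib.request

try:
    data = urllib.request.urlopen("http://www.iqianyue.com/mypost", timeout=10).read()
except urllib.error.HTTPError as e:   # server answered with an error status
    print("HTTP error:", e.code, e.reason)
except urllib.error.URLError as e:    # DNS failure, refused connection, timeout...
    print("Network error:", e.reason)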

4. Crawling the images under an e-commerce product list

#!/usr/bin/env python 
# -*- coding:utf-8 -*-

import urllib.request
import re


def craw(url, page):
    html1 = urllib.request.urlopen(url).read()
    html1 = str(html1)
    # Isolate the product-list block: everything between <div id="plist"> and the pager
    pat1 = '<div id="plist".+? <div class="page clearfix">'
    result1 = re.compile(pat1).findall(html1)
    result1 = result1[0]
    # Extract every product image URL inside that block (raw string, so \. stays literal)
    pat2 = r'<img width="220" height="220" data-img="1" src="//(.+?\.jpg)">'
    imagelist = re.compile(pat2).findall(result1)
    x = 1
    for imageurl in imagelist:
        print(imageurl)
        imagename = "E:/python/爬虫/" + str(page) + str(x) + ".jpg"
        imageurl = "https://" + imageurl
        try:
            # Save the image at imageurl to the path imagename
            urllib.request.urlretrieve(imageurl, filename=imagename)
        except Exception:
            pass  # skip images that fail to download
        x += 1


# Crawl list pages 1 through 9
for i in range(1, 10):
    url = "https://list.jd.com/list.html?cat=9192,12632,12633&page=" + str(i)
    craw(url, i)
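
Firing nine list pages back to back can look abusive to the server; a small pause between requests is a common courtesy. A minimal sketch reusing the same craw():

import time

# Minimal sketch: pause between pages so the crawl stays polite
for i in range(1, 10):
    url = "https://list.jd.com/list.html?cat=9192,12632,12633&page=" + str(i)
    craw(url, i)
    time.sleep(1)  # one-second pause between page fetches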
