# 一、re库 — Section 1: the `re` (regular expression) library
import re
def test_regex():
    """Demonstrate re.search/match/findall/split/finditer/sub on a
    zipcode-like pattern (a 6-digit number not starting with 0)."""
    zipcode = r'[1-9]\d{5}'

    # search scans anywhere in the string; match anchors at the start.
    hit = re.search(zipcode, 'BIT 100081 100081')
    if hit:
        print(hit.group(0))
    hit = re.match(zipcode, '100081 BIT 100081')
    if hit:
        print(hit.group(0))

    # findall returns every match; split uses the pattern as a delimiter.
    print(re.findall(zipcode, 'BIT100081 TSU100084'))
    print(re.split(zipcode, 'BIT100081 TSU100084'))
    print(re.split(zipcode, 'BIT100081 TSU100084', maxsplit=1))

    # finditer yields Match objects lazily.
    for found in re.finditer(zipcode, 'BIT100081 TSU100084'):
        if found:
            print(found.group(0))

    # sub replaces every match with the given text.
    print(re.sub(zipcode, ':zipcode', 'BIT100081 TSU100084'))
def test_match_object():
    """Print the notable attributes and methods of a re.Match object:
    the searched string, the compiled pattern, the search bounds, the
    matched text, and the match position."""
    m = re.search(r'[1-9]\d{5}', 'BIT100081 TSU100084')
    # Same order as attribute docs: string, re, pos, endpos, then the
    # matched text and its location.
    for value in (m.string, m.re, m.pos, m.endpos,
                  m.group(0), m.start(), m.end(), m.span()):
        print(value)
def greed_and_min_match():
    """Contrast greedy matching (.* takes the longest span) with
    non-greedy matching (.*? takes the shortest)."""
    greedy = re.search(r'py.*n', 'pynabncdenfn')
    print(greedy.group())
    lazy = re.search(r'py.*?n', 'pynabncdenfn')
    print(lazy.group(0))


greed_and_min_match()
# 二、淘宝定向爬虫实例 — Section 2: Taobao targeted-crawler example
import requests
import re
def find_html(url, kv, cookies):
    """Fetch *url* and return its decoded text, or '' on any request failure.

    kv      -- HTTP headers dict (used here for the User-Agent).
    cookies -- cookie dict forwarded to the server (login session).

    Best-effort by design: callers treat an empty string as
    "page unavailable" rather than handling exceptions themselves.
    """
    try:
        r = requests.get(url, cookies=cookies, headers=kv, timeout=30)
        r.raise_for_status()
        # Use the content-sniffed encoding so non-UTF-8 pages decode correctly.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        # Narrowed from a bare `except:` — a bare except also swallowed
        # KeyboardInterrupt/SystemExit and hid programming errors.
        return ""
def parse_page(ilt, html):
    """Extract [price, title] pairs from a Taobao search-result page and
    append them to *ilt* (mutated in place).

    The page embeds JSON-ish fields like "view_price":"13.00" and
    "raw_title":"...", which are pulled out with regexes.
    """
    try:
        prices = re.findall(r'"view_price":"[\d.]*"', html)
        titles = re.findall(r'"raw_title":".*?"', html)
        # zip stops at the shorter list, so mismatched counts no longer
        # raise IndexError as the old range(len(...)) indexing could.
        for price_field, title_field in zip(prices, titles):
            # Take the text after the first ':' and strip the quotes.
            # maxsplit=1 keeps titles that themselves contain ':' intact,
            # and avoids eval() on untrusted scraped content (security).
            price = price_field.split(':', 1)[1].strip('"')
            title = title_field.split(':', 1)[1].strip('"')
            ilt.append([price, title])
    except (IndexError, ValueError):
        print("parse_page: unexpected page format, results skipped")
def print_good_list(ilt):
    """Print *ilt* ([price, title] pairs) as a numbered table with a
    header row (序号/价格/商品名称 = index/price/item name)."""
    tplt = "{:4}\t{:8}\t{:16}"
    print(tplt.format("序号", "价格", "商品名称"))
    # enumerate replaces the manual counter; numbering starts at 1.
    for index, item in enumerate(ilt, start=1):
        print(tplt.format(index, item[0], item[1]))
def main():
    """Crawl up to *depth* Taobao search-result pages for *goods* and
    print a numbered price/title table of everything found."""
    goods = '铅笔'
    depth = 3  # number of result pages; Taobao paginates 44 items per page
    start_url = 'https://s.taobao.com/search?&q=' + goods
    info_list = []

    # Paste a logged-in browser cookie string here ("k1=v1; k2=v2; ...").
    coo = ''
    cookies = {}
    for segment in coo.split(';'):
        segment = segment.strip()
        if not segment:
            # Skip blanks — also makes an empty cookie string safe; the
            # old unconditional unpack raised ValueError when coo == ''.
            continue
        name, value = segment.split('=', 1)
        cookies[name] = value

    kv = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0'}
    for i in range(depth):
        try:
            url = start_url + '&s=' + str(44 * i)
            html = find_html(url, kv, cookies)
            if i == 0:
                print(html)  # debug: dump the first page so the user can verify login worked
            parse_page(info_list, html)
        except Exception:
            # Best-effort per page: a bad page should not abort the crawl.
            continue
    print_good_list(info_list)


if __name__ == '__main__':
    # Guarded entry point so importing this module does not trigger the crawl.
    main()