Catalog
翻页 (Paging: build one search URL per 44-item result page)
from urllib import parse

# Taobao paginates with the `s` query parameter: an item offset that
# advances 44 items per result page.
domain = 'https://s.taobao.com/search?'
keyword = 'Python3网络爬虫视频'
for i in range(199):
    page = i * 44
    query = parse.urlencode({'q': keyword, 's': page})
    url = domain + query
    print(url)
获取动态页面信息 (Fetch the dynamically rendered page with Selenium)
# Fetch a dynamically rendered page: Taobao fills the result list in with
# JavaScript, so drive a real browser and wait for the items to appear.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
driver = webdriver.Firefox()  # NOTE(review): requires geckodriver on PATH — confirm environment
driver.get('https://s.taobao.com/search?q=Python爬虫&s=44')
wait = WebDriverWait(driver, 9)  # explicit wait, 9-second timeout
# Block until at least one product item node is present in the DOM
# (raises TimeoutException after 9 s otherwise).
wait.until(EC.presence_of_element_located(
    (By.CSS_SELECTOR, '#mainsrp-itemlist .items .item')))
html = driver.page_source  # HTML after JavaScript has run
print(html)
提取商品信息 (Extract product details with PyQuery)
# Parse the rendered HTML and pull the product fields out of every result item.
from pyquery import PyQuery as pq
doc = pq(html)
items = doc('#mainsrp-itemlist .items .item').items()
for n, item in enumerate(items):
    product = {
        'image': item.find('.pic .img').attr('src'),
        'price': item.find('.price').text(),
        # drop the trailing 3-char "人付款" ("people paid") suffix
        'deal_cnt': item.find('.deal-cnt').text()[:-3],
        'title': item.find('.title').text(),
        'shop': item.find('.shop').text(),
        'location': item.find('.location').text()}
    # Fix: the dict was built and then silently discarded each iteration;
    # emit it (1-based) as the complete version below does.
    print(n + 1, product)
完整代码 (Complete program)
from urllib import parse
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from pyquery import PyQuery as pq
# Shared browser session and explicit wait used by get_products() and main().
driver = webdriver.Firefox()
wait = WebDriverWait(driver, 9)  # 9-second timeout for element waits
def get_products():
    """Wait for the current result page to render, then print its products.

    Uses the module-level ``driver`` and ``wait``.  At most 44 items (one
    full page) are printed, numbered from 1.  A ``TimeoutException`` from
    the wait is propagated to the caller.
    """
    wait.until(EC.presence_of_element_located(
        (By.CSS_SELECTOR, '#mainsrp-itemlist .items .item')))
    doc = pq(driver.page_source)
    for idx, node in enumerate(doc('#mainsrp-itemlist .items .item').items()):
        if idx >= 44:  # one result page holds at most 44 items
            break
        print(idx + 1, {
            'price': node.find('.price').text(),
            # drop the trailing "人付款" ("people paid") suffix
            'deal_cnt': node.find('.deal-cnt').text()[:-3],
            'title': node.find('.title').text(),
            'shop': node.find('.shop').text(),
            'location': node.find('.location').text()})
def main():
    """Crawl Taobao search-result pages until a page fails to render.

    Builds one URL per 44-item page (via the ``s`` offset parameter), loads
    it in the shared ``driver``, and delegates scraping to
    ``get_products()``.  A ``TimeoutException`` from ``get_products()``
    means no items appeared (past the last page), so the crawl stops there.
    The browser window is always closed on exit.
    """
    domain = 'https://s.taobao.com/search?'
    keyword = 'Python3网络爬虫视频'
    try:
        for i in range(199):
            page = i * 44  # `s` is the item offset: 44 items per page
            url = domain + parse.urlencode({'q': keyword, 's': page})
            driver.get(url)
            try:
                get_products()
            except TimeoutException:  # fix: dropped the unused `as te` binding
                print('结束页:', url)
                break
    finally:
        # Fix: close the browser even if an unexpected exception escapes the
        # loop, so a stray Firefox process isn't left behind.
        driver.close()


if __name__ == '__main__':
    main()