import json
import scrapy
class TxxzSpider(scrapy.Spider):
    """POST a JSON search payload to the Tencent recruiting API and page
    through results by re-posting with an incremented ``pageIndex``.

    Fetches pages 1 through 3, printing each response's ``data`` field.
    """

    name = "txxz"
    # allowed_domains = ["txxz.com"]
    start_urls = ["https://join.qq.com/api/v1/position/searchPosition?timestamp=1764213291465"]
    page = 1  # pageIndex of the NEXT page to request (pages 1..3 are fetched)

    def _build_payload(self, page_index):
        # Search filters expected by the API, serialized to a JSON string
        # because the endpoint takes a JSON request body (not form data).
        return json.dumps({
            "projectIdList": [2, 12, 1, 14, 20, 16],
            "keyword": "",
            "bgList": [],
            "workCountryType": 0,
            "workCityList": [],
            "recruitCityList": [],
            "positionFidList": [],
            "pageIndex": page_index,
            "pageSize": 10,
        })

    def start_requests(self):
        # BUG FIX: the original hard-coded pageIndex=2 here while parse()
        # restarted paging from self.page == 1, so the actual fetch order
        # was 2, 1, 2 (page 2 duplicated, page 3 never requested).
        # Driving the first request from self.page yields pages 1, 2, 3.
        yield scrapy.Request(
            url=self.start_urls[0],
            callback=self.parse,
            method='POST',
            headers={'content-type': 'application/json;charset=UTF-8'},
            body=self._build_payload(self.page),
        )

    def parse(self, response):
        print(response.json()['data'])
        # Advance to the next page; stop once page 3 has been consumed.
        self.page += 1
        if self.page > 3:
            return
        yield scrapy.Request(
            url=self.start_urls[0],
            callback=self.parse,
            method='POST',
            headers={'content-type': 'application/json;charset=UTF-8'},
            body=self._build_payload(self.page),
        )
# Method 1 above: POST a JSON string body directly from start_requests, with pagination added.
# Method 2 below: fetch the page first, then issue the JSON POST from parse().
import json
import scrapy
class Txxz2Spider(scrapy.Spider):
    """Fetch the recruiting landing page first, then POST a JSON search
    payload to the position-search API from ``parse`` and print the result."""

    name = "txxz2"
    # allowed_domains = ["txxz2"]
    start_urls = ["https://join.qq.com/post.html"]

    def parse(self, response):
        # Search filters; serialized to a JSON string for the request body.
        payload = {
            "projectIdList": [],
            "projectMappingIdList": [1, 2, 12, 14, 20],
            "keyword": "",
            "bgList": [],
            "workCountryType": 0,
            "workCityList": [],
            "recruitCityList": [],
            "positionFidList": [],
            "pageIndex": 1,
            "pageSize": 10,
        }
        # JSON body (not form data), so Content-Type must declare JSON.
        yield scrapy.Request(
            url='https://join.qq.com/api/v1/position/searchPosition?timestamp=1764213291465',
            callback=self.parse_data,
            method='POST',
            body=json.dumps(payload),
            headers={'Content-Type': 'application/json;charset=UTF-8'},
        )

    def parse_data(self, response):
        # Dump the decoded JSON response for inspection.
        print(response.json())
from typing import Iterable, Any
from urllib.parse import urlencode
import scrapy
class HbwSpider(scrapy.Spider):
    """POST URL-encoded form data (not JSON) to the Burger King China
    product-list endpoint and print the JSON response."""

    name = "hbw"
    # allowed_domains = ["hbw.com"]
    # Requests built from start_urls default to GET, so start_requests()
    # is overridden to issue a POST instead.
    start_urls = ["https://www.bkchina.cn/product/productList"]

    def start_requests(self):
        # Form fields, urlencoded into "type=season" for the request body.
        form_body = urlencode({'type': 'season'})
        yield scrapy.Request(
            url=self.start_urls[0],
            callback=self.parse,
            method='POST',
            body=form_body,
            # Plain form submission: declare the urlencoded content type.
            headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'},
        )

    def parse(self, response):
        print(response.json())
# This last spider uses a plain form submission; the examples above submit a JSON string body.
# (Scraped page footer removed: view counter and folded-comments notice.)