A Solution for Clearing the webBrowser Cache and Cookies

This article describes a solution for clearing the IE cache and cookies by calling the IE API, including how to use the ShellExecute method to clear specific components (such as Temporary Internet Files and Cookies), and provides code examples.

Reposted from: http://www.cnblogs.com/midcn/p/3527123.html


Testing shows that the webBrowser control and IE keep their cache and cookies in the same location (Local Settings\Temporary Internet Files), so the IE API can be called directly to clear both.
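To confirm this on a given machine, the shared cache and cookie folders can be printed with standard .NET calls (a small sketch added here; the exact folder names differ between Windows versions):

// Prints the cache/cookie folders shared by IE and the webBrowser control.
// Older Windows: ...\Local Settings\Temporary Internet Files; newer: ...\AppData\Local\Microsoft\Windows\INetCache.
string cacheDir = Environment.GetFolderPath(Environment.SpecialFolder.InternetCache);
string cookieDir = Environment.GetFolderPath(Environment.SpecialFolder.Cookies);
Console.WriteLine("Cache:   " + cacheDir);
Console.WriteLine("Cookies: " + cookieDir);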

Solution 1:

public enum ShowCommands : int
{
    SW_HIDE = 0,
    SW_SHOWNORMAL = 1,
    SW_NORMAL = 1,
    SW_SHOWMINIMIZED = 2,
    SW_SHOWMAXIMIZED = 3,
    SW_MAXIMIZE = 3,
    SW_SHOWNOACTIVATE = 4,
    SW_SHOW = 5,
    SW_MINIMIZE = 6,
    SW_SHOWMINNOACTIVE = 7,
    SW_SHOWNA = 8,
    SW_RESTORE = 9,
    SW_SHOWDEFAULT = 10,
    SW_FORCEMINIMIZE = 11,
    SW_MAX = 11
}

 

[DllImport("shell32.dll")]

static extern IntPtr ShellExecute( IntPtr hwnd, string lpOperation, string lpFile, string lpParameters, string lpDirectory, ShowCommands nShowCmd);

 

// Clear all IE/webBrowser traces (255 = Delete All; see the flag list below for finer-grained options)
ShellExecute(IntPtr.Zero, "open", "rundll32.exe", "InetCpl.cpl,ClearMyTracksByProcess 255", "", ShowCommands.SW_HIDE);
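If you prefer not to declare the ShellExecute P/Invoke, the same rundll32 command can be launched with Process.Start; this is an alternative sketch, not part of the original article:

// Same effect without P/Invoke: run rundll32 hidden via Process.Start.
// 255 = delete everything; substitute a flag value from the list below.
var psi = new System.Diagnostics.ProcessStartInfo
{
    FileName = "rundll32.exe",
    Arguments = "InetCpl.cpl,ClearMyTracksByProcess 255",
    UseShellExecute = true,           // required for WindowStyle to take effect
    WindowStyle = System.Diagnostics.ProcessWindowStyle.Hidden
};
System.Diagnostics.Process.Start(psi);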

 

 

The value passed to ClearMyTracksByProcess selects what is cleared:

Temporary Internet Files:
RunDll32.exe InetCpl.cpl,ClearMyTracksByProcess 8

Cookies:
RunDll32.exe InetCpl.cpl,ClearMyTracksByProcess 2

History:
RunDll32.exe InetCpl.cpl,ClearMyTracksByProcess 1

Form Data:
RunDll32.exe InetCpl.cpl,ClearMyTracksByProcess 16

Passwords:
RunDll32.exe InetCpl.cpl,ClearMyTracksByProcess 32

Delete All:
RunDll32.exe InetCpl.cpl,ClearMyTracksByProcess 255
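These values are bit flags, so several components can be cleared in one call by combining them; the sketch below (an addition, reusing the ShellExecute declaration from Solution 1) clears cookies and temporary files together:

// 2 = Cookies, 8 = Temporary Internet Files -> combined value 10 clears both in one call.
const int ClearCookies = 2;
const int ClearTempFiles = 8;
int flags = ClearCookies | ClearTempFiles;
ShellExecute(IntPtr.Zero, "open", "rundll32.exe",
             "InetCpl.cpl,ClearMyTracksByProcess " + flags,
             "", ShowCommands.SW_HIDE);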

 

Solution 2: quickly clearing webBrowser cookies

 

// HtmlDocument.Cookie is a single "name=value; name2=value2" string rather than a collection,
// so the usual approach is to write each cookie back with an already-expired date:
foreach (string cookie in this.webBrowser1.Document.Cookie.Split(';'))
{
    string name = cookie.Split('=')[0].Trim();
    this.webBrowser1.Document.Cookie = name + "=; expires=Thu, 01 Jan 1970 00:00:00 GMT";
}
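A related trick (not from the original post) is to end the current WinINet session, which discards the session cookies the webBrowser control is holding in memory:

// Ends the WinINet session so the next navigation starts with fresh session cookies.
// INTERNET_OPTION_END_BROWSER_SESSION is 42 in wininet.h.
[System.Runtime.InteropServices.DllImport("wininet.dll", SetLastError = true)]
static extern bool InternetSetOption(IntPtr hInternet, int dwOption, IntPtr lpBuffer, int dwBufferLength);

const int INTERNET_OPTION_END_BROWSER_SESSION = 42;

// Call before webBrowser1.Navigate(...) so the new request carries no old session cookies.
InternetSetOption(IntPtr.Zero, INTERNET_OPTION_END_BROWSER_SESSION, IntPtr.Zero, 0);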
