Webdriver - Reset Safari

Author: Max.Bai

Date: 2014/10


The following error sometimes appears when running automated tests on a Mac with the Safari browser:

java.lang.AssertionError: Exception thrown in Open URL ->Timed out awaiting response to command "get" after 46862 ms (WARNING: The server did not provide any stacktrace information)
Command duration or timeout: 46.88 seconds
Build info: version: '2.42.2', revision: '6a6995d', time: '2014-06-03 17:42:03'
System info: host: 'xxxMac.local', ip: '192.168.9.60', os.name: 'Mac OS X', os.arch: 'x86_64', os.version: '10.9.4', java.version: '1.8.0_11'
Driver info: org.openqa.selenium.safari.SafariDriver


Resetting Safari resolves this problem each time it occurs, but after more and more test cases have been executed, the error appears once more.

Can we create a script to reset Safari before each test execution, or to reset Safari at a regular interval?

Yes. We can use the commands below to reset Safari (tested on OS X 10.9):

rm ~/Library/Safari/History.plist                 # browsing history
rm ~/Library/Safari/Downloads.plist               # download history
rm ~/Library/Safari/HistoryIndex.sk               # history search index
rm ~/Library/Safari/LastSession.plist             # last session (windows/tabs to reopen)
rm ~/Library/Safari/TopSites.plist                # Top Sites page
rm -rf ~/Library/Caches/com.apple.safari          # on-disk cache
rm ~/Library/Safari/SearchProviders.plist.signed  # search provider settings
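
To reset Safari before each test run, or at a regular interval, these commands can be wrapped in a small shell script. The sketch below is one way to do it; the script name reset_safari.sh and the killall step are assumptions added here, not part of the original commands (quitting Safari first prevents it from rewriting these files on exit, and rm -f keeps the script quiet when a file is already gone):

#!/bin/bash
# reset_safari.sh - hypothetical helper that clears Safari state
# before an automation run (paths as tested on OS X 10.9).
killall Safari 2>/dev/null   # assumption: no Safari session worth keeping
rm -f ~/Library/Safari/History.plist
rm -f ~/Library/Safari/Downloads.plist
rm -f ~/Library/Safari/HistoryIndex.sk
rm -f ~/Library/Safari/LastSession.plist
rm -f ~/Library/Safari/TopSites.plist
rm -f ~/Library/Safari/SearchProviders.plist.signed
rm -rf ~/Library/Caches/com.apple.safari

The script can then be invoked from the test framework's setup hook before each run, or scheduled at an interval with cron, e.g. every 30 minutes (the interval is an arbitrary example):

*/30 * * * * /path/to/reset_safari.sh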


