
This post records an error encountered while using Python to parse a plist file: the file is not in the expected format, so parsing fails. The error is raised while reading the plist file's contents and converting them into an XML element tree.

D:\BaiDuYun\Plist>python unpack_plist.py lobbyRelieve
Traceback (most recent call last):
  File "unpack_plist.py", line 70, in <module>
    gen_png_from_plist( plist_filename, png_filename )
  File "unpack_plist.py", line 23, in gen_png_from_plist
    root = ElementTree.fromstring(open(plist_filename, 'r').read())
  File "D:\GameDevelopment\Python27\lib\xml\etree\ElementTree.py", line 1300, in XML
    parser.feed(text)
  File "D:\GameDevelopment\Python27\lib\xml\etree\ElementTree.py", line 1642, in feed
    self._raiseerror(v)
  File "D:\GameDevelopment\Python27\lib\xml\etree\ElementTree.py", line 1506, in _raiseerror
    raise err
xml.etree.ElementTree.ParseError: not well-formed (invalid token): line 1, column 9
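
A parse error at line 1 usually means the file is not XML at all. A likely cause with plist sprite sheets is that the file is a binary plist, whose content starts with the 8-byte magic "bplist00"; that would also explain why the first invalid token shows up at column 9. Below is a minimal diagnostic sketch (detect_plist_format is a hypothetical helper, not part of unpack_plist.py) that checks the header before handing the file to ElementTree:

def detect_plist_format(path):
    # Read the header in binary mode; text mode ('r') can mangle bytes
    # on Windows, which is a second risk in the original open() call.
    with open(path, 'rb') as f:
        header = f.read(8)
    if header == b'bplist00':
        return 'binary'   # binary plist: ElementTree cannot parse this
    if header.lstrip().startswith(b'<'):
        return 'xml'      # looks like XML, e.g. <?xml ...?> or <plist ...>
    return 'unknown'

If this reports 'binary', convert the file to the XML flavor first (e.g. plutil -convert xml1 file.plist on macOS), or parse it on Python 3, where plistlib.load() auto-detects both formats; the Python 2.7 plistlib only reads XML plists. If it reports 'xml', prefer open(plist_filename, 'rb') in line 23 of unpack_plist.py so the bytes reach the parser unmodified.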

Reposted from: https://www.cnblogs.com/123ing/p/3920620.html
