append_path: command not found

本文解决了一个在切换用户时出现的问题:系统提示“append_path未找到”。通过检查发现这是由于/etc/profile文件更新时产生的冲突。文章详细介绍了如何通过合并/etc/profile与/etc/profile.pacnew文件来修复此问题。

切换用户时出现了这个问题,append_path未找到,应该是在/etc/profile中的一个函数,但为什么会报错呢?
我发现/etc/下有一个profile.pacnew文件
在这里插入图片描述
查了一下,发现是更新系统的时候,由于我之前修改了/etc/profile文件,导致/etc/profile不能直接升级,就生成了一个/etc/profile.pacnew文件,然后让你手动修改
https://wiki.archlinux.org/index.php/Pacman/Pacnew_and_Pacsave

在这里插入图片描述
只需要合并/etc/profile和/etc/profile.pacnew文件即可

mv /etc/profile /etc/profile.old
mv /etc/profile.pacnew /etc/profile

然后再把之前修改的东西加到/etc/profile中去就可以了


修复成功了!
在这里插入图片描述

def add_hashtree_footer(self, image_filename, partition_size, partition_name, generate_fec, fec_num_roots, hash_algorithm, block_size, salt, chain_partitions, algorithm_name, key_path, public_key_metadata_path, rollback_index, flags, rollback_index_location, props, props_from_file, kernel_cmdlines, setup_rootfs_from_kernel, setup_as_rootfs_from_kernel, include_descriptors_from_image, calc_max_image_size, signing_helper, signing_helper_with_files, release_string, append_to_release_string, output_vbmeta_image, do_not_append_vbmeta_image, print_required_libavb_version, use_persistent_root_digest, do_not_use_ab, no_hashtree, check_at_most_once): """Implements the 'add_hashtree_footer' command. See https://gitlab.com/cryptsetup/cryptsetup/wikis/DMVerity for more information about dm-verity and these hashes. Arguments: image_filename: File to add the footer to. partition_size: Size of partition or 0 to put it right at the end. partition_name: Name of partition (without A/B suffix). generate_fec: If True, generate FEC codes. fec_num_roots: Number of roots for FEC. hash_algorithm: Hash algorithm to use. block_size: Block size to use. salt: Salt to use as a hexadecimal string or None to use /dev/urandom. chain_partitions: List of partitions to chain. algorithm_name: Name of algorithm to use. key_path: Path to key to use or None. public_key_metadata_path: Path to public key metadata or None. rollback_index: Rollback index. flags: Flags value to use in the image. rollback_index_location: Location of the main vbmeta rollback index. props: Properties to insert (List of strings of the form 'key:value'). props_from_file: Properties to insert (List of strings 'key:<path>'). kernel_cmdlines: Kernel cmdlines to insert (list of strings). setup_rootfs_from_kernel: None or file to generate dm-verity kernel cmdline from. setup_as_rootfs_from_kernel: If True, generate dm-verity kernel cmdline to set up rootfs. 
include_descriptors_from_image: List of file objects for which to insert descriptors from. calc_max_image_size: Don't store the hashtree or footer - instead calculate the maximum image size leaving enough room for hashtree and metadata with the given |partition_size|. signing_helper: Program which signs a hash and return signature. signing_helper_with_files: Same as signing_helper but uses files instead. release_string: None or avbtool release string. append_to_release_string: None or string to append. output_vbmeta_image: If not None, also write vbmeta struct to this file. do_not_append_vbmeta_image: If True, don't append vbmeta struct. print_required_libavb_version: True to only print required libavb version. use_persistent_root_digest: Use a persistent root digest on device. do_not_use_ab: The partition does not use A/B. no_hashtree: Do not append hashtree. Set size in descriptor as zero. check_at_most_once: Set to verify data blocks only the first time they are read from the data device. Raises: AvbError: If an argument is incorrect or adding the hashtree footer failed. """ required_libavb_version_minor = 0 if use_persistent_root_digest or do_not_use_ab or check_at_most_once: required_libavb_version_minor = 1 if rollback_index_location > 0: required_libavb_version_minor = 2 # If we're asked to calculate minimum required libavb version, we're done. if print_required_libavb_version: print('1.{}'.format(required_libavb_version_minor)) return digest_size = len(create_avb_hashtree_hasher(hash_algorithm, b'') .digest()) digest_padding = round_to_pow2(digest_size) - digest_size # If |partition_size| is given (e.g. not 0), calculate the maximum image # size such that an image this size + the hashtree + metadata (footer + # vbmeta struct) fits in |partition_size|. We use very conservative figures # for metadata. 
if partition_size > 0: max_tree_size = 0 max_fec_size = 0 if not no_hashtree: (_, max_tree_size) = calc_hash_level_offsets( partition_size, block_size, digest_size + digest_padding) if generate_fec: max_fec_size = calc_fec_data_size(partition_size, fec_num_roots) max_metadata_size = (max_fec_size + max_tree_size + self.MAX_VBMETA_SIZE + self.MAX_FOOTER_SIZE) max_image_size = partition_size - max_metadata_size else: max_image_size = 0 # If we're asked to only calculate the maximum image size, we're done. if calc_max_image_size: print('{}'.format(max_image_size)) return image = ImageHandler(image_filename) if partition_size > 0: if partition_size % image.block_size != 0: raise AvbError('Partition size of {} is not a multiple of the image ' 'block size {}.'.format(partition_size, image.block_size)) elif image.image_size % image.block_size != 0: raise AvbError('File size of {} is not a multiple of the image ' 'block size {}.'.format(image.image_size, image.block_size)) # If there's already a footer, truncate the image to its original # size. This way 'avbtool add_hashtree_footer' is idempotent # (modulo salts). if image.image_size >= AvbFooter.SIZE: image.seek(image.image_size - AvbFooter.SIZE) try: footer = AvbFooter(image.read(AvbFooter.SIZE)) # Existing footer found. Just truncate. original_image_size = footer.original_image_size image.truncate(footer.original_image_size) except (LookupError, struct.error): original_image_size = image.image_size else: # Image size is too small to possibly contain a footer. original_image_size = image.image_size # If anything goes wrong from here-on, restore the image back to # its original size. try: # Ensure image is multiple of block_size. rounded_image_size = round_to_multiple(image.image_size, block_size) if rounded_image_size > image.image_size: # If we need to round up the image size, it means the length of the # data to append is not a multiple of block size. 
# Setting multiple_block_size to false, so append_raw() will not # require it. image.append_raw(b'\0' * (rounded_image_size - image.image_size), multiple_block_size=False) # If image size exceeds the maximum image size, fail. if partition_size > 0: if image.image_size > max_image_size: raise AvbError('Image size of {} exceeds maximum image ' 'size of {} in order to fit in a partition ' 'size of {}.'.format(image.image_size, max_image_size, partition_size)) if salt: salt = binascii.unhexlify(salt) elif salt is None and not use_persistent_root_digest: # If salt is not explicitly specified, choose a hash that's the same # size as the hash size. Don't populate a random salt if this # descriptor is being created to use a persistent digest on device. hash_size = digest_size with open('/dev/urandom', 'rb') as f: salt = f.read(hash_size) else: salt = b'' # Hashes are stored upside down so we need to calculate hash # offsets in advance. (hash_level_offsets, tree_size) = calc_hash_level_offsets( image.image_size, block_size, digest_size + digest_padding) # If the image isn't sparse, its size might not be a multiple of # the block size. This will screw up padding later so just grow it. if image.image_size % image.block_size != 0: assert not image.is_sparse padding_needed = image.block_size - (image.image_size%image.block_size) image.truncate(image.image_size + padding_needed) # Generate the tree and add padding as needed. tree_offset = image.image_size root_digest, hash_tree = generate_hash_tree(image, image.image_size, block_size, hash_algorithm, salt, digest_padding, hash_level_offsets, tree_size) # Generate HashtreeDescriptor with details about the tree we # just generated. 
if no_hashtree: tree_size = 0 hash_tree = b'' ht_desc = AvbHashtreeDescriptor() ht_desc.dm_verity_version = 1 ht_desc.image_size = image.image_size ht_desc.tree_offset = tree_offset ht_desc.tree_size = tree_size ht_desc.data_block_size = block_size ht_desc.hash_block_size = block_size ht_desc.hash_algorithm = hash_algorithm ht_desc.partition_name = partition_name ht_desc.salt = salt if do_not_use_ab: ht_desc.flags |= AvbHashtreeDescriptor.FLAGS_DO_NOT_USE_AB if not use_persistent_root_digest: ht_desc.root_digest = root_digest if check_at_most_once: ht_desc.flags |= AvbHashtreeDescriptor.FLAGS_CHECK_AT_MOST_ONCE # Write the hash tree padding_needed = (round_to_multiple(len(hash_tree), image.block_size) - len(hash_tree)) hash_tree_with_padding = hash_tree + b'\0' * padding_needed #ifdef OPLUS_FEATURE_SECURITY_COMMON #Zhiwei.Qu@BSP.Security.Basic, 2023/02/03, skip appending tree chunk if the size is zero. if len(hash_tree_with_padding) > 0: image.append_raw(hash_tree_with_padding) #endif /*OPLUS_FEATURE_SECURITY_COMMON*/ len_hashtree_and_fec = len(hash_tree_with_padding) # Generate FEC codes, if requested. if generate_fec: if no_hashtree: fec_data = b'' else: fec_data = generate_fec_data(image_filename, fec_num_roots) padding_needed = (round_to_multiple(len(fec_data), image.block_size) - len(fec_data)) fec_data_with_padding = fec_data + b'\0' * padding_needed fec_offset = image.image_size image.append_raw(fec_data_with_padding) len_hashtree_and_fec += len(fec_data_with_padding) # Update the hashtree descriptor. ht_desc.fec_num_roots = fec_num_roots ht_desc.fec_offset = fec_offset ht_desc.fec_size = len(fec_data) ht_desc_to_setup = None if setup_as_rootfs_from_kernel: ht_desc_to_setup = ht_desc # Generate the VBMeta footer and add padding as needed. 
vbmeta_offset = tree_offset + len_hashtree_and_fec vbmeta_blob = self._generate_vbmeta_blob( algorithm_name, key_path, public_key_metadata_path, [ht_desc], chain_partitions, rollback_index, flags, rollback_index_location, props, props_from_file, kernel_cmdlines, setup_rootfs_from_kernel, ht_desc_to_setup, include_descriptors_from_image, signing_helper, signing_helper_with_files, release_string, append_to_release_string, required_libavb_version_minor) padding_needed = (round_to_multiple(len(vbmeta_blob), image.block_size) - len(vbmeta_blob)) vbmeta_blob_with_padding = vbmeta_blob + b'\0' * padding_needed # Write vbmeta blob, if requested. if output_vbmeta_image: output_vbmeta_image.write(vbmeta_blob) # Append vbmeta blob and footer, unless requested not to. if not do_not_append_vbmeta_image: image.append_raw(vbmeta_blob_with_padding) # Now insert a DONT_CARE chunk with enough bytes such that the # final Footer block is at the end of partition_size.. if partition_size > 0: image.append_dont_care(partition_size - image.image_size - 1 * image.block_size) # Generate the Footer that tells where the VBMeta footer # is. Also put enough padding in the front of the footer since # we'll write out an entire block. footer = AvbFooter() footer.original_image_size = original_image_size footer.vbmeta_offset = vbmeta_offset footer.vbmeta_size = len(vbmeta_blob) footer_blob = footer.encode() footer_blob_with_padding = ( b'\0' * (image.block_size - AvbFooter.SIZE) + footer_blob) image.append_raw(footer_blob_with_padding) except Exception as e: # Truncate back to original size, then re-raise. image.truncate(original_image_size) raise AvbError('Adding hashtree_footer failed: {}.'.format(e)) from e详细分析这个脚本
最新发布
10-17
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ 混合OCR识别模块 腾讯云二维码识别 + 阿里云文字识别 专门用于识别A-1、B-1、A-2、B-2标识 支持二维码优先识别功能 使用示例: from ocr_recognition import OCRRecognizer # 创建识别器实例 ocr = OCRRecognizer() # 识别图像 result = ocr.recognize_image('path/to/image.jpg') if result['success']: print(f"识别到标签: {result['detected_labels']}") else: print(f"识别失败: {result.get('error', '未知错误')}") # 配置二维码识别 ocr.set_qrcode_config(enable_qrcode=True, qrcode_priority=True) """ import os import cv2 import numpy as np import base64 import re from typing import List, Dict from PIL import Image import io import json # 阿里云SDK导入 from alibabacloud_ocr_api20210707.client import Client as OcrClient from alibabacloud_credentials.client import Client as CredentialClient from alibabacloud_tea_openapi import models as open_api_models from alibabacloud_ocr_api20210707 import models as ocr_models from alibabacloud_tea_util import models as util_models from alibabacloud_tea_util.client import Client as UtilClient # 腾讯云SDK导入 from tencentcloud.common import credential from tencentcloud.common.profile.client_profile import ClientProfile from tencentcloud.common.profile.http_profile import HttpProfile from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException from tencentcloud.ocr.v20181119 import ocr_client, models as tencent_models # -------- 腾讯云API配置 -------- TENCENT_SECRET_ID = "AKIDvX2NXUgf4LPH4kHpIcorrlbM2a8hZVM8" TENCENT_SECRET_KEY = "f6KLAmEk5DBwVsEPN8f4ampeNtFTdgmo" TENCENT_REGION = "ap-beijing" # 腾讯云区域 # -------- 阿里云API配置 -------- ALIYUN_ACCESS_KEY_ID = "LTAI5tSKNrm21thiWK3g7sjL" ALIYUN_ACCESS_KEY_SECRET = "MtVCmEhEDY9V0jaaLBH9ChZprERgre" ALIYUN_REGION = "cn-hangzhou" # 使用杭州区域 ALIYUN_ENDPOINT = f"ocr-api.{ALIYUN_REGION}.aliyuncs.com" # -------- 识别配置 -------- VALID_LABELS = ['A-1', 'A-2', 'B-1', 'B-2'] IMAGE_EXTS = ['.png', '.jpg', '.jpeg', '.bmp'] CONFIDENCE_THRESHOLD = 0.7 # 置信度阈值 # 二维码识别配置 ENABLE_QRCODE = True # 是否启用二维码识别 QRCODE_PRIORITY = True # 二维码优先识别 class 
OCRRecognizer: """ 混合OCR识别器 腾讯云二维码识别 + 阿里云文字识别 专门用于识别A-1、B-1、A-2、B-2标识 """ def __init__(self, enable_qrcode: bool = ENABLE_QRCODE, qrcode_priority: bool = QRCODE_PRIORITY): """ 初始化混合OCR客户端 Args: enable_qrcode: 是否启用二维码识别 qrcode_priority: 是否优先进行二维码识别 """ self.enable_qrcode = enable_qrcode self.qrcode_priority = qrcode_priority # 初始化阿里云OCR客户端 try: config = open_api_models.Config( access_key_id=ALIYUN_ACCESS_KEY_ID, access_key_secret=ALIYUN_ACCESS_KEY_SECRET ) config.endpoint = ALIYUN_ENDPOINT self.aliyun_client = OcrClient(config) print("阿里云OCR初始化成功") except Exception as e: print(f"阿里云OCR初始化失败: {e}") raise e # 初始化腾讯云OCR客户端 try: cred = credential.Credential(TENCENT_SECRET_ID, TENCENT_SECRET_KEY) httpProfile = HttpProfile() httpProfile.endpoint = "ocr.tencentcloudapi.com" clientProfile = ClientProfile() clientProfile.httpProfile = httpProfile self.tencent_client = ocr_client.OcrClient(cred, TENCENT_REGION, clientProfile) print("腾讯云OCR初始化成功") except Exception as e: print(f"腾讯云OCR初始化失败: {e}") raise e def preprocess_image(self, image_path: str) -> bytes: """ 预处理图像,转换为base64编码 Args: image_path: 图像文件路径 Returns: 图像的base64编码字节数据 """ try: with open(image_path, 'rb') as f: image_data = f.read() return image_data except Exception as e: print(f"图像预处理失败: {e}") return None def image_to_base64(self, image_path: str) -> str: """ 将图像转换为base64字符串 Args: image_path: 图像文件路径 Returns: base64编码的字符串 """ try: with open(image_path, 'rb') as f: image_data = f.read() return base64.b64encode(image_data).decode('utf-8') except Exception as e: print(f"图像base64转换失败: {e}") return None def recognize_text(self, image_path: str) -> List[Dict]: """ 使用阿里云OCR识别图像中的文字 Args: image_path: 图像文件路径 Returns: 识别结果列表 """ try: print(f"正在处理图像: {os.path.basename(image_path)}") # 读取图像二进制数据 image_data = self.preprocess_image(image_path) print("正在调用阿里云OCR API...") # 创建识别请求 request = ocr_models.RecognizeAllTextRequest() request.body = image_data request.type = "General" # 设置识别类型为通用文字识别 # 创建运行时选项 runtime = 
util_models.RuntimeOptions() # 调用API response = self.aliyun_client.recognize_all_text_with_options(request, runtime) print(f"阿里云OCR识别完成") # 解析结果 texts = [] if response.body and response.body.data: content = response.body.data.content if content: print(f"检测到 {len(content)} 个文字区域") for item in content: # 处理字符串格式的结果 if isinstance(item, str): text_info = { 'text': item, 'confidence': 1.0, 'location': { 'left': 0, 'top': 0, 'width': 100, 'height': 30 } } else: # 处理对象格式的结果 text_info = { 'text': getattr(item, 'text', '') if hasattr(item, 'text') else str(item), 'confidence': getattr(item, 'confidence', 1.0) if hasattr(item, 'confidence') else 1.0, 'location': { 'left': int(item.text_rectangles[0]) if hasattr(item, 'text_rectangles') and item.text_rectangles and len(item.text_rectangles) >= 1 else 0, 'top': int(item.text_rectangles[1]) if hasattr(item, 'text_rectangles') and item.text_rectangles and len(item.text_rectangles) >= 2 else 0, 'width': int(item.text_rectangles[2] - item.text_rectangles[0]) if hasattr(item, 'text_rectangles') and item.text_rectangles and len(item.text_rectangles) >= 3 else 100, 'height': int(item.text_rectangles[3] - item.text_rectangles[1]) if hasattr(item, 'text_rectangles') and item.text_rectangles and len(item.text_rectangles) >= 4 else 30 } } texts.append(text_info) print(f" 检测到文字: '{text_info['text']}' (置信度: {text_info['confidence']:.2f})") print(f"总共检测到 {len(texts)} 个文字区域") return texts except Exception as error: print(f"阿里云OCR识别失败: {error}") return [] def recognize_qrcode_with_tencent_api(self, image_path: str) -> List[Dict]: """ 使用腾讯云OCR API的QrcodeOCR接口识别二维码 Args: image_path: 图像文件路径 Returns: 二维码识别结果列表 """ try: print(f"正在使用腾讯云OCR API进行二维码识别: {os.path.basename(image_path)}") # 将图像转换为base64 image_base64 = self.image_to_base64(image_path) if not image_base64: return [] print("正在调用腾讯云QrcodeOCR API...") # 创建请求对象 req = tencent_models.QrcodeOCRRequest() # 设置请求参数 params = { "ImageBase64": image_base64 } req.from_json_string(json.dumps(params)) # 
调用API resp = self.tencent_client.QrcodeOCR(req) print(f"腾讯云OCR二维码识别完成") # 解析二维码结果 qrcodes = [] if resp.CodeResults: print(f"检测到 {len(resp.CodeResults)} 个二维码/条形码") for code_result in resp.CodeResults: # 获取二维码内容 qr_text = getattr(code_result, 'Url', '') or getattr(code_result, 'Text', '') type_name = getattr(code_result, 'TypeName', 'UNKNOWN') if qr_text and qr_text.strip(): print(f"腾讯云API检测到二维码内容: {qr_text}") # 检查二维码内容是否包含目标标签 found_label = False for label in VALID_LABELS: if label in qr_text: found_label = True # 获取位置信息 left, top, width, height = 0, 0, 100, 100 if hasattr(code_result, 'Position') and code_result.Position: position = code_result.Position if hasattr(position, 'LeftTop') and hasattr(position, 'RightBottom'): left_top = position.LeftTop right_bottom = position.RightBottom if hasattr(left_top, 'X') and hasattr(left_top, 'Y'): left = int(left_top.X) top = int(left_top.Y) if hasattr(right_bottom, 'X') and hasattr(right_bottom, 'Y'): width = int(right_bottom.X - left) height = int(right_bottom.Y - top) qr_info = { 'text': qr_text, 'label': label, 'confidence': 1.0, # 腾讯云API识别置信度 'type': 'qrcode', 'method': f'腾讯云OCR API ({type_name})', 'location': { 'left': left, 'top': top, 'width': width, 'height': height } } qrcodes.append(qr_info) print(f"检测到二维码标签: '{label}' 内容: '{qr_text}' 类型: {type_name}") break if not found_label: print(f"二维码内容不包含目标标签 (A-1, A-2, B-1, B-2): '{qr_text}'") else: print("二维码数据为空或无效") else: print("腾讯云API未检测到二维码") print(f"腾讯云API总共检测到 {len(qrcodes)} 个有效二维码标签") return qrcodes except TencentCloudSDKException as error: print(f"腾讯云OCR二维码识别失败: {error}") return [] except Exception as error: print(f"腾讯云OCR二维码识别失败: {error}") return [] def recognize_qrcode(self, image_path: str) -> List[Dict]: """ 使用腾讯云OCR API进行二维码识别 Args: image_path: 图像文件路径 Returns: 二维码识别结果列表 """ try: print(f"正在进行腾讯云OCR二维码识别: {os.path.basename(image_path)}") # 直接使用腾讯云OCR API进行二维码识别 print("使用腾讯云OCR API (QrcodeOCR) 进行识别...") qrcodes = self.recognize_qrcode_with_tencent_api(image_path) 
if qrcodes: print("腾讯云OCR二维码识别成功!") return qrcodes else: print("腾讯云OCR二维码识别未找到有效标签") return [] except Exception as error: print(f"二维码识别失败: {error}") return [] def set_qrcode_config(self, enable_qrcode: bool = True, qrcode_priority: bool = True): """ 设置二维码识别配置 Args: enable_qrcode: 是否启用二维码识别 qrcode_priority: 是否优先进行二维码识别 """ self.enable_qrcode = enable_qrcode self.qrcode_priority = qrcode_priority print(f"二维码识别配置已更新: 启用={enable_qrcode}, 优先={qrcode_priority}") def normalize_text(self, text: str) -> str: """ 标准化文本,用于提高匹配准确率 Args: text: 原始文本 Returns: 标准化后的文本 """ if not text: return '' # 移除空格和特殊字符 text = re.sub(r'[\s\-_]', '', text) # 转换为大写 text = text.upper() # 处理常见的OCR错误 text = text.replace('O', '0') # 字母O替换为数字0 text = text.replace('I', '1') # 字母I替换为数字1 text = text.replace('L', '1') # 字母L替换为数字1 # 处理中文字符的OCR错误 text = text.replace('口', '0') # 中文"口"替换为数字0 text = text.replace('一', '1') # 中文"一"替换为数字1 return text def combine_consecutive_chars(self, ocr_results: List[Dict]) -> List[str]: """ 组合连续的字符,用于处理被分割的标签 Args: ocr_results: OCR识别结果列表 Returns: 组合后的字符串列表 """ if not ocr_results: return [] # 按位置排序(从左到右,从上到下) sorted_results = sorted(ocr_results, key=lambda x: (x['location']['top'], x['location']['left'])) # 提取所有文字 all_texts = [item['text'] for item in sorted_results] # 组合连续字符 combinations = [] # 单个字符 for text in all_texts: combinations.append(text) # 两个连续字符的组合 for i in range(len(all_texts) - 1): combinations.append(all_texts[i] + all_texts[i + 1]) # 三个连续字符的组合 for i in range(len(all_texts) - 2): combinations.append(all_texts[i] + all_texts[i + 1] + all_texts[i + 2]) # 四个连续字符的组合 for i in range(len(all_texts) - 3): combinations.append(all_texts[i] + all_texts[i + 1] + all_texts[i + 2] + all_texts[i + 3]) # 完整文本序列 full_text = ''.join(all_texts) combinations.append(full_text) return combinations def match_target_labels(self, ocr_results: List[Dict]) -> List[Dict]: """ 匹配目标标签 Args: ocr_results: OCR识别结果列表 Returns: 匹配到的标签列表 """ matched_labels = [] print("\n开始匹配目标标签...") # 直接匹配单个文字区域 
for result in ocr_results: original_text = result['text'] normalized_text = self.normalize_text(original_text) print(f" 原文: '{original_text}' -> 标准化: '{normalized_text}'") for label in VALID_LABELS: normalized_label = self.normalize_text(label) if normalized_label == normalized_text: matched_labels.append({ 'label': label, 'original_text': original_text, 'confidence': result['confidence'], 'location': result['location'], 'type': 'text' }) print(f" 匹配成功: {original_text} -> {label}") break # 如果没有直接匹配,尝试连续字符组合匹配 if not matched_labels: print("\n尝试连续字符组合匹配...") combinations = self.combine_consecutive_chars(ocr_results) for combination in combinations: normalized_combination = self.normalize_text(combination) print(f"完整文本序列: '{combination}' -> 标准化: '{normalized_combination}'") for label in VALID_LABELS: normalized_label = self.normalize_text(label) if normalized_label in normalized_combination: # 计算平均位置和置信度 avg_confidence = sum(r['confidence'] for r in ocr_results) / len(ocr_results) avg_location = { 'left': int(sum(r['location']['left'] for r in ocr_results) / len(ocr_results)), 'top': int(sum(r['location']['top'] for r in ocr_results) / len(ocr_results)), 'width': int(sum(r['location']['width'] for r in ocr_results) / len(ocr_results)), 'height': int(sum(r['location']['height'] for r in ocr_results) / len(ocr_results)) } matched_labels.append({ 'label': label, 'original_text': combination, 'confidence': avg_confidence, 'location': avg_location, 'type': 'text' }) print(f" 组合匹配成功: {combination} -> {label}") break if matched_labels: break return matched_labels def recognize_image(self, image_path: str, debug: bool = False) -> Dict: """ 识别单张图像中的目标标签 Args: image_path: 图像文件路径 debug: 是否启用调试模式 Returns: 识别结果字典 """ print(f"\n{'='*60}") print(f"开始识别图像: {image_path}") print(f"{'='*60}") # 检查文件是否存在 if not os.path.exists(image_path): # 尝试添加扩展名 found = False for ext in IMAGE_EXTS: test_path = image_path + ext if os.path.exists(test_path): image_path = test_path found = True break if 
not found: return {'error': f'图像文件不存在: {image_path}'} try: matched_labels = [] recognition_method = "文字识别" # 优先进行二维码识别 if self.enable_qrcode and self.qrcode_priority: print("\n优先进行二维码识别...") qr_results = self.recognize_qrcode(image_path) if qr_results: # 将二维码结果转换为匹配标签格式 for qr_info in qr_results: matched_labels.append({ 'label': qr_info['label'], 'original_text': qr_info['text'], 'confidence': qr_info['confidence'], 'location': qr_info['location'], 'type': 'qrcode' }) recognition_method = "二维码识别" print(f"二维码识别成功,找到 {len(matched_labels)} 个标签,跳过文字识别") else: print("二维码识别未找到有效标签,继续进行文字识别...") # 如果二维码识别未找到结果,或者未启用二维码识别,则进行文字识别 if not matched_labels: print("\n开始文字识别...") # 执行OCR识别 ocr_results = self.recognize_text(image_path) if not ocr_results: return {'error': '未检测到任何文字'} # 匹配目标标签 matched_labels = self.match_target_labels(ocr_results) recognition_method = "文字识别" print(f"\n匹配结果: 找到 {len(matched_labels)} 个目标标签") # 构建结果 result = { 'image_path': image_path, 'ocr_service': '混合OCR (腾讯云二维码 + 阿里云文字)', 'recognition_method': recognition_method, 'total_text_regions': len(ocr_results) if 'ocr_results' in locals() else 0, 'matched_labels': matched_labels, 'detected_labels': [match['label'] for match in matched_labels], 'success': len(matched_labels) > 0 } # 打印最终结果 print(f"\n{'='*60}") print(f"识别方法: {recognition_method}") if matched_labels: print("识别成功!检测到以下标签:") for match in matched_labels: method_type = "(二维码)" if match.get('type') == 'qrcode' else "(文字)" print(f"{match['label']} {method_type}: '{match['original_text']}' (置信度: {match['confidence']:.2f})") else: print("未识别到目标标签 (A-1, A-2, B-1, B-2)") print(f"{'='*60}") return result except Exception as e: error_msg = f"识别过程出错: {e}" print(f"{error_msg}") return {'error': error_msg} def find_image_file(image_path: str) -> str: """ 查找图像文件,支持自动添加扩展名 Args: image_path: 图像路径(可能不包含扩展名) Returns: 完整的图像文件路径,如果找不到则返回None """ # 如果文件已存在,直接返回 if os.path.exists(image_path): return image_path # 尝试添加不同的扩展名 for ext in IMAGE_EXTS: test_path = 
image_path + ext if os.path.exists(test_path): return test_path return None # 便捷函数 def recognize_image(image_path: str, enable_qrcode: bool = True, qrcode_priority: bool = True) -> Dict: """ 便捷的图像识别函数 Args: image_path: 图像文件路径 enable_qrcode: 是否启用二维码识别 qrcode_priority: 是否优先进行二维码识别 Returns: 识别结果字典 """ ocr = OCRRecognizer(enable_qrcode=enable_qrcode, qrcode_priority=qrcode_priority) return ocr.recognize_image(image_path) def recognize_text_only(image_path: str) -> Dict: """ 仅使用文字识别的便捷函数 Args: image_path: 图像文件路径 Returns: 识别结果字典 """ ocr = OCRRecognizer(enable_qrcode=False, qrcode_priority=False) return ocr.recognize_image(image_path) def recognize_qrcode_only(image_path: str) -> Dict: """ 仅使用二维码识别的便捷函数 Args: image_path: 图像文件路径 Returns: 识别结果字典 """ ocr = OCRRecognizer(enable_qrcode=True, qrcode_priority=True) # 临时禁用文字识别回退 original_method = ocr.recognize_image def qrcode_only_recognize(image_path: str, debug: bool = False) -> Dict: print(f"\n{'='*60}") print(f"开始二维码识别: {image_path}") print(f"{'='*60}") if not os.path.exists(image_path): for ext in IMAGE_EXTS: test_path = image_path + ext if os.path.exists(test_path): image_path = test_path break else: return {'error': f'图像文件不存在: {image_path}'} try: qr_results = ocr.recognize_qrcode(image_path) matched_labels = [] if qr_results: for qr_info in qr_results: matched_labels.append({ 'label': qr_info['label'], 'original_text': qr_info['text'], 'confidence': qr_info['confidence'], 'location': qr_info['location'], 'type': 'qrcode' }) result = { 'image_path': image_path, 'ocr_service': '腾讯云二维码识别', 'recognition_method': '二维码识别', 'total_text_regions': 0, 'matched_labels': matched_labels, 'detected_labels': [match['label'] for match in matched_labels], 'success': len(matched_labels) > 0 } print(f"\n{'='*60}") if matched_labels: print("二维码识别成功!检测到以下标签:") for match in matched_labels: print(f"{match['label']} (二维码): '{match['original_text']}' (置信度: {match['confidence']:.2f})") else: print("未识别到目标标签 (A-1, A-2, B-1, B-2)") 
print(f"{'='*60}") return result except Exception as e: error_msg = f"二维码识别过程出错: {e}" print(f"{error_msg}") return {'error': error_msg} return qrcode_only_recognize(image_path)这是能够正常配置 二维码识别以及文字识别的代码,将里面的二维码识别更换成我下面这个import sys import time import lcm import cv2 from threading import Thread, Lock import rclpy from rclpy.node import Node from sensor_msgs.msg import Image from cv_bridge import CvBridge from pyzbar.pyzbar import decode from robot_control_cmd_lcmt import robot_control_cmd_lcmt from robot_control_response_lcmt import robot_control_response_lcmt class RobotControl: """机器人控制类,通过 LCM 与机器人进行命令和状态交互""" def __init__(self): # LCM 通信通道 self.lc_recv = lcm.LCM("udpm://239.255.76.67:7670?ttl=255") self.lc_send = lcm.LCM("udpm://239.255.76.67:7671?ttl=255") self.cmd_msg = robot_control_cmd_lcmt() self.rec_msg = robot_control_response_lcmt() self.send_lock = Lock() self.running = True self.life_count = 0 self.mode_ok = 0 # 接收与发送线程 self.thread_recv = Thread(target=self._receive_response, daemon=True) self.thread_send = Thread(target=self._send_publish, daemon=True) self.thread_recv.start() self.thread_send.start() def _receive_response(self): """接收机器人状态""" self.lc_recv.subscribe("robot_control_response", self._msg_handler) while self.running: self.lc_recv.handle() time.sleep(0.002) def _msg_handler(self, channel, data): """解析收到的 LCM 数据""" self.rec_msg = robot_control_response_lcmt().decode(data) if getattr(self.rec_msg, 'order_process_bar', 0) >= 95: self.mode_ok = self.rec_msg.mode def _send_publish(self): """周期性发送指令""" delay_counter = 0 while self.running: with self.send_lock: if delay_counter > 20: self.lc_send.publish("robot_control_cmd", self.cmd_msg.encode()) delay_counter = 0 delay_counter += 1 time.sleep(0.005) def send_cmd(self, msg): """安全地更新并发送命令""" with self.send_lock: self.life_count += 1 msg.life_count = self.life_count % 128 self.cmd_msg = msg def stand_up(self): """让机器人站立""" print("指令:机器人开始站立") stop_msg = robot_control_cmd_lcmt() stop_msg.mode = 7 
stop_msg.gait_id = 0 self.send_cmd(stop_msg) time.sleep(0.5) msg = robot_control_cmd_lcmt() msg.mode = 12 msg.gait_id = 0 self.send_cmd(msg) start_time = time.time() while time.time() - start_time < 5: if self.mode_ok == 12: print("状态:机器人站立成功") return True time.sleep(0.1) print("警告:站立超时") return False def stop(self): """让机器人停止""" msg = robot_control_cmd_lcmt() msg.mode = 7 msg.gait_id = 1 self.send_cmd(msg) print("指令:停止机器人") def forward(self, speed=0.3): """让机器人前进""" msg = robot_control_cmd_lcmt() msg.mode = 11 msg.gait_id = 26 msg.vel_des = [speed, 0, 0] msg.duration = 0 self.send_cmd(msg) print(f"指令:前进,速度 {speed}") def quit(self): """退出控制""" print("退出机器人控制") self.running = False self.stop() if self.thread_recv.is_alive(): self.thread_recv.join(timeout=1.0) if self.thread_send.is_alive(): self.thread_send.join(timeout=1.0) print("机器人控制已退出") class QRCodeNode(Node): """ROS2 节点:摄像头识别二维码并控制机器人""" def __init__(self, robot_controller: RobotControl): super().__init__('qr_code_node') self.robot_controller = robot_controller self.bridge = CvBridge() self.qr_detected = False from rclpy.qos import QoSProfile qos_profile = QoSProfile(depth=10) self.create_subscription(Image, '/image', self.image_callback, qos_profile) cv2.namedWindow("Camera Feed", cv2.WINDOW_NORMAL) cv2.resizeWindow("Camera Feed", 640, 480) self.get_logger().info("二维码检测节点已启动") def image_callback(self, msg): """接收图像并进行二维码识别""" try: cv_image = self.bridge.imgmsg_to_cv2(msg, "bgr8") except Exception as e: self.get_logger().error(f"图像转换失败: {e}") return # 图像增强(提高对比度) enhanced = cv2.convertScaleAbs(cv_image, alpha=2.0, beta=50) gray = cv2.cvtColor(enhanced, cv2.COLOR_BGR2GRAY) decoded_objs = decode(gray) if decoded_objs and not self.qr_detected: qr_data = decoded_objs[0].data.decode('utf-8') rect = decoded_objs[0].rect self.get_logger().info(f"检测到二维码: {qr_data}") self.qr_detected = True cv2.rectangle(cv_image, (rect.left, rect.top), (rect.left + rect.width, rect.top + rect.height), (0, 255, 0), 2) 
cv2.putText(cv_image, qr_data, (rect.left, rect.top - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2) self.handle_qr_command(qr_data) cv2.imshow("Camera Feed", cv_image) cv2.waitKey(1) def handle_qr_command(self, content): """根据二维码内容执行命令""" self.robot_controller.forward(speed=0.3) self.get_logger().info("执行二维码指令:前进") def main(): rclpy.init() robot_ctrl = RobotControl() if not robot_ctrl.stand_up(): print("机器人站立失败,退出程序") robot_ctrl.quit() sys.exit(1) qr_node = QRCodeNode(robot_ctrl) try: rclpy.spin(qr_node) except KeyboardInterrupt: print("程序被用户终止") finally: qr_node.destroy_node() robot_ctrl.quit() cv2.destroyAllWindows() rclpy.shutdown() if __name__ == "__main__": main() 将这个二维码识别替换上面那个用腾讯云识别的,保留上述阿里云识别文字的
08-17
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值