1090. Largest Values From Labels

本文介绍了一种基于贪心算法的策略,用于从一组带有价值和标签的物品中选择最大价值的子集,同时考虑到数量限制和标签使用限制。通过四个实例演示了算法的应用,展示了如何在限制条件下优化子集价值。

We have a set of items: the i-th item has value values[i] and label labels[i].

Then, we choose a subset S of these items, such that:

  • |S| <= num_wanted
  • For every label L, the number of items in S with label L is <= use_limit.

Return the largest possible sum of the subset S.

 

Example 1:

Input: values = [5,4,3,2,1], labels = [1,1,2,2,3], num_wanted = 3, use_limit = 1
Output: 9
Explanation: The subset chosen is the first, third, and fifth item.

Example 2:

Input: values = [5,4,3,2,1], labels = [1,3,3,3,2], num_wanted = 3, use_limit = 2
Output: 12
Explanation: The subset chosen is the first, second, and third item.

Example 3:

Input: values = [9,8,8,7,6], labels = [0,0,0,1,1], num_wanted = 3, use_limit = 1
Output: 16
Explanation: The subset chosen is the first and fourth item.

Example 4:

Input: values = [9,8,8,7,6], labels = [0,0,0,1,1], num_wanted = 3, use_limit = 2
Output: 24
Explanation: The subset chosen is the first, second, and fourth item.

 

Note:

  1. 1 <= values.length == labels.length <= 20000
  2. 0 <= values[i], labels[i] <= 20000
  3. 1 <= num_wanted, use_limit <= values.length

思路:贪心

class Solution(object):
    def largestValsFromLabels(self, values, labels, num_wanted, use_limit):
        """
        Greedily pick up to num_wanted items, at most use_limit per label,
        maximizing the sum of values.

        Strategy: sort items by value descending, then scan once, taking
        each item whose label still has quota. The greedy choice is safe
        because taking a larger value never blocks a better selection
        (swapping argument).

        :type values: List[int]
        :type labels: List[int]
        :type num_wanted: int
        :type use_limit: int
        :rtype: int
        """
        # Pair each value with its label, best values first.
        # Python's sort is stable, so equal values keep input order.
        pairs = sorted(zip(values, labels), key=lambda p: p[0], reverse=True)

        used = {}      # label -> how many items taken with that label
        total = 0      # running sum of selected values
        picked = 0     # number of items selected so far

        for value, label in pairs:
            if used.get(label, 0) >= use_limit:
                continue  # this label's quota is exhausted; skip item
            used[label] = used.get(label, 0) + 1
            total += value
            picked += 1
            if picked == num_wanted:
                break  # subset size cap reached; nothing better remains
        return total
    

 

import time import os import sys from media.sensor import * from media.display import * from media.media import * sensor = None try: print("camera_test") # 初始化摄像头 sensor = Sensor(width=640, height=640) sensor.reset() sensor.set_framesize(width=1280, height=720) sensor.set_pixformat(Sensor.RGB565) # 初始化显示 Display.init(Display.LT9611, to_ide=True) MediaManager.init() sensor.run() # 红色检测阈值 red_thresholds = [ (30, 100, 15, 127, 15, 127), (30, 100, -127, -15, -127, -15) ] # 方框定义 (x, y, w, h) boxes = { "B": (490, 0, 300, 720), # 蓝色方框 "R": (100, 210, 300, 300), # 红色方框 "Y": (880, 210, 300, 300) # 黄色方框 } # 颜色定义 colors = { "B": (0, 0, 255), "R": (255, 0, 0), "Y": (255, 255, 0) } clock = time.clock() while True: clock.tick() os.exitpoint() img = sensor.snapshot(chn=CAM_CHN_ID_0) # 1. 绘制方框 for name, box in boxes.items(): img.draw_rectangle(box[0], box[1], box[2], box[3], color=colors[name], thickness=2) # 2. 检测红色目标 detections = {name: False for name in boxes} blue_coord = None red_in_blue = None for name, box in boxes.items(): blobs = img.find_blobs( red_thresholds, False, box, x_stride=5, y_stride=5, pixels_threshold=1000, area_threshold=1000, merge=True, margin=10 ) if blobs: detections[name] = True if name == "B": # 蓝色方框 blob = blobs[0] blue_coord = (blob.cx(), blob.cy()) img.draw_cross(blue_coord[0], blue_coord[1], color=(0, 255, 255), size=8, thickness=2) # 仅在蓝色方框中查找红色目标 red_blobs = img.find_blobs( red_thresholds, False, box, x_stride=5, y_stride=5, pixels_threshold=100, # 降低阈值以便更容易检测 area_threshold=100, merge=True, margin=10 ) if red_blobs: # 找到最大的红色blob largest_blob = max(red_blobs, key=lambda b: b.pixels()) red_in_blue = (largest_blob.cx(), largest_blob.cy()) img.draw_cross(red_in_blue[0], red_in_blue[1], color=(255, 0, 0), size=8, thickness=2) # 在蓝色方框内显示红色目标坐标 img.draw_string_advanced( box[0] + 5, box[1] + 5, 10, f"Red: {red_in_blue[0]},{red_in_blue[1]}", color=(255, 0, 0), bg_color=(0, 0, 0) ) # 3. 
计算并显示结果 total = sum(detections.values()) status = f"{total}" img.draw_string_advanced(10, 10, 10,status, color=(255, 255, 0), bg_color=(0, 0, 0)) if blue_coord: coord = f"{blue_coord[0]},{blue_coord[1]}" img.draw_string_advanced(10, 40, 10,coord, color=(0, 255, 255), bg_color=(0, 0, 0)) # 显示帧率 img.draw_string_advanced(550, 10,10, f"{clock.fps():.1f}", color=(255, 255, 0), bg_color=(0, 0, 0)) img.compressed_for_ide() Display.show_image(img) # 控制台输出 if red_in_blue: print(f"蓝色方框内红色目标坐标: ({red_in_blue[0]}, {red_in_blue[1]})") else: print("蓝色方框内未检测到红色目标") print(f"状态: {status}") except KeyboardInterrupt: print("程序已停止") except Exception as e: print(f"错误: {e}") finally: if sensor: sensor.stop() Display.deinit() MediaManager.deinit() import os import ujson from media.sensor import * from media.display import * from media.media import * from time import * import nncase_runtime as nn import ulab.numpy as np import time import utime import image import random import gc display_mode="lcd" if display_mode=="lcd": DISPLAY_WIDTH = ALIGN_UP(800, 16) DISPLAY_HEIGHT = 480 else: DISPLAY_WIDTH = ALIGN_UP(1920, 16) DISPLAY_HEIGHT = 1080 OUT_RGB888P_WIDTH = ALIGN_UP(1280, 16) OUT_RGB888P_HEIGH = 720 root_path="/sdcard/num_classify/" config_path=root_path+"deploy_config.json" deploy_conf={} debug_mode=1 class ScopedTiming: def __init__(self, info="", enable_profile=True): self.info = info self.enable_profile = enable_profile def __enter__(self): if self.enable_profile: self.start_time = time.time_ns() return self def __exit__(self, exc_type, exc_value, traceback): if self.enable_profile: elapsed_time = time.time_ns() - self.start_time print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") def read_deploy_config(config_path): # 打开JSON文件以进行读取deploy_config with open(config_path, 'r') as json_file: try: # 从文件中加载JSON数据 config = ujson.load(json_file) # 打印数据(可根据需要执行其他操作) #print(config) except ValueError as e: print("JSON 解析错误:", e) return config # 任务后处理 def softmax(x): exp_x = np.exp(x - 
np.max(x)) return exp_x / np.sum(exp_x) def sigmoid(x): return 1 / (1 + np.exp(-x)) def classification(): print("start") # 使用json读取内容初始化部署变量 deploy_conf=read_deploy_config(config_path) kmodel_name=deploy_conf["kmodel_path"] labels=deploy_conf["categories"] confidence_threshold=deploy_conf["confidence_threshold"] img_size=deploy_conf["img_size"] num_classes=deploy_conf["num_classes"] cls_idx=-1 score=0.0 # init kpu and load kmodel kpu = nn.kpu() ai2d = nn.ai2d() kpu.load_kmodel(root_path+kmodel_name) ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8) ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) ai2d_builder = ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH], [1,3,img_size[0],img_size[1]]) # 初始化并配置sensor sensor = Sensor() sensor.reset() # 设置镜像 sensor.set_hmirror(False) # 设置翻转 sensor.set_vflip(False) # 通道0直接给到显示VO,格式为YUV420 sensor.set_framesize(width = DISPLAY_WIDTH, height = DISPLAY_HEIGHT) sensor.set_pixformat(PIXEL_FORMAT_YUV_SEMIPLANAR_420) # 通道2给到AI做算法处理,格式为RGB888 sensor.set_framesize(width = OUT_RGB888P_WIDTH , height = OUT_RGB888P_HEIGH, chn=CAM_CHN_ID_2) sensor.set_pixformat(PIXEL_FORMAT_RGB_888_PLANAR, chn=CAM_CHN_ID_2) # 绑定通道0的输出到vo sensor_bind_info = sensor.bind_info(x = 0, y = 0, chn = CAM_CHN_ID_0) Display.bind_layer(**sensor_bind_info, layer = Display.LAYER_VIDEO1) if display_mode=="lcd": # 设置为ST7701显示,默认800x480 Display.init(Display.ST7701, to_ide = True) else: # 设置为LT9611显示,默认1920x1080 Display.init(Display.LT9611, to_ide = True) #创建OSD图像 osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) try: # media初始化 MediaManager.init() # 启动sensor sensor.run() # init rgb888p_img = None ai2d_input_tensor = None data = np.ones((1,3,img_size[0],img_size[1]),dtype=np.uint8) ai2d_output_tensor = nn.from_numpy(data) while True: with ScopedTiming("total",debug_mode > 0): rgb888p_img = sensor.snapshot(chn=CAM_CHN_ID_2) if rgb888p_img == -1: print("capture_image failed") 
camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_2, rgb888p_img) continue # for rgb888planar if rgb888p_img.format() == image.RGBP888: ai2d_input = rgb888p_img.to_numpy_ref() ai2d_input_tensor = nn.from_numpy(ai2d_input) ai2d_builder.run(ai2d_input_tensor, ai2d_output_tensor) # set input kpu.set_input_tensor(0, ai2d_output_tensor) # run kmodel kpu.run() # get output results = [] for i in range(kpu.outputs_size()): output_data = kpu.get_output_tensor(i) result = output_data.to_numpy() del output_data results.append(result) if num_classes>2: softmax_res=softmax(results[0][0]) cls_idx=np.argmax(softmax_res) if softmax_res[cls_idx]>confidence_threshold: score=softmax_res[cls_idx] print("classification result:") print(labels[cls_idx]) print("score",score) else: cls_idx=-1 score=0.0 else: sigmoid_res=sigmoid(results[0][0][0]) if sigmoid_res>confidence_threshold: cls_idx=1 score=sigmoid_res print("classification result:") print(labels[1]) print("score",score) else: cls_idx=0 score=1-sigmoid_res print("classification result:") print(labels[0]) print("score",score) osd_img.clear() if cls_idx>=0: osd_img.draw_string_advanced(5,5,32,"result:"+labels[cls_idx]+" score:"+str(score),color=(0,255,0)) Display.show_image(osd_img, 0, 0, Display.LAYER_OSD3) rgb888p_img = None gc.collect() #用于需要调用gc.mem_alloc()的内存 except Exception as e: print(f"An error occurred during buffer used: {e}") finally: os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) del ai2d_input_tensor del ai2d_output_tensor #停止摄像头输出 sensor.stop() #去初始化显示设备 Display.deinit() #释放媒体缓冲区 MediaManager.deinit() gc.collect() time.sleep(1) nn.shrink_memory_pool() print("end") return 0 if __name__=="__main__": classification() 基于k230,要求1:将现在的两个任务代码合并在一起,合成一个完整的代码,要求功能不变,尽量简洁。要求2:将控制台输出简化,仅保留:识别到的数字,红色目标检测的数量,蓝色方框内识别到的红色矩形的中心坐标。
07-11
import time import os import sys import ustruct import ustruct from machine import UART from machine import FPIOA import os,sys import time from machine import UART from media.sensor import * from media.display import * from media.media import * # 初始化FPIOA和UART fpioa = FPIOA() fpioa.set_function(3, FPIOA.UART1_TXD) fpioa.set_function(4, FPIOA.UART1_RXD) uart = UART(UART.UART1, baudrate=115200) def create_packet(x, y, z): # 创建10字节缓冲区 packet = bytearray(10) mv = memoryview(packet) # 帧头 (2字节) mv[0:2] = b'\x2C\x12' # 数据部分:3个16位整数 (6字节) ustruct.pack_into('>HHH', mv, 2, x, y, z) # 计算校验和 (索引2-7的异或值) checksum = 0 for i in range(2, 8): checksum ^= mv[i] mv[8] = checksum # 帧尾 (1字节) mv[9] = 0x5B return packet sensor = None try: print("camera_test") # 初始化摄像头 sensor = Sensor(width=640, height=640) sensor.reset() sensor.set_framesize(width=1280, height=720) sensor.set_pixformat(Sensor.RGB565) # 初始化显示 Display.init(Display.LT9611, to_ide=True) MediaManager.init() sensor.run() # 红色检测阈值 red_thresholds = [ (30, 100, 15, 127, 15, 127), (30, 100, -127, -15, -127, -15) ] # 方框定义 (x, y, w, h) boxes = { "B": (490, 0, 300, 720), # 蓝色方框 "R": (100, 210, 300, 300), # 红色方框 "Y": (880, 210, 300, 300) # 黄色方框 } # 颜色定义 colors = { "B": (0, 0, 255), "R": (255, 0, 0), "Y": (255, 255, 0) } clock = time.clock() while True: clock.tick() os.exitpoint() img = sensor.snapshot(chn=CAM_CHN_ID_0) # 1. 绘制方框 for name, box in boxes.items(): img.draw_rectangle(box[0], box[1], box[2], box[3], color=colors[name], thickness=2) # 2. 
检测红色目标 detections = {name: False for name in boxes} blue_coord = None red_in_blue = None status_val = 0 # 初始化状态值 for name, box in boxes.items(): blobs = img.find_blobs( red_thresholds, False, box, x_stride=5, y_stride=5, pixels_threshold=1000, area_threshold=1000, merge=True, margin=10 ) if blobs: detections[name] = True if name == "B": # 蓝色方框 blob = blobs[0] blue_coord = (blob.cx(), blob.cy()) img.draw_cross(blue_coord[0], blue_coord[1], color=(0, 255, 255), size=8, thickness=2) # 在蓝色方框中查找红色目标 red_blobs = img.find_blobs( red_thresholds, False, box, x_stride=5, y_stride=5, pixels_threshold=100, area_threshold=100, merge=True, margin=10 ) if red_blobs: largest_blob = max(red_blobs, key=lambda b: b.pixels()) red_in_blue = (largest_blob.cx(), largest_blob.cy()) img.draw_cross(red_in_blue[0], red_in_blue[1], color=(255, 0, 0), size=8, thickness=2) img.draw_string_advanced( box[0] + 5, box[1] + 5, 10, f"Red: {red_in_blue[0]},{red_in_blue[1]}", color=(255, 0, 0), bg_color=(0, 0, 0) ) # 3. 计算状态值 total = sum(detections.values()) status_val = total # 保存为整数 status = f"{total}" # 显示用的字符串 # 4. 
创建并发送数据包 if red_in_blue: x_val = red_in_blue[0] y_val = red_in_blue[1] else: # 未检测到时发送默认值 x_val, y_val = 0, 0 packet = create_packet(x_val, y_val, status_val) uart.write(packet) # 发送数据包 # 显示信息 img.draw_string_advanced(10, 10, 10, status, color=(255, 255, 0), bg_color=(0, 0, 0)) if blue_coord: coord = f"{blue_coord[0]},{blue_coord[1]}" img.draw_string_advanced(10, 40, 10, coord, color=(0, 255, 255), bg_color=(0, 0, 0)) img.draw_string_advanced(550, 10, 10, f"{clock.fps():.1f}", color=(255, 255, 0), bg_color=(0, 0, 0)) img.compressed_for_ide() Display.show_image(img) # 控制台输出 if red_in_blue: print(f"蓝色方框内红色目标坐标: ({red_in_blue[0]}, {red_in_blue[1]})") else: print("蓝色方框内未检测到红色目标") print(f"状态: {status_val}") except KeyboardInterrupt: print("程序已停止") except Exception as e: print(f"错误: {e}") finally: if sensor: sensor.stop() Display.deinit() MediaManager.deinit() import os import time import ujson import nncase_runtime as nn import ulab.numpy as np from media.sensor import * from media.display import * from media.media import * # 显示模式配置 display_mode = "lcd" # "lcd" 或 "hdmi" if display_mode == "lcd": DISPLAY_WIDTH = ALIGN_UP(800, 16) DISPLAY_HEIGHT = 480 else: DISPLAY_WIDTH = ALIGN_UP(1920, 16) DISPLAY_HEIGHT = 1080 # 图像尺寸配置 OUT_RGB888P_WIDTH = ALIGN_UP(1280, 16) OUT_RGB888P_HEIGHT = 720 NUM_CLASSIFY_PATH = "/sdcard/num_classify/" CONFIG_PATH = NUM_CLASSIFY_PATH + "deploy_config.json" # 红色检测配置 RED_THRESHOLDS = [ (30, 100, 15, 127, 15, 127), (30, 100, -127, -15, -127, -15) ] BOXES = { "B": (490, 0, 300, 720), # 蓝色方框 "R": (100, 210, 300, 300), # 红色方框 "Y": (880, 210, 300, 300) # 黄色方框 } COLORS = { "B": (0, 0, 255), "R": (255, 0, 0), "Y": (255, 255, 0) } class ScopedTiming: """性能计时工具类""" def __init__(self, info="", enable_profile=True): self.info = info self.enable_profile = enable_profile def __enter__(self): if self.enable_profile: self.start_time = time.time_ns() return self def __exit__(self, exc_type, exc_value, traceback): if self.enable_profile: elapsed_time = 
time.time_ns() - self.start_time print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") def read_deploy_config(config_path): """读取部署配置文件""" with open(config_path, 'r') as json_file: return ujson.load(json_file) def softmax(x): """Softmax函数""" exp_x = np.exp(x - np.max(x)) return exp_x / np.sum(exp_x) def sigmoid(x): """Sigmoid函数""" return 1 / (1 + np.exp(-x)) def main(): """主函数""" # 读取数字分类配置 deploy_conf = read_deploy_config(CONFIG_PATH) kmodel_name = deploy_conf["kmodel_path"] labels = deploy_conf["categories"] confidence_threshold = deploy_conf["confidence_threshold"] img_size = deploy_conf["img_size"] num_classes = deploy_conf["num_classes"] # 初始化KPU kpu = nn.kpu() kpu.load_kmodel(NUM_CLASSIFY_PATH + kmodel_name) # 初始化AI2D预处理 ai2d = nn.ai2d() ai2d.set_dtype( nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8 ) ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) ai2d_builder = ai2d.build([1, 3, OUT_RGB888P_HEIGHT, OUT_RGB888P_WIDTH], [1, 3, img_size[0], img_size[1]]) # 初始化传感器 sensor = Sensor() sensor.reset() sensor.set_hmirror(False) sensor.set_vflip(False) # 设置多路输出 sensor.set_framesize(width=DISPLAY_WIDTH, height=DISPLAY_HEIGHT) # 通道0: 显示 sensor.set_pixformat(PIXEL_FORMAT_YUV_SEMIPLANAR_420, chn=CAM_CHN_ID_0) sensor.set_framesize(width=OUT_RGB888P_WIDTH, height=OUT_RGB888P_HEIGHT, chn=CAM_CHN_ID_1) # 通道1: 红色检测 sensor.set_pixformat(PIXEL_FORMAT_RGB_565, chn=CAM_CHN_ID_1) sensor.set_framesize(width=OUT_RGB888P_WIDTH, height=OUT_RGB888P_HEIGHT, chn=CAM_CHN_ID_2) # 通道2: 数字分类 sensor.set_pixformat(PIXEL_FORMAT_RGB_888_PLANAR, chn=CAM_CHN_ID_2) # 绑定显示 sensor_bind_info = sensor.bind_info(x=0, y=0, chn=CAM_CHN_ID_0) if display_mode == "lcd": Display.init(Display.ST7701, to_ide=True) else: Display.init(Display.LT9611, to_ide=True) Display.bind_layer(**sensor_bind_info, layer=Display.LAYER_VIDEO1) # 创建OSD层用于绘制结果 osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) # 初始化媒体 MediaManager.init() sensor.run() 
# 初始化AI2D输入输出张量 data = np.ones((1, 3, img_size[0], img_size[1]), dtype=np.uint8) ai2d_output_tensor = nn.from_numpy(data) # 主循环 clock = time.clock() try: while True: clock.tick() os.exitpoint() # === 红色目标检测任务 === rgb565_img = sensor.snapshot(chn=CAM_CHN_ID_1) if rgb565_img == -1: continue detections = {name: False for name in BOXES} red_in_blue = None red_count = 0 for name, box in BOXES.items(): blobs = rgb565_img.find_blobs( RED_THRESHOLDS, False, box, x_stride=5, y_stride=5, pixels_threshold=1000, area_threshold=1000, merge=True, margin=10 ) if blobs: detections[name] = True red_count += 1 if name == "B": # 蓝色方框 blob = blobs[0] # 在蓝色方框中查找红色目标 red_blobs = rgb565_img.find_blobs( RED_THRESHOLDS, False, box, x_stride=5, y_stride=5, pixels_threshold=100, area_threshold=100, merge=True, margin=10 ) if red_blobs: largest_blob = max(red_blobs, key=lambda b: b.pixels()) red_in_blue = (largest_blob.cx(), largest_blob.cy()) # === 数字分类任务 === rgb888p_img = sensor.snapshot(chn=CAM_CHN_ID_2) if rgb888p_img == -1: continue cls_idx = -1 score = 0.0 digit_label = "N/A" if rgb888p_img.format() == image.RGBP888: ai2d_input = rgb888p_img.to_numpy_ref() ai2d_input_tensor = nn.from_numpy(ai2d_input) # 预处理并推理 ai2d_builder.run(ai2d_input_tensor, ai2d_output_tensor) kpu.set_input_tensor(0, ai2d_output_tensor) kpu.run() # 获取输出 results = [] for i in range(kpu.outputs_size()): output_data = kpu.get_output_tensor(i) result = output_data.to_numpy() results.append(result) # 处理分类结果 if num_classes > 2: softmax_res = softmax(results[0][0]) cls_idx = np.argmax(softmax_res) if softmax_res[cls_idx] > confidence_threshold: score = softmax_res[cls_idx] digit_label = labels[cls_idx] else: sigmoid_res = sigmoid(results[0][0][0]) if sigmoid_res > confidence_threshold: cls_idx = 1 score = sigmoid_res digit_label = labels[1] else: cls_idx = 0 score = 1 - sigmoid_res digit_label = labels[0] # === 控制台输出(简化版) === print(f"识别数字: {digit_label}") print(f"红色目标数量: {red_count}") if red_in_blue: print(f"蓝色框内红色坐标: 
({red_in_blue[0]}, {red_in_blue[1]})") else: print("蓝色框内红色坐标: 无") # === 屏幕显示 === osd_img.clear() # 绘制方框 for name, box in BOXES.items(): osd_img.draw_rectangle(box[0], box[1], box[2], box[3], color=COLORS[name], thickness=2) # 绘制红色目标坐标 if red_in_blue: osd_img.draw_cross(red_in_blue[0], red_in_blue[1], color=(255, 0, 0), size=8, thickness=2) osd_img.draw_string_advanced( BOXES["B"][0] + 5, BOXES["B"][1] + 5, 10, f"Red: {red_in_blue[0]},{red_in_blue[1]}", color=(255, 0, 0), bg_color=(0, 0, 0) ) # 显示红色目标数量 osd_img.draw_string_advanced(10, 10, 10, f"Red Count: {red_count}", color=(255, 255, 0), bg_color=(0, 0, 0)) # 显示数字分类结果 if cls_idx >= 0: osd_img.draw_string_advanced( 10, 40, 10, f"Digit: {digit_label} ({score:.2f})", color=(0, 255, 0), bg_color=(0, 0, 0) ) # 显示帧率 osd_img.draw_string_advanced(550, 10, 10, f"FPS: {clock.fps():.1f}", color=(255, 255, 0), bg_color=(0, 0, 0)) Display.show_image(osd_img, 0, 0, Display.LAYER_OSD3) except KeyboardInterrupt: print("程序已停止") except Exception as e: print(f"错误: {e}") finally: # 清理资源 sensor.stop() Display.deinit() MediaManager.deinit() del ai2d_output_tensor nn.shrink_memory_pool() print("资源已释放") if __name__ == "__main__": main() 将两个代码合成到一起,保证核心功能不变
最新发布
07-12
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值