Red packet [binary search]

This post looks at a fun red-packet distribution problem: how to make sure you end up as the "lucky man". Given the total amount, the number of people, and the money already handed out, the algorithm computes the minimum amount you must receive to be guaranteed the luckiest person in the distribution.


Red packet
Time Limit:1000MS     Memory Limit:65535KB     64bit IO Format:

Description

New Year is coming! Our big boss Wine93 will distribute some “Red Package”, just like Alipay and Wechat.

Wine93 has m yuan. He decides to distribute it among n people, and everyone must get some money (0 yuan is not allowed, and everyone's amount is an integer). Now k people have already gotten money, and it is your turn to receive a "Red Package". You want to know: at least how much money must be given to you so that you are guaranteed to become the "lucky man"? And all m yuan must be used up.

Note that if someone's money is strictly more than everyone else's, then he is the "lucky man".


Input

Input starts with an integer T (T <= 50) denoting the number of test cases.
For each test case, three integers n, m, k (1 <= k < n <= 100000, 0 < m <= 100000000) will be given.
Next line contains k integers, denoting the money that k people get. You can assume that the k integers’ summation is no more than m. 

Output

Output the least money that you need to become the "lucky man"; if it is impossible, output "Impossible" (no quotes).

Sample Input

3
3 5 2
2 1
4 10 2
2 3
4 15 2
3 5

Sample Output

Impossible
4
6
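
To see where the second sample's answer comes from: 2 of the 4 people have already taken 2 and 3 yuan, leaving 10 - 2 - 3 = 5 yuan for you and one more person. That last person must get at least 1 yuan, so the most you can take is 4. You also need strictly more than 3 to beat the current maximum, and if you take 4 the last person gets 1, so 4 is both sufficient and minimal, matching the output.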


What follows is just my own clumsy rambling; experts, please go easy on me.

#include <cstdio>
#include <algorithm>
using namespace std;

int main()
{
    int t, n, m, k, i, a;
    scanf("%d", &t);
    while (t--)
    {
        int sum = 0;   // total money already taken by the k people
        int mm = 0;    // maximum amount among the k people
        scanf("%d %d %d", &n, &m, &k);
        for (i = 0; i < k; i++)
        {
            scanf("%d", &a);
            sum += a;
            mm = max(mm, a);
        }
        int ss = m - sum - (n - k - 1); // the most you could possibly get (every later person gets just 1 yuan)
        int ma = mm + 1;                // the least you must get to beat the first k people
        if (ss <= mm)
            printf("Impossible\n");
        else
        {
            int left = ma, right = ss;
            int mid, ans = 0;
            while (left <= right)
            {
                mid = (left + right) / 2;
                // If you take mid, the remaining m - sum - mid yuan is split among the other
                // n - k - 1 people, each getting at least 1 yuan, so the richest of them can
                // receive at most ss - mid + 1. You are the lucky man only if mid exceeds
                // that (mid > mm is already guaranteed by the search range).
                if (mid > ss - mid + 1)
                {
                    ans = mid;
                    right = mid - 1;
                }
                else
                    left = mid + 1;
            }
            printf("%d\n", ans);
        }
    }
    return 0;
}
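
As an aside, the binary search is not strictly necessary: the check mid > ss - mid + 1 is monotone in mid, so the smallest valid amount can be written down directly from 2*mid > ss + 1. Below is a minimal sketch of that closed-form variant; this is my own rewrite of the same idea under the same reading of the problem, not the original submission.

#include <cstdio>
#include <algorithm>
using namespace std;

int main()
{
    int t;
    scanf("%d", &t);
    while (t--)
    {
        int n, m, k, a, sum = 0, mm = 0;
        scanf("%d %d %d", &n, &m, &k);
        for (int i = 0; i < k; i++)
        {
            scanf("%d", &a);
            sum += a;
            mm = max(mm, a);
        }
        int ss = m - sum - (n - k - 1);   // the most you could possibly receive
        // Need x > mm and x > ss - x + 1, i.e. 2x > ss + 1, so x >= (ss + 1) / 2 + 1.
        int need = max(mm + 1, (ss + 1) / 2 + 1);
        if (need > ss)
            printf("Impossible\n");
        else
            printf("%d\n", need);
    }
    return 0;
}

It prints the same answers for the three samples above.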

