display_res_icon.h

This note shows how the window procedure WndProc and the dialog procedure About are declared in a Windows application, and how the resource identifiers are defined: the Exit command IDM_EXIT, the Test command IDM_TEST, and the About-dialog command IDM_ABOUT.

  name="google_ads_frame" marginwidth="0" marginheight="0" src="http://pagead2.googlesyndication.com/pagead/ads?client=ca-pub-5572165936844014&dt=1194442938015&lmt=1194190197&format=336x280_as&output=html&correlator=1194442937843&url=file%3A%2F%2F%2FC%3A%2FDocuments%2520and%2520Settings%2Flhh1%2F%E6%A1%8C%E9%9D%A2%2FCLanguage.htm&color_bg=FFFFFF&color_text=000000&color_link=000000&color_url=FFFFFF&color_border=FFFFFF&ad_type=text&ga_vid=583001034.1194442938&ga_sid=1194442938&ga_hid=1942779085&flash=9&u_h=768&u_w=1024&u_ah=740&u_aw=1024&u_cd=32&u_tz=480&u_java=true" frameborder="0" width="336" scrolling="no" height="280" allowtransparency="allowtransparency"> #define IDM_EXIT           100
#define IDM_TEST           200
#define IDM_ABOUT          301

LRESULT CALLBACK WndProc  (HWND, UINT, WPARAM, LPARAM);
LRESULT CALLBACK About    (HWND, UINT, WPARAM, LPARAM);
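
For context, below is a minimal sketch of how these declarations are typically wired together in a standard Win32 application template. The dialog resource ID IDD_ABOUTBOX and the message-box text are assumptions for illustration and are not part of the original header.

#include <windows.h>
#include "display_res_icon.h"            /* IDM_EXIT, IDM_TEST, IDM_ABOUT */

#define IDD_ABOUTBOX 103                 /* assumed About-dialog resource ID */

LRESULT CALLBACK WndProc(HWND hWnd, UINT message, WPARAM wParam, LPARAM lParam)
{
    switch (message)
    {
    case WM_COMMAND:
        switch (LOWORD(wParam))          /* menu/accelerator command ID */
        {
        case IDM_ABOUT:                  /* open the About box via its dialog procedure */
            DialogBox(GetModuleHandle(NULL), MAKEINTRESOURCE(IDD_ABOUTBOX),
                      hWnd, (DLGPROC)About);
            break;
        case IDM_TEST:                   /* placeholder handling for the Test command */
            MessageBox(hWnd, TEXT("Test command selected."), TEXT("Test"), MB_OK);
            break;
        case IDM_EXIT:
            DestroyWindow(hWnd);
            break;
        default:
            return DefWindowProc(hWnd, message, wParam, lParam);
        }
        break;
    case WM_DESTROY:
        PostQuitMessage(0);
        break;
    default:
        return DefWindowProc(hWnd, message, wParam, lParam);
    }
    return 0;
}

/* About is declared above as LRESULT CALLBACK (the older template style) and is
   cast to DLGPROC when passed to DialogBox; it handles WM_INITDIALOG and closes
   on OK or Cancel. */
LRESULT CALLBACK About(HWND hDlg, UINT message, WPARAM wParam, LPARAM lParam)
{
    switch (message)
    {
    case WM_INITDIALOG:
        return TRUE;
    case WM_COMMAND:
        if (LOWORD(wParam) == IDOK || LOWORD(wParam) == IDCANCEL)
        {
            EndDialog(hDlg, LOWORD(wParam));
            return TRUE;
        }
        break;
    }
    return FALSE;
}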
