Loading data from .plist files


Saving and Reloading from .plist files…

A great way to store dictionary data that does not change during runtime is in a .plist file. Say you want to organize some data hierarchically, or you want to store the navigation structure of a drill-down somewhere more convenient (see the drill-down save example in the Apple docs); in those cases a .plist file is a great way to go.
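
To make that concrete, here is a sketch of what a small hierarchical plist might look like on disk (hypothetical keys; this is the XML form that Xcode's property list editor produces):

<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
    <key>Fruits</key>
    <array>
        <string>Apple</string>
        <string>Banana</string>
    </array>
    <key>Vegetables</key>
    <array>
        <string>Carrot</string>
    </array>
</dict>
</plist>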

Here’s a quick example of how to restore data from a plist file. I’ll use a plist file that you can find in every app out there: Info.plist

Sometimes it’s useful to display a version number on a splash view, and here’s how you can do that using the CFBundleVersion value from Info.plist.


// Locate Info.plist inside the app bundle and parse it into a dictionary.
NSString *path = [[NSBundle mainBundle] bundlePath];
NSString *finalPath = [path stringByAppendingPathComponent:@"Info.plist"];
NSDictionary *plistData = [NSDictionary dictionaryWithContentsOfFile:finalPath]; // autoreleased; no retain needed for local use

// Put the version string into a small label (frame values are just an example).
versionLabel = [[UILabel alloc] initWithFrame:CGRectMake(100, 100, 60, 25)];
versionLabel.backgroundColor = [UIColor clearColor];
versionLabel.textColor = [UIColor whiteColor];
versionLabel.font = [UIFont systemFontOfSize:10];
NSString *versionString = [NSString stringWithFormat:@"v%@", [plistData objectForKey:@"CFBundleVersion"]];
versionLabel.text = versionString;
[self.view addSubview:versionLabel];
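
As a side note, NSBundle can also hand you Info.plist values directly, so you can skip building the path yourself. A shorter variant of the same lookup:

NSString *version = [[NSBundle mainBundle] objectForInfoDictionaryKey:@"CFBundleVersion"];
versionLabel.text = [NSString stringWithFormat:@"v%@", version];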

The above content is from http://www.icodeblog.com/2009/02/14/loading-data-from-plist-files/. I tried the code, but it did not work: NSLog showed that the path I got was the simulator path, so it seems "NSString *path = [[NSBundle mainBundle] bundlePath];" was not working correctly for me. I fixed the code like this:

// Look up the app's Documents directory and load data.plist from it.
NSArray *documentPaths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
NSString *documentDir = [documentPaths objectAtIndex:0];
NSString *plistPath = [documentDir stringByAppendingPathComponent:@"data.plist"];

NSDictionary *plistData = [NSDictionary dictionaryWithContentsOfFile:plistPath];
NSString *serverString = [NSString stringWithFormat:@"%@", [plistData objectForKey:@"SERVER"]];

NSLog(@"plist path: %@", plistPath);
NSLog(@"data.plist value SERVER: %@", serverString);


and it worked.
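
For the saving half promised in the heading: the app bundle is read-only on the device, so a plist you intend to modify belongs in Documents, and NSDictionary can serialize itself there with writeToFile:atomically:. A minimal sketch, assuming the same data.plist name and SERVER key as above (the URL value is just a placeholder):

// Build a small dictionary and write it to Documents as data.plist.
NSArray *documentPaths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
NSString *documentDir = [documentPaths objectAtIndex:0];
NSString *savePath = [documentDir stringByAppendingPathComponent:@"data.plist"];

NSDictionary *dict = [NSDictionary dictionaryWithObject:@"http://example.com" forKey:@"SERVER"];
BOOL saved = [dict writeToFile:savePath atomically:YES]; // atomic write goes through a temp file first
NSLog(@"data.plist saved: %@", saved ? @"YES" : @"NO");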

 
