Python anonymous functions, part 2 (usage)

```python
# pass an anonymous function as an argument
def test(a, b, func):
    result = func(a, b)
    print(result)

test(11, 22, lambda x, y: x + y)  # the lambda is passed in by reference; prints 33
```

An anonymous function lets the caller pass the operation rule itself into a function; a couple of extra calls are shown below.

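For instance, the same `test` function accepts any two-argument callable, so swapping the rule never requires touching `test` (illustrative calls, not part of the original snippet):

```python
test(11, 22, lambda x, y: x - y)  # prints -11
test(11, 22, lambda x, y: x * y)  # prints 242
test(11, 22, max)                 # any callable works: prints 22
```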
```python
import os
from glob import glob

import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from joblib import Parallel, delayed
from PIL import Image, ImageDraw, ImageFont

from utils.function import (XYZ2BT2020_MAT, XYZ2P3_MAT, XYZ2SRGB_MAT,
                            adjust_saturation_hisi_CA, get_cross_dimension)
from utils.gamma_opt import gamma_hlg, gamma_ns
from utils.parse import LSC_OSR, HistoryFrame, ImageInfo, QImageInfo
from utils.xstyle_ce_opt import CESimulator
from utils.xstyle_lut_opt import apply_lut3d

matplotlib.use("Agg")
os.chdir(os.path.dirname(__file__))


def process(image_info: ImageInfo, qimage_info: QImageInfo, lightness: float):
    picture_dict = {}

    # 1 HVM: mscStats @ msc2xyz @ caMat @ LCH @ gamma
    simu_msc2xyz = (get_cross_dimension(image_info.msc_golden_raw.clip(0, None)) @ image_info.msc2xyz_mat).reshape(-1, 3)
    simu_msc2xyz = adjust_saturation_hisi_CA(simu_msc2xyz, image_info.hvm_xy1)
    simu_msc2xyz /= (simu_msc2xyz.max() * lightness)
    simu_msc2xyz = (simu_msc2xyz @ image_info.color_space_trans_mat).clip(0, 1).reshape(48, 64, 3)
    simu_msc2xyz_nonlinear = image_info.gamma(simu_msc2xyz).clip(0, 1)
    picture_dict["p1"] = np.rot90(simu_msc2xyz_nonlinear, image_info.config['msc_rotation'])  # handle a flipped MSC mount
    picture_dict['p1_name'] = 'P1 MSC目标效果(HVM)'
    picture_dict['p1_description'] = 'mscStats @ msc2xyz @ caMat @ LCH @ gamma'
    # if image_info.cam_type == 'tele':
    #     msc_xyz_pipe = (image_info.msc_xyz_pipe @ image_info.color_space_trans_mat).clip(0, 1).reshape(48, 64, 3)
    #     pipe_msc2xyz_nonlinear = image_info.gamma(msc_xyz_pipe).clip(0, 1)
    #     if np.sum(msc_xyz_pipe) > 0:
    #         picture_dict["p1"] = np.rot90(pipe_msc2xyz_nonlinear, image_info.config['msc_rotation'])  # handle a flipped MSC mount
    #         simu_msc2xyz = msc_xyz_pipe

    # 2 CT 3*3: mscStats @ msc2cam @ baseCT_cam2XYZ @ gamma
    simu_msc2cam_CT = (get_cross_dimension(
        (image_info.msc_golden_raw @ image_info.msc2cam_basect).clip(0, None).reshape(-1, 3)) @ image_info.cam2std_basect).clip(0, None)
    simu_msc2cam_CT = (simu_msc2cam_CT @ image_info.color_space_trans_mat).clip(0, 1).reshape(48, 64, 3)
    simu_msc2cam_CT *= (simu_msc2xyz.mean() / simu_msc2cam_CT.mean())
    simu_msc2cam_CT_nonlinear = image_info.gamma(simu_msc2cam_CT).clip(0, 1)
    # picture_dict["p2_divcc3x3"] = divide_matrix((image_info.cam2std @ image_info.ca_mat.T @ image_info.color_space_trans_mat).T)
    if len(image_info.history_frame.face_pos_left_top_x) > 0:
        try:
            coordinate = [image_info.history_frame.face_pos_left_top_x[-1] / 2,
                          image_info.history_frame.face_pos_left_top_y[-1] / 2,
                          image_info.history_frame.face_pos_width[-1] / 2,
                          image_info.history_frame.face_pos_height[-1] / 2]
            h1, h2 = int((coordinate[1] - 14) / 7), int((coordinate[1] + coordinate[3] - 14) / 7)
            w1, w2 = int((coordinate[0] - 20) / 7), int((coordinate[0] + coordinate[2] - 20) / 7)
            # draw the face box in red
            simu_msc2cam_CT_nonlinear[h1:h2 + 0, w1] = [1, 0, 0]
            simu_msc2cam_CT_nonlinear[h1:h2 + 0, w2] = [1, 0, 0]
            simu_msc2cam_CT_nonlinear[h1, w1:w2 + 0] = [1, 0, 0]
            simu_msc2cam_CT_nonlinear[h2, w1:w2 + 0] = [1, 0, 0]
            # --- brighten the face region
            # simu_msc2cam_CT_nonlinear[h1:h2, w1:w2] = (simu_msc2cam_CT_nonlinear[h1:h2, w1:w2] * 1.5).clip(0, 1)
            simu_msc2cam_CT_nonlinear[h1:h2, w1:w2] /= (simu_msc2cam_CT_nonlinear[h1 + 0:h2, w1 + 0:w2].max() / 1.5)
        except Exception:
            pass  # face box out of range: skip the overlay
    picture_dict["p2"] = np.rot90(simu_msc2cam_CT_nonlinear, image_info.config['msc_rotation'])  # handle a flipped MSC mount
    picture_dict['p2_name'] = 'P2 虚拟CAM,baseCT效果'
    picture_dict['p2_description'] = '(mscStats @ msc2cam) @ cam2xyz @ gamma'

    # 3 baseCT 3*6: camStats @ baseCT_cam2XYZ @ gamma
    simu_baseCT = ((get_cross_dimension(image_info.cam_golden_raw.clip(0, None)) @ image_info.cam2std_basect).clip(0, None))
    simu_baseCT = (simu_baseCT @ image_info.color_space_trans_mat).clip(0, 1).reshape(64, 64, 3)
    simu_baseCT *= (simu_msc2xyz.mean() / simu_baseCT.mean())
    simu_baseCT_nonlinear = image_info.gamma(simu_baseCT).clip(0, 1)
    picture_dict["p3"] = simu_baseCT_nonlinear
    picture_dict["p3_name"] = "P3 真实CAM,baseCT效果"
    picture_dict["p3_description"] = "camGoldenStats @ cam2xyz_baseCT @ gamma"
    # 3 baseCT 3*6 camStats @ baseCT_cam2XYZ @ gamma: test
    # curr_cam2std_basect = np.array([0.70212766, 0.14216634, -0.1615087,
    #                                 -0.11035156, 0.67480469, -0.89160156,
    #                                 0.35798817, -0.13510848, 2.55325444,
    #                                 0.05150692, 0.18464747, -0.4120554,
    #                                 0.16797676, 0.01757896, 0.67288365,
    #                                 0.0637888, -0.03140371, 1.26203678]).reshape(6, 3)
    # simu_baseCT = ((get_cross_dimension(image_info.cam_golden_raw.clip(0, None)) @ curr_cam2std_basect).clip(0, None))
    # # print(image_info.cam2std_basect)
    # simu_baseCT = (simu_baseCT @ image_info.color_space_trans_mat).clip(0, 1).reshape(64, 64, 3)
    # simu_baseCT *= (simu_msc2xyz.mean() / simu_baseCT.mean())
    # simu_baseCT_nonlinear = image_info.gamma(simu_baseCT).clip(0, 1)
    # picture_dict["p3"] = simu_baseCT_nonlinear
    # picture_dict["p3_name"] = "P3 真实CAM,baseCT效果,Jock"
    # picture_dict["p3_description"] = "camGoldenStats @ cam2xyz_baseCT @ gamma"

    # 4 refCT: camGoldenStats @ cam2std_refCT @ gamma
    cam_refCT = (get_cross_dimension(image_info.cam_golden_raw.clip(0, None).reshape(-1, 3)) @ image_info.cam2std_refct).clip(0, None)
    cam_refCT = (cam_refCT @ image_info.color_space_trans_mat).clip(0, 1).reshape(64, 64, 3)
    cam_refCT *= (simu_msc2xyz.mean() / cam_refCT.mean())
    cam_refCT_nonlinear = image_info.gamma(cam_refCT).clip(0, 1)
    picture_dict["p4"] = cam_refCT_nonlinear
    picture_dict["p4_name"] = "P4 真实CAM,refCT效果"
    picture_dict["p4_description"] = "camGoldenStats @ cam2xyz_refCT @ gamma"

    # 5 filtered cam result: camStats @ pipe_awb_filter @ algo_hdcc @ gamma
    cam_filter = get_cross_dimension((image_info.cam_raw.reshape(-1, 3) * image_info.pipe_awb_filter).clip(0, 1))
    cam_filter = (cam_filter @ image_info.algo_ccm).reshape(64, 64, 3).clip(0, 1)
    cam_filter *= (simu_msc2xyz.mean() / cam_filter.mean())
    cam_filter_nonlinear = image_info.gamma(cam_filter).clip(0, 1)
    picture_dict["p5"] = cam_filter_nonlinear
    picture_dict['p5_name'] = 'P5 真实CAM,ISP滤波'
    picture_dict['p5_description'] = 'camStats @ pipe_awb_filter @ algo_hdcc @ gamma'

    # 6 cam result after CCGM: camStats @ pipe_awb_filter @ pipe_hdcc @ gamma
    cam_ccgm = get_cross_dimension((image_info.cam_raw.reshape(-1, 3) * image_info.pipe_awb_filter).clip(0, 1))
    cam_ccgm = (cam_ccgm @ image_info.pipe_ccm).reshape(64, 64, 3).clip(0, 1)
    cam_ccgm *= (simu_msc2xyz.mean() / cam_ccgm.mean())
    cam_ccgm_nonlinear = image_info.gamma(cam_ccgm).clip(0, 1)
    picture_dict["p6"] = cam_ccgm_nonlinear
    picture_dict["p6_name"] = 'P6 真实CAM,CCGM后'
    picture_dict["p6_description"] = 'camStats @ pipe_awb_filter @ pipe_hdcc @ gamma'

    xstyle_ce_opt = CESimulator()
    # Preview:
    # 1. xstyle on:  preview BE + sync-lut + xstyle-lut + xstyle-ce
    # 2. xstyle off: preview BE + sync-lut + real-lut + xstyle-ce
    if qimage_info.preview_be is not None:
        picture_dict['p7'] = qimage_info.preview_be
        picture_dict['p7_name'] = 'P7 预览,LUT前的效果'
        picture_dict['p7_description'] = 'rawNR(rdp) + awb + hdcc + aitm'
        if qimage_info.lut_sync is not None:
            preview_be_synclut = apply_lut3d(qimage_info.preview_be, qimage_info.lut_sync)
            picture_dict['p8'] = preview_be_synclut
            picture_dict['p8_name'] = 'P8 预览,DytoneSync效果'
            picture_dict['p8_description'] = 'P7 + sync-lut'
        if qimage_info.xstyle_enable and qimage_info.xstyle_enable != 'unknown':
            if qimage_info.lut_xstyle is not None:
                preview_be_xstylelut = apply_lut3d(qimage_info.preview_be, qimage_info.lut_xstyle)
                picture_dict['p9'] = preview_be_xstylelut
                picture_dict['p9_name'] = 'P9 预览,XStyle-Lut效果'
                picture_dict['p9_description'] = 'P7 + xstyle-lut'
        else:
            if qimage_info.lut_real is not None:
                preview_be_reallut = apply_lut3d(qimage_info.preview_be, qimage_info.lut_real)
                picture_dict['p9'] = preview_be_reallut
                picture_dict['p9_name'] = 'P9 预览,真实性Lut效果'
                picture_dict['p9_description'] = 'P7 + real-lut'
        if qimage_info.lut_pre is not None:
            preview_be_lut = apply_lut3d(qimage_info.preview_be, qimage_info.lut_pre)
            picture_dict['p10'] = preview_be_lut
            picture_dict['p10_name'] = 'P10 预览,final-lut效果'
            picture_dict['p10_description'] = 'P7 + final-lut'
            if qimage_info.xstyle_enable and qimage_info.xstyle_enable != 'unknown' and qimage_info.scene_type not in ['flower', 'plants']:
                _preview_be_lut_ce = xstyle_ce_opt.forward(qimage_info.ce_data, preview_be_lut, None)
                picture_dict['p11'] = _preview_be_lut_ce  # preview + final_lut + xstyle_yuv
                picture_dict['p11_name'] = 'P11 预览,XStyle后的效果'
                picture_dict['p11_description'] = 'P10 + xstyle-ce'

    # preview dytonesync
    if qimage_info.dytonesync_ref is not None:
        picture_dict['p12'] = qimage_info.dytonesync_ref  # reference thumbnail fed to dytoneSync; cap/sdr -> P3, CUVA -> BT2020
        picture_dict['p12_name'] = 'P12 MSC目标效果(DytoneSync)'
        picture_dict['p12_description'] = 'mscRaw + ...(Preprocess)'
        picture_dict['p13'] = qimage_info.dytonesync_input  # preview thumbnail fed to dytoneSync; cap/sdr -> P3, CUVA -> BT2020
        picture_dict['p13_name'] = 'P13 预览(),DytoneSync前'
        picture_dict['p13_description'] = 'P7 + ...(Preprocess)'
        _dytonesync_input_synclut = apply_lut3d(qimage_info.dytonesync_input, qimage_info.lut_sync)
        picture_dict['p14'] = _dytonesync_input_synclut
        picture_dict['p14_name'] = 'P14 预览(),DytoneSync后'
        picture_dict['p14_description'] = 'P13 + sync-lut'

    # Capture:
    # 1. capture dytonesync off
    #    without Net2: capture BE + lutcap + xstyle-ce
    #    with Net2:    capture BE + Net2
    # 2. capture dytonesync on
    #    without Net2: capture BE + sync-lut(cap) + lutpre + xstyle-ce
    #    with Net2:    capture BE + sync-lut(cap) + sync-lut(pre) + Net2
    if qimage_info.capture_be is not None:
        picture_dict["p15"] = qimage_info.capture_be
        picture_dict["p15_name"] = 'P15 拍照,AirawIspIn的效果'
        picture_dict['p15_description'] = 'MEF + awb + hdcc + aitone-net1'
        if qimage_info.dycap_merged_lut is not None:
            if 'net2yuv' in qimage_info.model_name_version:
                capture_be_mergelut = apply_lut3d(qimage_info.capture_be, qimage_info.dycap_merged_lut)
                picture_dict['p16'] = capture_be_mergelut
                picture_dict['p16_name'] = 'P16 拍照,DytoneSync红枫效果'
                picture_dict['p16_description'] = 'P15 + sync-msc-lut'
            else:
                capture_be_synclut = apply_lut3d(qimage_info.capture_be, qimage_info.dycap_sync_lut)
                picture_dict['p16'] = capture_be_synclut
                picture_dict['p16_name'] = 'P16 拍照,DytoneSync预览效果'
                picture_dict['p16_description'] = 'P15 + sync-pre-lut'
                if qimage_info.lut_pre is not None:
                    capture_be_synclut_lut = apply_lut3d(capture_be_synclut, qimage_info.lut_pre)
                    picture_dict['p17'] = capture_be_synclut_lut
                    picture_dict['p17_name'] = 'P17 拍照,final-lut效果'
                    picture_dict['p17_description'] = 'P15 + final-lut'
                    if qimage_info.xstyle_enable and qimage_info.xstyle_enable != 'unknown' and qimage_info.scene_type not in ['flower', 'plants']:
                        capture_be_synclut_lut_ce = xstyle_ce_opt.forward(qimage_info.ce_data, capture_be_synclut_lut, None)
                        picture_dict['p18'] = capture_be_synclut_lut_ce
                        picture_dict['p18_name'] = 'P18 拍照,XStyle后的效果'
                        picture_dict['p18_description'] = 'P17 + xstyle-ce'
        else:
            if 'net2yuv' not in qimage_info.model_name_version and qimage_info.lut_cap is not None:
                capture_be_lut = apply_lut3d(qimage_info.capture_be, qimage_info.lut_cap)
                picture_dict['p17'] = capture_be_lut
                picture_dict['p17_name'] = 'P17 拍照,final-lut效果'
                picture_dict['p17_description'] = 'P15 + final-lut'
                if qimage_info.xstyle_enable and qimage_info.xstyle_enable != 'unknown' and qimage_info.scene_type not in ['flower', 'plants']:
                    capture_be_lut_ce = xstyle_ce_opt.forward(qimage_info.ce_data, capture_be_lut, None)
                    picture_dict['p18'] = capture_be_lut_ce  # capture + final_lut + xstyle_yuv
                    picture_dict['p18_name'] = 'P18 拍照,XStyle后的效果'
                    picture_dict['p18_description'] = 'P17 + xstyle-ce'

    # RGB2YUV output
    if qimage_info.captureOut is not None:
        picture_dict["p16"] = qimage_info.captureOut
        picture_dict['p16_name'] = 'P16 拍照,airawRgb2yuvOut的效果'
        picture_dict['p16_description'] = 'P15 + RGB2YUV Process'
    # capture net2 output
    if qimage_info.rgb_net2 is not None:
        picture_dict["p18"] = qimage_info.rgb_net2
        picture_dict['p18_name'] = 'P18 拍照,Net2/LUT3D后的效果'
        picture_dict['p18_description'] = 'P16 + net2/LUT3D, or rather, YuvEnhance or FaceSr'
    return picture_dict


def process_line_graph(lo: LSC_OSR, hf: HistoryFrame):
    plt.figure(figsize=[12, 10])
    # LSC-OSR: compare osr_spectral against lsc_spectral
    lp1 = plt.subplot2grid((4, 2), (0, 0), rowspan=2)
    lp1.plot(np.linspace(380, 780, 81), lo.lsc_spectral, linewidth=2, color="#FF8C00")
    lp1.plot(np.linspace(380, 780, 81), lo.osr_spectral, linewidth=2, color="#008B8B")
    lp1.set_xlabel("wavelength, lightType = {:.2f}, {:.2f}, {:.2f}".format(lo.osr_weight[0], lo.osr_weight[1], lo.osr_weight[2]))
    lp1.legend(["lsc spectra", "osr spectra"])
    # history frames: msc AE
    lp2 = plt.subplot2grid((4, 2), (0, 1))
    x = np.linspace(1, hf.n_frame, hf.n_frame)
    lp2.plot(x, hf.expo / (hf.expo.max() + 0.001), linewidth=1, marker=".", markersize=10, color="#FF8C00")
    lp2.plot(x, hf.iso / (hf.iso.max() + 0.001), linewidth=1, marker=".", markersize=10, color="#008B8B")
    lp2.plot(x, hf.lv / (hf.lv.max() + 0.001), linewidth=1, marker=".", markersize=10, color="#6A5ACD")
    lp2.legend(["expo", "iso", "lv"])
    lp2.set_xlabel(f"mscAE: expo_max = {hf.expo.max()}, iso_max = {hf.iso.max()}, lv_max = {hf.lv.max()}")
    # history frames: msc2cam
    lp3 = plt.subplot2grid((4, 2), (1, 1))
    lp3.plot(x, hf.msc2main, linewidth=1, marker=".", markersize=10, color="#FF8C00")
    lp3.plot(x, hf.msc2wide, linewidth=1, marker=".", markersize=10, color="#008B8B")
    lp3.plot(x, hf.msc2tele, linewidth=1, marker=".", markersize=10, color="#6A5ACD")
    lp3.set_xlabel("baseCT: msc2cam first element")
    lp3.legend(["main", "wide", "tele"])
    # history frames: full-histogram mean, plus current mean and target of the small-histogram highlight region
    # lp3 = plt.subplot2grid((4, 2), (1, 1))
    # lp3.plot(np.linspace(1, len(hf.avg_val), len(hf.avg_val)), hf.avg_val, linewidth=1, marker=".", markersize=10, color="#FF8C00")
    # lp3.plot(np.linspace(1, len(hf.cur_lum), len(hf.cur_lum)), hf.cur_lum, linewidth=1, marker=".", markersize=10, color="#008B8B")
    # lp3.plot(np.linspace(1, len(hf.tar_lum), len(hf.tar_lum)), hf.tar_lum, linewidth=1, marker=".", markersize=10, color="#6A5ACD")
    # lp3.legend(["histAveragevalue", "histCurlum", "histTarlum"])
    # lp3.set_xlabel("mscAE: histAveragevalue, histCurlum, histTarlum")
    # history frames: hvm XZ, compute cct/duv
    lp4 = plt.subplot2grid((4, 2), (2, 0))
    Y = np.array([1.00] * hf.hvm_cct.shape[0])
    lp4.plot(x, hf.hvm_cct / hf.hvm_cct.max(), linewidth=1, marker=".", markersize=10, color="#FF8C00")
    lp4.plot(x, hf.hvm_duv / hf.hvm_duv.max(), linewidth=1, marker=".", markersize=10, color="#008B8B")
    lp4.legend(["cct", "duv"])
    lp4.set_xlabel(f"HVM: cct_max = {hf.hvm_cct.max():.1f}, duv_max = {np.abs(hf.hvm_duv).max():.4f}")
    # history frames: refCT
    lp5 = plt.subplot2grid((4, 2), (3, 0))
    lp5.plot(x, hf.refct_l, linewidth=1, marker=".", markersize=10, color="#FF8C00")
    lp5.plot(x, hf.refct_m, linewidth=1, marker=".", markersize=10, color="#008B8B")
    lp5.plot(x, hf.refct_s, linewidth=1, marker=".", markersize=10, color="#6A5ACD")
    lp5.legend(["l", "m", "s"])
    lp5.set_xlabel("refCT: lms gain")
    # history frames: awb
    lp6 = plt.subplot2grid((4, 2), (2, 1))
    x = np.linspace(1, hf.awb_rgbg.shape[0], hf.awb_rgbg.shape[0])
    lp6.plot(x, hf.awb_rgbg[:, 0], linewidth=1, marker="o", markerfacecolor='w', color="#FF8C00")
    lp6.plot(x, hf.awb_rgbg_filtered[:, 0], linewidth=1, marker=".", markersize=10, color="#008B8B")
    lp6.legend(["before", "after"], prop={"size": 8})
    lp6.set_xlabel("SIA filter - Rgain")
    lp7 = plt.subplot2grid((4, 2), (3, 1))
    lp7.plot(x, hf.awb_rgbg[:, 1], linewidth=1, marker="o", markerfacecolor='w', color="#FF8C00")
    lp7.plot(x, hf.awb_rgbg_filtered[:, 1], linewidth=1, marker=".", markersize=10, color="#008B8B")
    lp7.legend(["before", "after"], prop={"size": 8})
    lp7.set_xlabel("SIA filter - Bgain")
    plt.tight_layout()
    line_picture = plt.gcf()
    line_picture.canvas.draw()
    line = np.array(line_picture.canvas.renderer.buffer_rgba())[..., 0:3]
    plt.close()
    return line


def draw(jpg_path, save_path, picture_dict: dict, image_info: ImageInfo, qimage_info: QImageInfo):
    # read the source image
    image = plt.imread(jpg_path)[::3, ::3]
    # gamut conversion
    if "hdrvivid" in image_info.pq_info:
        image_linear = gamma_hlg(image / 255.0, degamma=True)
        image_xyz = image_linear @ np.linalg.inv(XYZ2P3_MAT)
        image_linear_2020 = image_xyz @ XYZ2BT2020_MAT
        image = (image_info.gamma(image_linear_2020) * 255.0).astype('uint8')
    elif "standardvideo" in image_info.pq_info:
        image_linear = gamma_ns(image / 255.0, degamma=True)
        image_xyz = image_linear @ np.linalg.inv(XYZ2P3_MAT)
        image_linear_srgb = image_xyz @ XYZ2SRGB_MAT
        image = (image_info.gamma(image_linear_srgb) * 255.0).astype('uint8')
    else:
        image = image  # already sRGB, keep as-is
    tiny_image_rot = image.shape[0] > image.shape[1]  # portrait source -> rotate the thumbnails

    # basic layout parameters
    h, w, c = 1451, 1931, 3
    hist_h, hist_w = 1120, 1340
    tiny_row, tiny_col = len(image_info.config['pic_plot']), max(len(_) for _ in image_info.config['pic_plot'])
    tiny_row_gap, tiny_col_gap = 60, 5
    tiny_w = ((w + hist_w) - (tiny_col - 1) * tiny_col_gap) // tiny_col
    tiny_h = int(tiny_w / 1.33)
    imageLarge = (np.ones([h + (tiny_h + tiny_row_gap) * tiny_row, w + hist_w, c])).astype(np.uint8)
    font = ImageFont.truetype("arial.ttf", size=15)  # font style and size
    font_c = ImageFont.truetype("simsun.ttc", size=25)
    color = (255, 255, 0)  # text color (RGB)
    # image = cv2.resize(image, (w, h))
    imageLarge[:h, :w] = image
    # line graphs
    imageLarge[0:hist_h, w:w + hist_w] = cv2.resize(picture_dict['line'], (hist_w, hist_h))

    def paste_tiny_image(r, c, i):
        tiny_img: np.ndarray = picture_dict.get(f"p{i}", None)
        if tiny_img is not None and not np.isnan(tiny_img).any():
            if qimage_info.res_rotation != "unknown":
                if qimage_info.res_rotation == 14:
                    tiny_img = np.rot90(tiny_img, -3)
                else:
                    tiny_img = np.rot90(tiny_img, -qimage_info.res_rotation // 90)
            else:
                if tiny_image_rot:
                    tiny_img = np.rot90(tiny_img, -1)
            tiny_img = cv2.resize(tiny_img.clip(0, 1), (tiny_w, tiny_h)).clip(0, 1)
            imageLarge[
                h + (r - 1) * (tiny_h + tiny_row_gap): h + (r - 1) * (tiny_h + tiny_row_gap) + tiny_h,
                (tiny_w + tiny_col_gap) * (c - 1): (tiny_w + tiny_col_gap) * (c - 1) + tiny_w,
            ] = (tiny_img * 255).astype(np.uint8)

    # older version without the rotation metadata:
    # def paste_tiny_image(r, c, i):
    #     tiny_img: np.ndarray = picture_dict.get(f'p{i}', None)
    #     if tiny_img is not None and not np.isnan(tiny_img).any():
    #         if tiny_image_rot:
    #             tiny_img = np.rot90(tiny_img, -1)
    #         tiny_img = cv2.resize(tiny_img.clip(0, 1), (tiny_w, tiny_h)).clip(0, 1)
    #         imageLarge[h + (r - 1) * (tiny_h + tiny_row_gap): h + (r - 1) * (tiny_h + tiny_row_gap) + tiny_h, (tiny_w + tiny_col_gap) * (c - 1): (tiny_w + tiny_col_gap) * (c - 1) + tiny_w] = (tiny_img * 255).astype(np.uint8)

    for r in range(0, tiny_row):
        for c, i in enumerate(image_info.config['pic_plot'][r]):
            paste_tiny_image(r + 1, c + 1, i)
    imageLarge = Image.fromarray(imageLarge)
    draw = ImageDraw.Draw(imageLarge)

    # draw the captions onto the image
    def draw_tiny_text(s1: str, s2: str, r, c):
        draw.text(((tiny_w + tiny_col_gap) * (c - 1) + tiny_w // 2 - draw.textlength(s1, font=font_c) // 2,
                   h + (tiny_h + tiny_row_gap) * (r - 1) + tiny_h + 2), s1, font=font_c, color=color, fill=color)
        draw.text(((tiny_w + tiny_col_gap) * (c - 1) + tiny_w // 2 - draw.textlength(s2, font=font) // 2,
                   h + (tiny_h + tiny_row_gap) * (r - 1) + tiny_h + 32), s2, font=font, color=color, fill=color)

    for r in range(0, tiny_row):
        for c, i in enumerate(image_info.config['pic_plot'][r]):
            draw_tiny_text(picture_dict.get(f'p{i}_name', ''), picture_dict.get(f'p{i}_description', ''), r + 1, c + 1)
    draw.text((10, 10), os.path.basename(jpg_path)[:-4], font=ImageFont.truetype("simsun.ttc", size=50), color=color, fill=color)

    text1 = ("""Camera: {:<4} -- ZOOM: {:<4} -- AI_enable: {:<4} -- AiType: {:<4} -- Capture_Mode: {:<4} -- XtCodeMode: {:<4}
CAM - {:<12}: expo = {:<6.0f} us, iso = {:<6.0f}, fn = {:<6.2f}, expo_ratio = {:<6.2f}
MSC - {:<12}: expo = {:<6.0f} us, iso = {:<6.0f}, lv = {:<6.0f}, lux = {:<6.2f}, raw_mean = {:6.2f}
{:<70} SIA_D_A_LED_Prob = {:<1d},{:<1d},{:<1d}
Tone_Net2 = {:<58} airaw_type = {:<12}
Tone_Net1 = {:<58} AiRawV2PpAlgo = {:<54}
ppalgo = {:<61} algo_refct_res = {:<4.4f},{:<4.4f},{:<4.4f}
iso_speed = {:<58} pipe_refct_gain = {:<4d},{:<4d},{:<4d}
highSatCardNum = {:<.0f},{:<.0f},{:<.0f}{:3}redHighSatCnt = {:<.0f},{:<.0f},{:<.0f}{:3}blueHighSatCnt = {:<.0f},{:<.0f},{:<.0f}{:3}purpleHighSatCnt = {:<.0f},{:<.0f},{:<.0f}{:3}
hvm_cct0 = {:4.0f}K, hvm_duv0 = {:8.4f}{:9}algo_awb = {:<4.0f},{:<4.0f},{:<4.0f}{:14}ccm_com_vector = {:<3d},{:<3d},{:<3d}
hvm_cct1 = {:4.0f}K, hvm_duv1 = {:8.4f}{:9}pipe_awb = {:<4.0f},{:<4.0f},{:<4.0f}{:14}ccm_com_weight = {:<3d},{:<3d},{:<3d}
osr_cct = {:4.0f}K, osr_duv = {:8.4f}{:9}pipe_awb_filter = {:<4d},{:<4d},{:<4d}{:7}bv = {:<3d}
msc2cam = {:8.4f},{:8.4f},{:8.4f}{:10}awb_otp = {:<4d},{:<4d},{:<4d}
msc2xyz = {:8.4f},{:8.4f},{:8.4f}
""".format(
        image_info.cam_type, qimage_info.zoom_ratio, str(qimage_info.masterAiEnable), str(qimage_info.masterAiType), str(qimage_info.Capture_Mode), str(qimage_info.XtCodeMode),
        image_info.cam_sensor_name, image_info.cam_expo, image_info.cam_iso, image_info.cam_fn, image_info.msc_expo * image_info.msc_iso / (1 + image_info.cam_expo * image_info.cam_iso),
        image_info.msc_sensor_name, image_info.msc_expo, image_info.msc_iso, image_info.msc_lv, 2 ** (image_info.msc_lv / 10 - 1), image_info.msc_raw_mean,
        '', *image_info.sia_light,
        qimage_info.model_name_version, qimage_info.airaw_type,
        qimage_info.model_name_version_net1, qimage_info.AiRawV2PpAlgo,
        image_info.pq_info[13:], *image_info.algo_refct_result,
        ','.join([str(i) for i in qimage_info.iso_speed]) if qimage_info.iso_speed else 'unknown', *image_info.refct_gain,
        image_info.highSatCardNum[0], image_info.highSatCardNum[1], image_info.highSatCardNum[2], '',
        image_info.redHighSatCnt[0], image_info.redHighSatCnt[1], image_info.redHighSatCnt[2], '',
        image_info.blueHighSatCnt[0], image_info.blueHighSatCnt[1], image_info.blueHighSatCnt[2], '',
        image_info.purpleHighSatCnt[0], image_info.purpleHighSatCnt[1], image_info.purpleHighSatCnt[2], '',
        image_info.hvm_cct0, image_info.hvm_duv0, ' ', *[1092 * _ for _ in image_info.algo_awb], ' ', *image_info.ccm_com_vec,
        image_info.hvm_cct1, image_info.hvm_duv1, ' ', *[1092 * _ for _ in image_info.pipe_awb], ' ', *image_info.ccm_com_weight,
        image_info.lsc_osr.osr_cct, image_info.lsc_osr.osr_duv, ' ', *[int(1092 * _) for _ in image_info.pipe_awb_filter], ' ', image_info.bv,
        image_info.msc2cam[0][0], image_info.msc2cam[0][1], image_info.msc2cam[0][2], ' ', *image_info.awb_otp,
        image_info.msc2xyz_mat[0][0], image_info.msc2xyz_mat[0][1], image_info.msc2xyz_mat[0][2],
    ))
    draw.text((w, hist_h + 10), text1, font=ImageFont.truetype("consola.ttf", size=22), color=color, fill=color)
    imageLarge.save(save_path, quality=100, dpi=(300, 300, 0))


def parse_image(idx, idx_end, image_path, save_path, phone, cam_type, lightness):
    print("[{:04d}/{:04d}] {}".format(idx, idx_end, image_path))
    image_info = ImageInfo(image_path, phone, cam_type)
    qimage_info = QImageInfo(image_path)
    picture_dict = process(image_info, qimage_info, lightness)
    picture_dict['line'] = process_line_graph(image_info.lsc_osr, image_info.history_frame)
    jpg_save_name = os.path.join(save_path, os.path.splitext(os.path.basename(image_path))[0] + '_info.jpg')
    draw(image_path, jpg_save_name, picture_dict, image_info, qimage_info)


def try_parse_image(_i, __i, _j, _p, __p, _c, _l):
    try:
        parse_image(_i, __i, _j, _p, __p, _c, _l)
    except Exception as e:
        print(e)


if __name__ == "__main__":
    '''
    ☆☆☆ Script notes:
    1. Before running, parse all images with QTimeMachine v1.6.4 or later to extract the airaw debug info
    2. The color gamut and matching gamma are chosen adaptively per image from ppAlgoInfo (provided by Jialu)
    3. Finer-grained parsing of the LUT modules: 4 for preview, 2 for capture (provided by Lianpeng)
    4. If QTimeMachine cannot parse the XStyle file, or on a CLT platform, only the capture aiRawIspIn and YuvEnhance images are drawn
    5. Debug-info adaptation for the current moon/stage/sunset logic
    6. Adds the three preview dytonesync debug thumbnails and the capture dytonesync sync_msc_lut; both rely on the debug info parsed by QTimeMachine
    7. The image layout can be customized in config.yaml
    8. Tool usage guide: https://wiki.huawei.com/domains/95501/wiki/163974/WIKI202412315590569
    '''
    # data paths
    path0 = r"\\10.185.104.154\Cam_Pub\00.SKP_color_data\1_自测数据\KP长焦\1110-模型验证\QT"
    path1 = os.path.join(path0, 'simu_2.0.0')  # where the parsed results are saved
    os.makedirs(path1, exist_ok=True)
    jpgs = glob(path0 + "/*.jpg")
    debug = False      # debug: parse only the first image
    parallel = False   # parallel processing
    phone_name = None  # None: auto-detect from the file name; or specify a phone: CLS, PLR, PLA, ...
    cam_type = None    # None: read from the debug info; on "[error] parse.py LINE 278 ..." add the anonymized module name to modules in config.yaml, or specify: main, wide, tele
    lightness = 0.9    # default 1; adjusts the brightness of P1-P6. Use ~0.4 for flat-color scenes, 0.9 for ordinary scenes, higher for HDR scenes to inspect dark regions
    if debug:
        parse_image(1, 1, jpgs[0], path1, phone_name, cam_type, lightness)
        exit()
    if parallel:
        para = Parallel(n_jobs=16, backend='multiprocessing')
        para(delayed(try_parse_image)(idx + 1, len(jpgs), jpg, path1, phone_name, cam_type, lightness)
             for idx, jpg in enumerate(jpgs))
    else:
        for idx, jpg in enumerate(jpgs):
            try_parse_image(idx + 1, len(jpgs), jpg, path1, phone_name, cam_type, lightness)
```

This script reports the error `[error] parse.py LINE 278, THERE IS NO MATCHING MODULE NAME, ADD MODULE NAME IN config.yaml`. Please modify the code so that when this error occurs the current image is skipped and the remaining images are still parsed, instead of the program stopping.
<think>
The requirement: while parsing images, if the error "[error] parse.py LINE 278, THERE IS NO MATCHING MODULE NAME, ADD MODULE NAME IN config.yaml" occurs, skip that image and continue with the remaining ones.

The natural tool is a try-except block, but first we have to work out how the error is produced. Two cases are possible:

1. The message comes from an exception that propagates uncaught, which stops the program. Then the fix is simple: wrap the per-image call in try-except, check whether the exception text contains "THERE IS NO MATCHING MODULE NAME", and `continue` when it matches; re-raise (or log) anything else. Catching every exception indiscriminately is not recommended, since it can mask unrelated bugs.

2. The message is merely printed and the program then stops via something like sys.exit(1). A plain `except Exception` cannot catch that (SystemExit derives from BaseException), so either catch SystemExit explicitly in the caller or, better, change parse.py around line 278 to raise an exception instead of exiting.

The cleanest solution, if parse.py can be edited, is a custom exception:

- define `class SkipImageException(Exception): pass`
- at parse.py line 278, replace the print-and-exit with `raise SkipImageException('THERE IS NO MATCHING MODULE NAME, ADD MODULE NAME IN config.yaml')`
- in the main loop, catch SkipImageException, log which image was skipped, and continue; let other exceptions propagate or handle them separately.

If parse.py cannot be modified (for example, third-party code), string-matching the exception message in the caller is the fallback; intercepting printed output by redirecting stderr would be fragile and is not worth it.

Related questions:
1. How do you define a custom exception in Python?
2. How do you handle several different exception types in one try block?
3. Besides exceptions, what other ways are there to skip the current loop iteration on error?
</think>

Per your requirement — skip the current image and keep going when the specified error occurs — here are the suggested changes:

### Modification plan

Add an exception handler around the main image-parsing loop that recognizes the specific error message:

```python
import traceback

# main image-parsing loop
def parse_images(image_list):
    for image in image_list:
        try:
            # parse a single image
            result = parse_single_image(image)
            process_result(result)
        except Exception as e:
            error_msg = traceback.format_exc()
            # is this the specific config error?
            if "THERE IS NO MATCHING MODULE NAME" in error_msg:
                print(f"⚠️ skipping image {image}, reason: {e}")
                continue  # skip this image and keep going
            else:
                # any other exception is treated as fatal
                print(f"❌ fatal error, stopping: {error_msg}")
                raise  # re-raise to terminate the program

# example parsing function
def parse_single_image(image_path):
    # your parsing logic goes here...
    # a config mismatch should raise an exception:
    if not validate_config(image_path):
        raise RuntimeError(
            "[error] parse.py LINE 278, "
            "THERE IS NO MATCHING MODULE NAME, "
            "ADD MODULE NAME IN config.yaml"
        )
    # ...rest of the parsing code
```

### Key changes

1. **Exception capture**:
   - wrap the parsing call in a `try-except` block
   - use `traceback.format_exc()` to get the full error text

2. **Error identification**:
   - check whether the error text contains `"THERE IS NO MATCHING MODULE NAME"`
   - on a match, print a warning and `continue` to the next image

3. **Error isolation**:
   - any other, unrecognized exception still terminates the program
   - `raise` re-raises it so the program exits cleanly

4. **Logging**:
   - skip warnings (⚠️) and fatal errors (❌) are clearly distinguished
   - the original error text is preserved for later debugging

### Notes

1. Make sure `parse_single_image()` really does raise an exception on the config error
2. The matched substring must be exact, including case and punctuation
3. Consider adding a default module entry in `config.yaml` so the error cannot occur in the first place
4. For important workflows, also record the skipped images:

```python
with open("skipped_images.log", "a") as f:
    f.write(f"{image} | {str(e)}\n")
```
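If you would rather not touch parse.py, the same idea can be dropped straight into the script above by widening its existing `try_parse_image` wrapper. A minimal sketch, assuming parse.py either raises an ordinary exception carrying the LINE 278 message or prints it and then calls `sys.exit()` (which `except Exception` alone would not catch):

```python
def try_parse_image(_i, __i, _j, _p, __p, _c, _l):
    """Parse one image; on the module-name config error, skip it instead of aborting."""
    try:
        parse_image(_i, __i, _j, _p, __p, _c, _l)
    except SystemExit:
        # covers parse.py printing the LINE 278 message and then calling sys.exit()
        print(f"[skip] {_j}: no matching module name, add it to modules in config.yaml")
    except Exception as e:
        if "THERE IS NO MATCHING MODULE NAME" in str(e):
            print(f"[skip] {_j}: {e}")
        else:
            print(e)  # keep the original behavior for other errors
```

Both the serial loop and the joblib branch already route every image through `try_parse_image`, so no other changes are needed; the `[skip]` lines in the console then tell you which module names still have to be added to config.yaml.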