import os
from glob import glob
import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from joblib import Parallel, delayed
from PIL import Image, ImageDraw, ImageFont
from utils.function import (XYZ2BT2020_MAT, XYZ2P3_MAT, XYZ2SRGB_MAT,
adjust_saturation_hisi_CA, get_cross_dimension)
from utils.gamma_opt import gamma_hlg, gamma_ns
from utils.parse import LSC_OSR, HistoryFrame, ImageInfo, QImageInfo
from utils.xstyle_ce_opt import CESimulator
from utils.xstyle_lut_opt import apply_lut3d
matplotlib.use("Agg")
os.chdir(os.path.dirname(__file__))
def process(image_info: ImageInfo, qimage_info: QImageInfo, lightness: float):
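"""Simulate each stage of the color pipeline as small preview images.

Returns a dict mapping 'pN' to an image: P1-P6 are simulated from stats and
color matrices, P7-P18 are preview/capture debug images taken from qimage_info;
each entry also gets 'pN_name' / 'pN_description' captions used later by draw().
"""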
picture_dict = {}
# 1 HVM mscStats @ msc2xyz @ caMat @ LCH @ gamma
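# NOTE (assumption): get_cross_dimension appears to augment the RGB stats with
# cross terms so they can be multiplied by the 6x3 color matrices used below.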
simu_msc2xyz = (get_cross_dimension(image_info.msc_golden_raw.clip(0, None)) @ image_info.msc2xyz_mat).reshape(-1, 3)
simu_msc2xyz = adjust_saturation_hisi_CA(simu_msc2xyz, image_info.hvm_xy1)
simu_msc2xyz /= (simu_msc2xyz.max() * lightness)
simu_msc2xyz = (simu_msc2xyz @ image_info.color_space_trans_mat).clip(0, 1).reshape(48, 64, 3)
simu_msc2xyz_nonlinear = image_info.gamma(simu_msc2xyz).clip(0, 1)
picture_dict["p1"] = np.rot90(simu_msc2xyz_nonlinear, image_info.config['msc_rotation']) # 考虑 MSC 倒装的情况
picture_dict['p1_name'] = 'P1 MSC目标效果(HVM)'
picture_dict['p1_description'] = 'mscStats @ msc2xyz @ caMat @ LCH @ gamma'
# if image_info.cam_type == 'tele':
# msc_xyz_pipe = (image_info.msc_xyz_pipe @ image_info.color_space_trans_mat).clip(0, 1).reshape(48, 64, 3)
# pipe_msc2xyz_nonlinear = image_info.gamma(msc_xyz_pipe).clip(0, 1)
# if np.sum(msc_xyz_pipe) > 0:
# picture_dict["p1"] = np.rot90(pipe_msc2xyz_nonlinear, image_info.config['msc_rotation']) # 考虑 MSC 倒装的情况
# simu_msc2xyz = msc_xyz_pipe
# 2 CT 3*3 mscStats @ msc2cam @ baseCT_cam2XYZ @ gamma
simu_msc2cam_CT = (get_cross_dimension(
(image_info.msc_golden_raw @ image_info.msc2cam_basect).clip(0, None).reshape(-1, 3)) @ image_info.cam2std_basect).clip(0, None)
simu_msc2cam_CT = (simu_msc2cam_CT @ image_info.color_space_trans_mat).clip(0, 1).reshape(48, 64, 3)
simu_msc2cam_CT *= (simu_msc2xyz.mean() / simu_msc2cam_CT.mean())
simu_msc2cam_CT_nonlinear = image_info.gamma(simu_msc2cam_CT).clip(0, 1)
# picture_dict["p2_divcc3x3"] = divide_matrix((image_info.cam2std @ image_info.ca_mat.T @ image_info.color_space_trans_mat).T)
if len(image_info.history_frame.face_pos_left_top_x) > 0:
try:
coordinate = [image_info.history_frame.face_pos_left_top_x[-1] / 2, image_info.history_frame.face_pos_left_top_y[-1] / 2,
image_info.history_frame.face_pos_width[-1] / 2, image_info.history_frame.face_pos_height[-1] / 2]
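# Map the (half-resolution) face box onto the 48x64 stats grid; the /7 cell
# size and the (20, 14) pixel offsets are assumed to match the MSC stats layout.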
h1, h2 = int((coordinate[1] - 14) / 7), int((coordinate[1] + coordinate[3] - 14) / 7)
w1, w2 = int((coordinate[0] - 20) / 7), int((coordinate[0] + coordinate[2] - 20) / 7)
simu_msc2cam_CT_nonlinear[h1:h2, w1] = [1, 0, 0]
simu_msc2cam_CT_nonlinear[h1:h2, w2] = [1, 0, 0]
simu_msc2cam_CT_nonlinear[h1, w1:w2] = [1, 0, 0]
simu_msc2cam_CT_nonlinear[h2, w1:w2] = [1, 0, 0]
# --- brighten the face region
# simu_msc2cam_CT_nonlinear[h1:h2, w1:w2] = (simu_msc2cam_CT_nonlinear[h1:h2, w1:w2] * 1.5).clip(0, 1)
simu_msc2cam_CT_nonlinear[h1:h2, w1:w2] /= (simu_msc2cam_CT_nonlinear[h1:h2, w1:w2].max() / 1.5)
except Exception:
pass
picture_dict["p2"] = np.rot90(simu_msc2cam_CT_nonlinear, image_info.config['msc_rotation']) # 考虑 MSC 倒装的情况
picture_dict['p2_name'] = 'P2 虚拟CAM,baseCT效果'
picture_dict['p2_description'] = '(mscStats @ msc2cam) @ cam2xyz @ gamma'
# 3 baseCT 3*6 camStats @ baseCT_cam2XYZ @ gamma
simu_baseCT = ((get_cross_dimension(image_info.cam_golden_raw.clip(0, None)) @ image_info.cam2std_basect).clip(0, None))
simu_baseCT = (simu_baseCT @ image_info.color_space_trans_mat).clip(0, 1).reshape(64, 64, 3)
simu_baseCT *= (simu_msc2xyz.mean() / simu_baseCT.mean())
simu_baseCT_nonlinear = image_info.gamma(simu_baseCT).clip(0, 1)
picture_dict["p3"] = simu_baseCT_nonlinear
picture_dict["p3_name"] = "P3 真实CAM,baseCT效果"
picture_dict["p3_description"] = "camGoldenStats @ cam2xyz_baseCT @ gamma"
# 3 baseCT 3*6 camStats @ baseCT_cam2XYZ @ gamma: test
# curr_cam2std_basect = np.array([0.70212766,0.14216634,-0.1615087,
# -0.11035156,0.67480469,-0.89160156,
# 0.35798817, -0.13510848, 2.55325444, 0.05150692, 0.18464747,
# -0.4120554 , 0.16797676, 0.01757896, 0.67288365,
# 0.0637888 ,-0.03140371, 1.26203678]).reshape(6, 3)
# simu_baseCT = ((get_cross_dimension(image_info.cam_golden_raw.clip(0, None)) @ curr_cam2std_basect).clip(0, None))
# # print(image_info.cam2std_basect)
# simu_baseCT = (simu_baseCT @ image_info.color_space_trans_mat).clip(0, 1).reshape(64, 64, 3)
# simu_baseCT *= (simu_msc2xyz.mean() / simu_baseCT.mean())
# simu_baseCT_nonlinear = image_info.gamma(simu_baseCT).clip(0, 1)
# picture_dict["p3"] = simu_baseCT_nonlinear
# picture_dict["p3_name"] = "P3 真实CAM,baseCT效果,Jock"
# picture_dict["p3_description"] = "camGoldenStats @ cam2xyz_baseCT @ gamma"
# 4 refCT camGoldenStats @ cam2std_refCT @ gamma
cam_refCT = (get_cross_dimension(image_info.cam_golden_raw.clip(0, None).reshape(-1, 3)) @ image_info.cam2std_refct).clip(0, None)
cam_refCT = (cam_refCT @ image_info.color_space_trans_mat).clip(0, 1).reshape(64, 64, 3)
cam_refCT *= (simu_msc2xyz.mean() / cam_refCT.mean())
cam_refCT_nonlinear = image_info.gamma(cam_refCT).clip(0, 1)
picture_dict["p4"] = cam_refCT_nonlinear
picture_dict["p4_name"] = "P4 真实CAM,refCT效果"
picture_dict["p4_description"] = "camGoldenStats @ cam2xyz_refCT @ gamma"
# 5 filtered CAM rendering: camStats @ pipe_awb_filter @ algo_hdcc @ gamma
cam_filter = get_cross_dimension((image_info.cam_raw.reshape(-1, 3) * image_info.pipe_awb_filter).clip(0, 1))
cam_filter = (cam_filter @ image_info.algo_ccm).reshape(64, 64, 3).clip(0, 1)
cam_filter *= (simu_msc2xyz.mean() / cam_filter.mean())
cam_filter_nonlinear = image_info.gamma(cam_filter).clip(0, 1)
picture_dict["p5"] = cam_filter_nonlinear
picture_dict['p5_name'] = 'P5 real CAM, ISP filtered'
picture_dict['p5_description'] = 'camStats @ pipe_awb_filter @ algo_hdcc @ gamma'
# 6 CAM rendering with CCGM: camStats @ pipe_awb_filter @ pipe_hdcc @ gamma
cam_ccgm = get_cross_dimension((image_info.cam_raw.reshape(-1, 3) * image_info.pipe_awb_filter).clip(0, 1))
cam_ccgm = (cam_ccgm @ image_info.pipe_ccm).reshape(64, 64, 3).clip(0, 1)
cam_ccgm *= (simu_msc2xyz.mean() / cam_ccgm.mean())
cam_ccgm_nonlinear = image_info.gamma(cam_ccgm).clip(0, 1)
picture_dict["p6"] = cam_ccgm_nonlinear
picture_dict["p6_name"] = 'P6 真实CAM,CCGM后'
picture_dict["p6_description"] = 'camStats @ pipe_awb_filter @ pipe_hdcc @ gamma'
xstyle_ce_opt = CESimulator()
# Preview:
# 1. xstyle on: preview BE + sync-lut + xstyle-lut + xstyle-ce
# 2. xstyle off: preview BE + sync-lut + real-lut + xstyle-ce
if qimage_info.preview_be is not None:
picture_dict['p7'] = qimage_info.preview_be
picture_dict['p7_name'] = 'P7 preview, before LUT'
picture_dict['p7_description'] = 'rawNR(rdp) + awb + hdcc + aitm'
if qimage_info.lut_sync is not None:
preview_be_synclut = apply_lut3d(qimage_info.preview_be, qimage_info.lut_sync)
picture_dict['p8'] = preview_be_synclut
picture_dict['p8_name'] = 'P8 preview, DytoneSync rendering'
picture_dict['p8_description'] = 'P7 + sync-lut'
if qimage_info.xstyle_enable and qimage_info.xstyle_enable != 'unknown':
if qimage_info.lut_xstyle is not None:
preview_be_xstylelut = apply_lut3d(qimage_info.preview_be, qimage_info.lut_xstyle)
picture_dict['p9'] = preview_be_xstylelut
picture_dict['p9_name'] = 'P9 preview, xstyle-lut rendering'
picture_dict['p9_description'] = 'P7 + xstyle-lut'
else:
if qimage_info.lut_real is not None:
preview_be_reallut = apply_lut3d(qimage_info.preview_be, qimage_info.lut_real)
picture_dict['p9'] = preview_be_reallut
picture_dict['p9_name'] = 'P9 preview, real-lut rendering'
picture_dict['p9_description'] = 'P7 + real-lut'
if qimage_info.lut_pre is not None:
preview_be_lut = apply_lut3d(qimage_info.preview_be, qimage_info.lut_pre)
picture_dict['p10'] = preview_be_lut
picture_dict['p10_name'] = 'P10 preview, final-lut rendering'
picture_dict['p10_description'] = 'P7 + final-lut'
if qimage_info.xstyle_enable and qimage_info.xstyle_enable != 'unknown' and qimage_info.scene_type not in ['flower', 'plants']:
_preview_be_lut_ce = xstyle_ce_opt.forward(qimage_info.ce_data, preview_be_lut, None)
picture_dict['p11'] = _preview_be_lut_ce # preview + final_lut + xstyle_yuv
picture_dict['p11_name'] = 'P11 preview, after XStyle'
picture_dict['p11_description'] = 'P10 + xstyle-ce'
# Preview dytonesync
if qimage_info.dytonesync_ref is not None:
picture_dict['p12'] = qimage_info.dytonesync_ref # Hongfeng reference thumbnail fed to dytoneSync; cap/sdr -> P3, CUVA -> BT2020
picture_dict['p12_name'] = 'P12 MSC target rendering (DytoneSync)'
picture_dict['p12_description'] = 'mscRaw + ...(Preprocess)'
picture_dict['p13'] = qimage_info.dytonesync_input # preview thumbnail fed to dytoneSync; cap/sdr -> P3, CUVA -> BT2020
picture_dict['p13_name'] = 'P13 preview (thumbnail), before DytoneSync'
picture_dict['p13_description'] = 'P7 + ...(Preprocess)'
_dytonesync_input_synclut = apply_lut3d(qimage_info.dytonesync_input, qimage_info.lut_sync)
picture_dict['p14'] = _dytonesync_input_synclut
picture_dict['p14_name'] = 'P14 preview (thumbnail), after DytoneSync'
picture_dict['p14_description'] = 'P13 + sync-lut'
# Capture:
# 1. capture dytonesync off
#    without Net2: capture BE + lutcap + xstyle-ce
#    with Net2: capture BE + Net2
# 2. capture dytonesync on
#    without Net2: capture BE + sync-lut(cap) + lutpre + xstyle-ce
#    with Net2: capture BE + sync-lut(cap) + sync-lut(pre) + Net2
if qimage_info.capture_be is not None:
picture_dict["p15"] = qimage_info.capture_be
picture_dict["p15_name"] = 'P15 拍照,AirawIspIn的效果'
picture_dict['p15_description'] = 'MEF + awb + hdcc + aitone-net1'
if qimage_info.dycap_merged_lut is not None:
if 'net2yuv' in qimage_info.model_name_version:
capture_be_mergelut = apply_lut3d(qimage_info.capture_be, qimage_info.dycap_merged_lut)
picture_dict['p16'] = capture_be_mergelut
picture_dict['p16_name'] = 'P16 capture, DytoneSync Hongfeng rendering'
picture_dict['p16_description'] = 'P15 + sync-msc-lut'
else:
capture_be_synclut = apply_lut3d(qimage_info.capture_be, qimage_info.dycap_sync_lut)
picture_dict['p16'] = capture_be_synclut
picture_dict['p16_name'] = 'P16 capture, DytoneSync preview rendering'
picture_dict['p16_description'] = 'P15 + sync-pre-lut'
if qimage_info.lut_pre is not None:
capture_be_synclut_lut = apply_lut3d(capture_be_synclut, qimage_info.lut_pre)
picture_dict['p17'] = capture_be_synclut_lut
picture_dict['p17_name'] = 'P17 capture, final-lut rendering'
picture_dict['p17_description'] = 'P15 + final-lut'
if qimage_info.xstyle_enable and qimage_info.xstyle_enable != 'unknown' and qimage_info.scene_type not in ['flower', 'plants']:
capture_be_synclut_lut_ce = xstyle_ce_opt.forward(qimage_info.ce_data, capture_be_synclut_lut, None)
picture_dict['p18'] = capture_be_synclut_lut_ce
picture_dict['p18_name'] = 'P18 capture, after XStyle'
picture_dict['p18_description'] = 'P17 + xstyle-ce'
else:
if 'net2yuv' not in qimage_info.model_name_version and qimage_info.lut_cap is not None:
capture_be_lut = apply_lut3d(qimage_info.capture_be, qimage_info.lut_cap)
picture_dict['p17'] = capture_be_lut
picture_dict['p17_name'] = 'P17 capture, final-lut rendering'
picture_dict['p17_description'] = 'P15 + final-lut'
if qimage_info.xstyle_enable and qimage_info.xstyle_enable != 'unknown' and qimage_info.scene_type not in ['flower', 'plants']:
capture_be_lut_ce = xstyle_ce_opt.forward(qimage_info.ce_data, capture_be_lut, None)
picture_dict['p18'] = capture_be_lut_ce # capture + final_lut + xstyle_yuv
picture_dict['p18_name'] = 'P18 capture, after XStyle'
picture_dict['p18_description'] = 'P17 + xstyle-ce'
# RGB2YUV output
if qimage_info.captureOut is not None:
picture_dict["p16"] = qimage_info.captureOut
picture_dict['p16_name'] = 'P16 capture, airawRgb2yuvOut rendering'
picture_dict['p16_description'] = 'P15 + RGB2YUV Process'
# Capture Net2 output
if qimage_info.rgb_net2 is not None:
picture_dict["p18"] = qimage_info.rgb_net2
picture_dict['p18_name'] = 'P18 capture, after Net2/LUT3D'
picture_dict['p18_description'] = 'P16 + net2/LUT3D, i.e. YuvEnhance or FaceSr'
return picture_dict
def process_line_graph(lo: LSC_OSR, hf: HistoryFrame):
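"""Render the history-frame diagnostics (LSC/OSR spectra, MSC AE, msc2cam,
CCT/duv, refCT gains, AWB filtering) as a 4x2 plot grid and return it as an
RGB numpy array."""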
plt.figure(figsize=[12, 10])
# LSC-OSR draw osr_spectral lsc_spectral compare
lp1 = plt.subplot2grid((4, 2), (0, 0), rowspan=2)
lp1.plot(np.linspace(380, 780, 81), lo.lsc_spectral, linewidth=2, color="#FF8C00")
lp1.plot(np.linspace(380, 780, 81), lo.osr_spectral, linewidth=2, color="#008B8B")
lp1.set_xlabel("wavelength, lightType = {:.2f}, {:.2f}, {:.2f}".format(lo.osr_weight[0], lo.osr_weight[1], lo.osr_weight[2]))
lp1.legend(["lsc spectra", "osr spectra"])
# history frames: MSC AE
lp2 = plt.subplot2grid((4, 2), (0, 1))
x = np.linspace(1, hf.n_frame, hf.n_frame)
lp2.plot(x, hf.expo / (hf.expo.max() + 0.001), linewidth=1, marker=".", markersize=10,
color="#FF8C00")
lp2.plot(x, hf.iso / (hf.iso.max() + 0.001), linewidth=1, marker=".", markersize=10,
color="#008B8B")
lp2.plot(x, hf.lv / (hf.lv.max() + 0.001), linewidth=1, marker=".", markersize=10, color="#6A5ACD")
lp2.legend(["expo", "iso", "lv"])
lp2.set_xlabel(
f"mscAE: expo_max = {hf.expo.max()}, iso_max = {hf.iso.max()}, lv_max = {hf.lv.max()}")
# history frames: msc2cam
lp3 = plt.subplot2grid((4, 2), (1, 1))
lp3.plot(x, hf.msc2main, linewidth=1, marker=".", markersize=10, color="#FF8C00")
lp3.plot(x, hf.msc2wide, linewidth=1, marker=".", markersize=10, color="#008B8B")
lp3.plot(x, hf.msc2tele, linewidth=1, marker=".", markersize=10, color="#6A5ACD")
lp3.set_xlabel("baseCT: msc2cam first element")
lp3.legend(["main", "wide", "tele"])
# history frames: full-histogram mean, plus current mean and target of the small-histogram highlight region
# lp3 = plt.subplot2grid((4, 2), (1, 1))
# lp3.plot(np.linspace(1, len(hf.avg_val), len(hf.avg_val)), hf.avg_val, linewidth=1, marker=".", markersize=10, color="#FF8C00")
# lp3.plot(np.linspace(1, len(hf.cur_lum), len(hf.cur_lum)), hf.cur_lum, linewidth=1, marker=".", markersize=10, color="#008B8B")
# lp3.plot(np.linspace(1, len(hf.tar_lum), len(hf.tar_lum)), hf.tar_lum, linewidth=1, marker=".", markersize=10, color="#6A5ACD")
# lp3.legend(["histAveragevalue", "histCurlum", "histTarlum"])
# lp3.set_xlabel("mscAE: histAveragevalue, histCurlum, histTarlum")
# history frames: CCT/duv computed from HVM XZ
lp4 = plt.subplot2grid((4, 2), (2, 0))
lp4.plot(x, hf.hvm_cct / hf.hvm_cct.max(), linewidth=1, marker=".", markersize=10, color="#FF8C00")
lp4.plot(x, hf.hvm_duv / hf.hvm_duv.max(), linewidth=1, marker=".", markersize=10, color="#008B8B")
lp4.legend(["cct", "duv"])
lp4.set_xlabel(f"HVM: cct_max = {hf.hvm_cct.max():.1f}, duv_max = {np.abs(hf.hvm_duv).max():.4f}")
# history frames: refCT
lp5 = plt.subplot2grid((4, 2), (3, 0))
lp5.plot(x, hf.refct_l, linewidth=1, marker=".", markersize=10, color="#FF8C00")
lp5.plot(x, hf.refct_m, linewidth=1, marker=".", markersize=10, color="#008B8B")
lp5.plot(x, hf.refct_s, linewidth=1, marker=".", markersize=10, color="#6A5ACD")
lp5.legend(["l", "m", "s"])
lp5.set_xlabel("refCT: lms gain")
# history frames: AWB
lp6 = plt.subplot2grid((4, 2), (2, 1))
x = np.linspace(1, hf.awb_rgbg.shape[0], hf.awb_rgbg.shape[0])
lp6.plot(x, hf.awb_rgbg[:, 0], linewidth=1, marker="o", markerfacecolor='w', color="#FF8C00")
lp6.plot(x, hf.awb_rgbg_filtered[:, 0], linewidth=1, marker=".", markersize=10, color="#008B8B")
lp6.legend(["before", "after"], prop={"size": 8})
lp6.set_xlabel("SIA filter - Rgain")
lp7 = plt.subplot2grid((4, 2), (3, 1))
lp7.plot(x, hf.awb_rgbg[:, 1], linewidth=1, marker="o", markerfacecolor='w', color="#FF8C00")
lp7.plot(x, hf.awb_rgbg_filtered[:, 1], linewidth=1, marker=".", markersize=10, color="#008B8B")
lp7.legend(["before", "after"], prop={"size": 8})
lp7.set_xlabel("SIA filter - Bgain")
plt.tight_layout()
line_picture = plt.gcf()
line_picture.canvas.draw()
line = np.array(line_picture.canvas.renderer.buffer_rgba())[...,0:3]
plt.close()
return line
def draw(jpg_path, save_path, picture_dict: dict, image_info: ImageInfo, qimage_info: QImageInfo):
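"""Compose the final diagnostic sheet: the (gamut-converted) source JPEG, the
line-graph panel, the P1-P18 thumbnails with captions, and a text block of
exposure/AWB/CCM metadata, then save it to save_path."""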
# read the source image
image = plt.imread(jpg_path)[::3, ::3]
# color-gamut conversion
if "hdrvivid" in image_info.pq_info:
image_linear = gamma_hlg(image / 255.0, degamma=True)
image_xyz = image_linear @ np.linalg.inv(XYZ2P3_MAT)
image_linear_2020 = image_xyz @ XYZ2BT2020_MAT
image = (image_info.gamma(image_linear_2020) * 255.0).astype('uint8')
elif "standardvideo" in image_info.pq_info:
image_linear = gamma_ns(image / 255.0, degamma=True)
image_xyz = image_linear @ np.linalg.inv(XYZ2P3_MAT)
image_linear_srgb = image_xyz @ XYZ2SRGB_MAT
image = (image_info.gamma(image_linear_srgb) * 255.0).astype('uint8')
tiny_image_rot = image.shape[0] > image.shape[1]
# basic layout parameters
h, w, c = 1451, 1931, 3
hist_h, hist_w = 1120, 1340
tiny_row, tiny_col = len(image_info.config['pic_plot']), max(len(_) for _ in image_info.config['pic_plot'])
tiny_row_gap, tiny_col_gap = 60, 5
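# thumbnails keep a ~4:3 aspect ratio and together span the full canvas width
# (main image plus line-graph panel)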
tiny_w = ((w + hist_w) - (tiny_col - 1) * tiny_col_gap) // tiny_col
tiny_h = int(tiny_w / 1.33)
imageLarge = (np.ones([h + (tiny_h + tiny_row_gap) * tiny_row, w + hist_w, c])).astype(np.uint8)
font = ImageFont.truetype("arial.ttf", size=15) # font face and size
font_c = ImageFont.truetype("simsun.ttc", size=25)
color = (255, 255, 0) # text color (RGB)
image = cv2.resize(image, (w, h))
imageLarge[:h, :w] = image
# line-graph panel
imageLarge[0:hist_h, w:w + hist_w] = cv2.resize(picture_dict['line'], (hist_w, hist_h))
def paste_tiny_image(r, c, i):
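"""Paste thumbnail picture_dict[f'p{i}'] into grid cell (row r, col c) of
imageLarge, rotating it to match the output orientation."""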
tiny_img: np.ndarray = picture_dict.get(f"p{i}", None)
if tiny_img is not None and not np.isnan(tiny_img).any():
if qimage_info.res_rotation != "unknown":
if qimage_info.res_rotation == 14:
tiny_img = np.rot90(tiny_img, -3)
else:
tiny_img = np.rot90(tiny_img, -qimage_info.res_rotation // 90)
else:
if tiny_image_rot:
tiny_img = np.rot90(tiny_img, -1)
tiny_img = cv2.resize(tiny_img.clip(0, 1), (tiny_w, tiny_h)).clip(0, 1)
imageLarge[
h + (r - 1) * (tiny_h + tiny_row_gap): h + (r - 1) * (tiny_h + tiny_row_gap) + tiny_h,
(tiny_w + tiny_col_gap) * (c - 1): (tiny_w + tiny_col_gap) * (c - 1) + tiny_w,
] = (tiny_img * 255).astype(np.uint8)
for r in range(0, tiny_row):
for c, i in enumerate(image_info.config['pic_plot'][r]):
paste_tiny_image(r + 1, c + 1, i)
imageLarge = Image.fromarray(imageLarge)
draw = ImageDraw.Draw(imageLarge)
# draw text onto the image
def draw_tiny_text(s1: str, s2: str, r, c):
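"""Center the name (s1) and description (s2) captions below thumbnail cell (r, c)."""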
draw.text(((tiny_w + tiny_col_gap) * (c - 1) + tiny_w // 2 - draw.textlength(s1, font=font_c) // 2, h + (tiny_h + tiny_row_gap) * (r - 1) + tiny_h + 2), s1, font=font_c, fill=color)
draw.text(((tiny_w + tiny_col_gap) * (c - 1) + tiny_w // 2 - draw.textlength(s2, font=font) // 2, h + (tiny_h + tiny_row_gap) * (r - 1) + tiny_h + 32), s2, font=font, fill=color)
for r in range(0, tiny_row):
for c, i in enumerate(image_info.config['pic_plot'][r]):
draw_tiny_text(picture_dict.get(f'p{i}_name', ''), picture_dict.get(f'p{i}_description', ''), r + 1, c + 1)
draw.text((10, 10), os.path.basename(jpg_path)[:-4], font=ImageFont.truetype("simsun.ttc", size=50), fill=color)
text1 = (
"""Camera: {:<4} -- ZOOM: {:<4} -- AI_enable: {:<4} -- AiType: {:<4} -- Capture_Mode: {:<4} -- XtCodeMode: {:<4}
CAM - {:<12}: expo = {:<6.0f} us, iso = {:<6.0f}, fn = {:<6.2f}, expo_ratio = {:<6.2f}
MSC - {:<12}: expo = {:<6.0f} us, iso = {:<6.0f}, lv = {:<6.0f}, lux = {:<6.2f}, raw_mean = {:6.2f}
{:<70} SIA_D_A_LED_Prob = {:<1d},{:<1d},{:<1d}
Tone_Net2 = {:<58} airaw_type = {:<12}
Tone_Net1 = {:<58} AiRawV2PpAlgo = {:<54}
ppalgo = {:<61} algo_refct_res = {:<4.4f},{:<4.4f},{:<4.4f}
iso_speed = {:<58} pipe_refct_gain = {:<4d},{:<4d},{:<4d}
highSatCardNum = {:<.0f},{:<.0f},{:<.0f}{:3}redHighSatCnt = {:<.0f},{:<.0f},{:<.0f}{:3}blueHighSatCnt = {:<.0f},{:<.0f},{:<.0f}{:3}purpleHighSatCnt = {:<.0f},{:<.0f},{:<.0f}{:3}
hvm_cct0 = {:4.0f}K, hvm_duv0 = {:8.4f}{:9}algo_awb = {:<4.0f},{:<4.0f},{:<4.0f}{:14}ccm_com_vector = {:<3d},{:<3d},{:<3d}
hvm_cct1 = {:4.0f}K, hvm_duv1 = {:8.4f}{:9}pipe_awb = {:<4.0f},{:<4.0f},{:<4.0f}{:14}ccm_com_weight = {:<3d},{:<3d},{:<3d}
osr_cct = {:4.0f}K, osr_duv = {:8.4f}{:9}pipe_awb_filter = {:<4d},{:<4d},{:<4d}{:7}bv = {:<3d}
msc2cam = {:8.4f},{:8.4f},{:8.4f}{:10}awb_otp = {:<4d},{:<4d},{:<4d}
msc2xyz = {:8.4f},{:8.4f},{:8.4f}
""".format(
image_info.cam_type, qimage_info.zoom_ratio, str(qimage_info.masterAiEnable), str(qimage_info.masterAiType), str(qimage_info.Capture_Mode), str(qimage_info.XtCodeMode),
image_info.cam_sensor_name, image_info.cam_expo, image_info.cam_iso, image_info.cam_fn, image_info.msc_expo * image_info.msc_iso / (1 + image_info.cam_expo * image_info.cam_iso),
image_info.msc_sensor_name, image_info.msc_expo, image_info.msc_iso, image_info.msc_lv, 2 ** (image_info.msc_lv / 10 - 1), image_info.msc_raw_mean,
'', *image_info.sia_light,
qimage_info.model_name_version, qimage_info.airaw_type,
qimage_info.model_name_version_net1, qimage_info.AiRawV2PpAlgo,
image_info.pq_info[13:], *image_info.algo_refct_result,
','.join([str(i) for i in qimage_info.iso_speed]) if qimage_info.iso_speed else 'unknown', *image_info.refct_gain,
image_info.highSatCardNum[0], image_info.highSatCardNum[1], image_info.highSatCardNum[2], '',
image_info.redHighSatCnt[0], image_info.redHighSatCnt[1], image_info.redHighSatCnt[2], '',
image_info.blueHighSatCnt[0], image_info.blueHighSatCnt[1], image_info.blueHighSatCnt[2], '',
image_info.purpleHighSatCnt[0], image_info.purpleHighSatCnt[1], image_info.purpleHighSatCnt[2], '',
image_info.hvm_cct0, image_info.hvm_duv0, ' ', *[1092 * _ for _ in image_info.algo_awb], ' ', *image_info.ccm_com_vec,
image_info.hvm_cct1, image_info.hvm_duv1, ' ', *[1092 * _ for _ in image_info.pipe_awb], ' ', *image_info.ccm_com_weight,
image_info.lsc_osr.osr_cct, image_info.lsc_osr.osr_duv, ' ', *[int(1092 * _) for _ in image_info.pipe_awb_filter], ' ', image_info.bv,
image_info.msc2cam[0][0], image_info.msc2cam[0][1], image_info.msc2cam[0][2], ' ', *image_info.awb_otp,
image_info.msc2xyz_mat[0][0], image_info.msc2xyz_mat[0][1], image_info.msc2xyz_mat[0][2],
))
draw.text((w, hist_h + 10), text1, font=ImageFont.truetype("consola.ttf", size=22), fill=color)
imageLarge.save(save_path, quality=100, dpi=(300, 300))
def parse_image(idx, idx_end, image_path, save_path, phone, cam_type, lightness):
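"""Parse one image's airaw debug info, simulate the pipeline pictures, and
save the annotated sheet as <name>_info.jpg inside save_path."""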
print("[{:04d}/{:04d}] {}".format(idx, idx_end, image_path))
image_info = ImageInfo(image_path, phone, cam_type)
qimage_info = QImageInfo(image_path)
picture_dict = process(image_info, qimage_info, lightness)
picture_dict['line'] = process_line_graph(image_info.lsc_osr, image_info.history_frame)
jpg_save_name = os.path.join(save_path, os.path.splitext(os.path.basename(image_path))[0] + '_info.jpg')
draw(image_path, jpg_save_name, picture_dict, image_info, qimage_info)
def try_parse_image(_i, __i, _j, _p, __p, _c, _l):
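"""Log and skip any failure (e.g. the config.yaml 'NO MATCHING MODULE NAME'
error) so batch parsing continues with the remaining images."""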
try:
parse_image(_i, __i, _j, _p, __p, _c, _l)
except Exception as e:
print(f"[skip] {_j}: {e}")
if __name__ == "__main__":
'''
☆☆☆ Script notes:
1. Before running, manually parse all images with QTimeMachine v1.6.4 or later to obtain the airaw debug info
2. Images adaptively pick the color gamut and matching gamma from ppAlgoInfo (contributed by Jialu)
3. Finer-grained parsing of the LUT modules: 4 for preview, 2 for capture (contributed by Lianpeng)
4. If QTimeMachine cannot parse the Xstyle file, or on the CLT platform, only the capture aiRawIspIn and YuvEnhance images are drawn
5. Supports debug-info adaptation for the current Moon Stage sunset logic
6. Adds the three preview dytonesync debug thumbnails and the capture dytonesync sync_msc_lut; relies on the debug info parsed by QTimeMachine
7. The image layout can be customized in config.yaml
8. User guide: https://wiki.huawei.com/domains/95501/wiki/163974/WIKI202412315590569
'''
# data paths
path0 = r"\\10.185.104.154\Cam_Pub\00.SKP_color_data\1_自测数据\KP长焦\1110-模型验证\QT"
path1 = os.path.join(path0, 'simu_2.0.0') # output directory for parsed results
os.makedirs(path1, exist_ok=True)
jpgs = glob(path0 + "/*.jpg")
debug = False # debug mode: parse only the first image
parallel = False # parallel processing
phone_name = None # None: auto-detect from the file name; to specify a phone use: CLS, PLR, PLA, ...
cam_type = None # None: read automatically from the debug info; on the error "[error] parse.py LINE 278 ...", add the anonymized module name under modules in config.yaml, or specify the type: main, wide, tele
lightness = 0.9 # lightness defaults to 1 and scales the brightness of P1-P6; use a lower value such as 0.4 for solid-color scenes, 0.9 for ordinary scenes, and a somewhat higher value in HDR scenes to inspect dark regions
if debug:
parse_image(1, 1, jpgs[0], path1, phone_name, cam_type, lightness)
exit()
if parallel:
para = Parallel(n_jobs=16, backend='multiprocessing')
para(delayed(try_parse_image)(idx + 1, len(jpgs), jpg, path1, phone_name, cam_type, lightness) for idx, jpg in enumerate(jpgs))
else:
for idx, jpg in enumerate(jpgs):
try_parse_image(idx + 1, len(jpgs), jpg, path1, phone_name, cam_type, lightness)