Add a yes/no flag dimension

Follow the instructions below to add a boolean (yes/no, 1/0) flag dimension.

First, you need a column/field that contains only 1 or 0, where 1 stands for yes/true and 0 stands for no/false.
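
If the source data does not already have such a column, it can usually be derived with a simple CASE expression in the underlying view. The snippet below is only a minimal T-SQL sketch: the table name (claims), the column refill_number, and the rule that a positive refill number counts as "yes" are assumptions for illustration, not taken from the real combineclaimvw definition.

-- Hypothetical sketch: derive a 1/0 refill flag from the source data.
-- The table claims, the column refill_number, and the "refill_number > 0"
-- rule are assumptions for illustration only.
SELECT
    c.claim_id,
    CASE WHEN c.refill_number > 0 THEN 1 ELSE 0 END AS RefillFlag  -- 1 = yes, 0 = no
FROM claims AS c;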

Add a new yes/no dimension based on this column.

Rename YesNo to Refill Flag, then add a relationship to all of the measure groups on combineclaimvw.

After the relationships are set up, save and process the cube.
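
Before processing, it is worth a quick check that the flag column really does contain only 0 and 1, since any other value (or a NULL) would show up as an unknown member in the new dimension. Assuming the hypothetical RefillFlag column from the sketch above lives on combineclaimvw:

-- Sanity check before processing the cube: the flag should only ever be 0 or 1.
SELECT RefillFlag, COUNT(*) AS row_count
FROM combineclaimvw
GROUP BY RefillFlag
ORDER BY RefillFlag;  -- expect exactly two rows: 0 and 1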

{ "arrowMsgList": [ { "joinType": "OUTER", "arrowConnectMsgList": [ { "left": "id", "right": "model_id" } ], "start": { "x": 460.36248779296875, "y": 338.1999969482422, "id": 1760346037276, "tableName": "model_info" }, "end": { "x": 739.3624877929688, "y": 346.1999969482422, "id": 1760346039708, "tableName": "model_field" } } ], "tableInfoList": [ { "columnInfoList": [ { "columnName": "id", "isNullable": "NO", "columnType": "bigint(20)", "columnComment": "id,自增序列", "tableName": "model_info", "property": "DIMENSION" }, { "columnName": "datasource_id", "isNullable": "YES", "columnType": "bigint(20)", "columnComment": "数据源id", "tableName": "model_info", "property": "DIMENSION" }, { "columnName": "name", "isNullable": "YES", "columnType": "varchar(60)", "columnComment": "模型名称", "tableName": "model_info", "property": "DIMENSION" }, { "columnName": "code", "isNullable": "YES", "columnType": "varchar(20)", "columnComment": "模型编码", "tableName": "model_info", "property": "DIMENSION" }, { "columnName": "description", "isNullable": "YES", "columnType": "varchar(255)", "columnComment": "模型说明", "tableName": "model_info", "property": "DIMENSION" } ], "x": 267.36248779296875, "y": 153.1999969482422, "tableName": "model_info", "id": 1760346037276 }, { "columnInfoList": [ { "columnName": "id", "isNullable": "NO", "columnType": "bigint(20)", "columnComment": "id,自增序列", "tableName": "model_field", "property": "DIMENSION" }, { "columnName": "model_id", "isNullable": "YES", "columnType": "bigint(20)", "columnComment": "模型id", "tableName": "model_field", "property": "DIMENSION" }, { "columnName": "name", "isNullable": "YES", "columnType": "varchar(100)", "columnComment": "字段名称", "tableName": "model_field", "property": "DIMENSION" }, { "columnName": "name_alias", "isNullable": "YES", "columnType": "varchar(100)", "columnComment": "字段名称别名", "tableName": "model_field", "property": "DIMENSION" }, { "columnName": "comment", "isNullable": "YES", "columnType": "varchar(100)", "columnComment": "字段描述", "tableName": "model_field", "property": "DIMENSION" }, { "columnName": "type", "isNullable": "YES", "columnType": "varchar(100)", "columnComment": "字段类型", "tableName": "model_field", "property": "DIMENSION" }, { "columnName": "belong_table", "isNullable": "YES", "columnType": "varchar(100)", "columnComment": "字段归属表", "tableName": "model_field", "property": "DIMENSION" }, { "columnName": "property", "isNullable": "YES", "columnType": "varchar(50)", "columnComment": "字段属性,度量 维度 时间维度", "tableName": "model_field", "property": "DIMENSION" } ], "x": 743.3624877929688, "y": 155.1999969482422, "tableName": "model_field", "id": 1760346039708 } ], "tableFilterList": [ { "id": "1760346037276", "tableName": "model_info", "columnName": "use_flag", "match": "=", "value": "0" }, { "id": "1760346039708", "tableName": "model_field", "columnName": "use_flag", "match": "=", "value": "0" } ] } 请用我这个结构,使用java语言,写出可以拼接成完整sql的代码,需要支持常见的数据库
12-04
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # import os import argparse import torch from src.loader import load_images, DataSampler from src.utils import initialize_exp, bool_flag, attr_flag, check_attr from src.model import AutoEncoder, LatentDiscriminator, PatchDiscriminator, Classifier from src.training import Trainer from src.evaluation import Evaluator # parse parameters parser = argparse.ArgumentParser(description='Images autoencoder') parser.add_argument("--name", type=str, default="default", help="Experiment name") parser.add_argument("--img_sz", type=int, default=256, help="Image sizes (images have to be squared)") parser.add_argument("--img_fm", type=int, default=3, help="Number of feature maps (1 for grayscale, 3 for RGB)") parser.add_argument("--attr", type=attr_flag, default="Smiling,Male", help="Attributes to classify") parser.add_argument("--instance_norm", type=bool_flag, default=False, help="Use instance normalization instead of batch normalization") parser.add_argument("--init_fm", type=int, default=32, help="Number of initial filters in the encoder") parser.add_argument("--max_fm", type=int, default=512, help="Number maximum of filters in the autoencoder") parser.add_argument("--n_layers", type=int, default=6, help="Number of layers in the encoder / decoder") parser.add_argument("--n_skip", type=int, default=0, help="Number of skip connections") parser.add_argument("--deconv_method", type=str, default="convtranspose", help="Deconvolution method") parser.add_argument("--hid_dim", type=int, default=512, help="Last hidden layer dimension for discriminator / classifier") parser.add_argument("--dec_dropout", type=float, default=0., help="Dropout in the decoder") parser.add_argument("--lat_dis_dropout", type=float, default=0.3, help="Dropout in the latent discriminator") parser.add_argument("--n_lat_dis", type=int, default=1, help="Number of latent discriminator training steps") parser.add_argument("--n_ptc_dis", type=int, default=0, help="Number of patch discriminator training steps") parser.add_argument("--n_clf_dis", type=int, default=0, help="Number of classifier discriminator training steps") parser.add_argument("--smooth_label", type=float, default=0.2, help="Smooth label for patch discriminator") parser.add_argument("--lambda_ae", type=float, default=1, help="Autoencoder loss coefficient") parser.add_argument("--lambda_lat_dis", type=float, default=0.0001, help="Latent discriminator loss feedback coefficient") parser.add_argument("--lambda_ptc_dis", type=float, default=0, help="Patch discriminator loss feedback coefficient") parser.add_argument("--lambda_clf_dis", type=float, default=0, help="Classifier discriminator loss feedback coefficient") parser.add_argument("--lambda_schedule", type=float, default=500000, help="Progressively increase discriminators' lambdas (0 to disable)") parser.add_argument("--v_flip", type=bool_flag, default=False, help="Random vertical flip for data augmentation") parser.add_argument("--h_flip", type=bool_flag, default=True, help="Random horizontal flip for data augmentation") parser.add_argument("--batch_size", type=int, default=32, help="Batch size") parser.add_argument("--ae_optimizer", type=str, default="adam,lr=0.0002", help="Autoencoder optimizer (SGD / RMSprop / Adam, etc.)") parser.add_argument("--dis_optimizer", type=str, default="adam,lr=0.0002", help="Discriminator optimizer (SGD / 
RMSprop / Adam, etc.)") parser.add_argument("--clip_grad_norm", type=float, default=5, help="Clip gradient norms (0 to disable)") parser.add_argument("--n_epochs", type=int, default=1000, help="Total number of epochs") parser.add_argument("--epoch_size", type=int, default=50000, help="Number of samples per epoch") parser.add_argument("--ae_reload", type=str, default="", help="Reload a pretrained encoder") parser.add_argument("--lat_dis_reload", type=str, default="", help="Reload a pretrained latent discriminator") parser.add_argument("--ptc_dis_reload", type=str, default="", help="Reload a pretrained patch discriminator") parser.add_argument("--clf_dis_reload", type=str, default="", help="Reload a pretrained classifier discriminator") parser.add_argument("--eval_clf", type=str, default="", help="Load an external classifier for evaluation") parser.add_argument("--debug", type=bool_flag, default=False, help="Debug mode (only load a subset of the whole dataset)") params = parser.parse_args() # check parameters check_attr(params) assert len(params.name.strip()) > 0 assert params.n_skip <= params.n_layers - 1 assert params.deconv_method in ['convtranspose', 'upsampling', 'pixelshuffle'] assert 0 <= params.smooth_label < 0.5 assert not params.ae_reload or os.path.isfile(params.ae_reload) assert not params.lat_dis_reload or os.path.isfile(params.lat_dis_reload) assert not params.ptc_dis_reload or os.path.isfile(params.ptc_dis_reload) assert not params.clf_dis_reload or os.path.isfile(params.clf_dis_reload) assert os.path.isfile(params.eval_clf) assert params.lambda_lat_dis == 0 or params.n_lat_dis > 0 assert params.lambda_ptc_dis == 0 or params.n_ptc_dis > 0 assert params.lambda_clf_dis == 0 or params.n_clf_dis > 0 # initialize experiment / load dataset logger = initialize_exp(params) data, attributes = load_images(params) train_data = DataSampler(data[0], attributes[0], params) valid_data = DataSampler(data[1], attributes[1], params) # build the model ae = AutoEncoder(params).cuda() lat_dis = LatentDiscriminator(params).cuda() if params.n_lat_dis else None ptc_dis = PatchDiscriminator(params).cuda() if params.n_ptc_dis else None clf_dis = Classifier(params).cuda() if params.n_clf_dis else None eval_clf = torch.load(params.eval_clf).cuda().eval() # trainer / evaluator trainer = Trainer(ae, lat_dis, ptc_dis, clf_dis, train_data, params) evaluator = Evaluator(ae, lat_dis, ptc_dis, clf_dis, eval_clf, valid_data, params) for n_epoch in range(params.n_epochs): logger.info('Starting epoch %i...' % n_epoch) for n_iter in range(0, params.epoch_size, params.batch_size): # latent discriminator training for _ in range(params.n_lat_dis): trainer.lat_dis_step() # patch discriminator training for _ in range(params.n_ptc_dis): trainer.ptc_dis_step() # classifier discriminator training for _ in range(params.n_clf_dis): trainer.clf_dis_step() # autoencoder training trainer.autoencoder_step() # print training statistics trainer.step(n_iter) # run all evaluations / save best or periodic model to_log = evaluator.evaluate(n_epoch) trainer.save_best_periodic(to_log) logger.info('End of epoch %i.\n' % n_epoch) 我在哪一句里加上正确的路径解决错误呢
05-29
#!/bin/env python # -*- coding: utf-8 -*- ################################################# # Author: songwenhua # Function: MI-13自动出具POFV孔口图纸 # Date: 2025-11-18 # v1.00 songwenhua 用户需求号: 2796 任务ID:2110 # LOAD_MODE__ # /home/incam/Desktop/scripts/danban/auto_make_tuzhi/specific_tuzhi/auto_POFV.py import os import math import re import sys import string import faulthandler faulthandler.enable() from PyQt5.QtGui import * from PyQt5.QtCore import * from PyQt5.QtWidgets import * from PyQt5 import QtWidgets from py39COM import Gateway, InCAM from py39Tools import TableWidget from messageBox import messageBox from ICO import ICO from ICNET import ICNET from reportlab.pdfgen.canvas import Canvas as ReportCanvas from reportlab.platypus import PageBreak, FrameBreak, Frame, Table, TableStyle from reportlab.platypus.doctemplate import SimpleDocTemplate, PageTemplate, NextPageTemplate from DrawingCreate import DrawingTemplate from reportlab.lib import colors from Gerber2SVG import Origin, Feature from Gerber2Canvas import Gerber2Canvas, Dimension, Direct from reportlab.lib.pagesizes import A4 from reportlab.lib.units import mm from EqHelper import EqHelper from params_window import Ui_Form class Dr_POFV_Map(QWidget): A5 = (100*mm, 100*mm) pageSize = (A5[0], A5[1]) # 页面大小 # pageSize = (A4[1], A4[0]) # 页面大小 bgTemp: DrawingTemplate # 画PDF的模板类 doc: SimpleDocTemplate drawingVer = '01' # 版本号 drawingParams = dict() # PDF模板参数 canvas: ReportCanvas # 画布 tmpLays = [] # 需要删除的临时层 sigDimension = {} # 图纸编号 MI-13 drawNo = 'MI-13' # str # 铜厚测量图纸 drawName: str = '孔到孔距离标注图纸' workstation = None dirPath = None # 公共盘路径 filePath = None # 文件路径 datLay = None # dat层 datWork = 'dat_copy_work_lay' # dat复制的工作层 coorArr = [] # 坐标序列 def __init__(self): self.JOB = os.environ.get('JOB', None) self.STEP = os.environ.get('STEP', None) INCAM_DEBUG = os.getenv('INCAM_DEBUG', None) if INCAM_DEBUG == 'yes': self.incam = Gateway() self.JOB = self.incam.job_name self.STEP = self.incam.step_name self.pid = self.incam.pid else: self.incam = InCAM() self.pid = os.getpid() self.ico = ICO(incam=self.incam) self.icNet = ICNET(incam=self.incam) self.jobName = self.ico.SimplifyJobName(jobName=self.JOB) self.dbSite = self.ico.GetDBSite(JOB=self.JOB) self.SITE = self.ico.GetSite(JOB=self.JOB) self.layerMatrix = self.ico.GetLayerMatrix() self.step_list = self.ico.GetStepList() self.workStep = self.ico.GetEditList()[0] #得到edit步骤 basePath = self.ico.GetWorkFilePath(mode='withname') self.imgPath = os.path.join(self.ico.GetWorkFilePath(), 'output', 'laser_image', self.jobName) os.makedirs(self.imgPath, exist_ok=True) self.imgPath = os.path.join(self.imgPath, 'pofv_ring.png') # 如果公盘文件夹不存在,则将pdf存放在“/tmp”下 if not os.path.isdir(basePath) or not os.path.exists(basePath): basePath = '/tmp' self.filePath = os.path.join(basePath, f"{self.jobName}-MI-13-POFV.pdf") # # 网络共享的基础路径(父目录) # base_network_path = '//10.10.80.178/workfile/output/pdf' # job_folder = self.jobName # target_path = os.path.join(base_network_path, job_folder) # 完整目标路径 # basePath = '/tmp' # 默认降级路径 # try: # # 检查基础网络路径是否存在(即 pdf/ 目录) # if not os.path.exists(base_network_path): # raise OSError(f"Base network path does not exist: {base_network_path}") # if not os.path.isdir(base_network_path): # raise OSError(f"Base network path is not a directory: {base_network_path}") # # 尝试创建 jobName 子目录 # os.makedirs(target_path, exist_ok=True) # # 再次确认有写权限 # test_file = os.path.join(target_path, '.test_write') # with open(test_file, 'w') as f: # f.write('test') # os.remove(test_file) # # 如果一切正常,使用网络路径 # basePath = 
target_path # except Exception as e: # print(f"[WARNING] Cannot use network path: {e}") # messageBox.showDialog( # title='提示', # text=f'无法访问网络路径,将保存至临时目录 (/tmp)。\n错误: {str(e)}', # buttons=['OK'], # defaultButton='OK' # ) # basePath = '/tmp' # # === 设置最终路径 === # self.imgPath = os.path.join(self.ico.GetWorkFilePath(), 'output', 'laser_image', self.jobName) # os.makedirs(self.imgPath, exist_ok=True) # self.imgPath = os.path.join(self.imgPath, 'pofv_ring.png') # self.filePath = os.path.join(basePath, f"{self.jobName}-MI-13-POFV.pdf") self.totalPage = 1 # PDF总页数(外层) # 获取中文用户名 user = self.ico.GetUserName() usList = ICNET.GetCTypeUserInfo('user2CN') self.userCN = user if user in usList: self.userCN = usList[user] self.run() def find_keys_by_start_or_end(self, data, target): """ 查找所有 start 或 end 等于 target 的钻孔层 """ result = [] for key, value in data.items(): if isinstance(value, dict): # 确保是字典 start = value.get('start') end = value.get('end') if start == target or end == target: result.append(key) return result def chk_touch(self, dat_layer, intersect_layer): self.ico.ClearLayer() self.ico.DispWork(dat_layer) # 重置并设置基础过滤器 self.incam.COM("reset_filter_criteria,filter_name=,criteria=all") self.incam.COM("set_filter_type,filter_name=,lines=yes,pads=yes,surfaces=yes,arcs=yes,text=yes") self.incam.COM("set_filter_polarity,filter_name=,positive=yes,negative=yes") # 找到dat层中touch相交层的物体 self.incam.COM(f"sel_ref_feat,layers={intersect_layer},use=filter,mode=touch,pads_as=shape,f_types=line;pad;surface;arc;text,polarity=positive;negative,include_syms=,exclude_syms=") self.incam.COM('get_select_count') selected_features = int(self.incam.COMANS) return selected_features def get_coords(self, feature): """ 从任意图元中快速提取一个坐标点 (x, y) 特别处理 surface 的 orig 字段 """ # 优先返回已有的 cx/cy pad # all_cor = [] if 'cx' in feature and 'cy' in feature: return round(feature['cx'], 3), round(feature['cy'], 3) # 对于 line 等有 x0/y0 的类型 if 'x0' in feature and 'y0' in feature: return round(feature['x0'], 3), round(feature['y0'], 3) # 处理 surface 的 orig if feature.get('type') == 'surface' and isinstance(feature.get('orig'), list): pattern = r'#O[BS]\s+([-\d.]+)\s+([-\d.]+)' for line in feature['orig']: match = re.search(pattern, line) if match: x = float(match.group(1)) y = float(match.group(2)) return round(x, 3), round(y, 3) # 返回第一个有效坐标即可 return None def run(self): self.ico.OpenStep(step=self.workStep, job=self.JOB) site = self.ico.GetSite(self.JOB) layerMatrix = self.ico.GetLayerMatrix() sig_out_list = layerMatrix['sigOutLay'] sm_lay_list = layerMatrix['smAllLay'] drill_through = layerMatrix['drlThrough'] helper = EqHelper(self.incam, self.JOB, self.workStep) pofv_flag = helper.getIsPOFV() if not (site == '301' and pofv_flag is True): return 0 # 存储每层处理结果 layer_results = {} tmp_drill_layer = "drill_final" self.ico.CreateOrEmptyLay([tmp_drill_layer]) for i, sig_layer in enumerate(sig_out_list): sm_layer = sm_lay_list[i] matching_layers = self.find_keys_by_start_or_end(drill_through, sig_layer) temp_intersect = f"int_{sig_layer}" # is_front = i == 0 # 假设第一个为正面 if not matching_layers: continue # 清理并创建相交层 if self.ico.IsLayerExist([temp_intersect]): self.ico.DelLayer([temp_intersect]) self.ico.CreateOrEmptyLay([temp_intersect]) self.ico.ClearAll() self.ico.DispWork(layer=sig_layer) self.ico.DispLayer(layer=sm_layer) self.ico.GetLayIntersect(self.workStep, sm_layer, sig_layer, acc=0.01) self.incam.COM(f"matrix_rename_layer,job={self.JOB},matrix=matrix,layer=intersect,new_name={temp_intersect}") self.ico.ClearLayer() self.ico.DispWork(temp_intersect) 
inter_info = self.ico.GetFeatureFullInfo(self.workStep, layer=temp_intersect) if not inter_info: messageBox.showDialog( title='提示', text=f'{sm_layer} 和 {sig_layer} 没有相交部分', buttons=['OK'], defaultButton='OK' ) self.ico.DelLayer(temp_intersect) continue #开始检测该层是否有有效钻孔匹配 result = { 'has_full_match': False, # 缩小50um后仍匹配 'has_raw_match': False, # 原始匹配 'has_expanded_match': False, # 外扩100um后匹配 'dat_layer': None, 'temp_intersect': temp_intersect } found_in_this_layer = False for dat_layer in matching_layers: # 情况A:检查原始是否 touch selected_features = self.chk_touch(dat_layer, temp_intersect) if selected_features > 0: # 尝试缩小50um shrunk_layer = temp_intersect + '-100' self.ico.ClearLayer() self.ico.DispWork(temp_intersect) self.incam.COM( 'copy_layer, source_job = %s, source_step = %s, source_layer = %s, dest = layer_name, ' 'dest_step =, dest_layer = %s, mode = replace, invert = no, copy_notes = no, ' 'copy_attrs = new_layers_only, copy_sr_feat = no' % ( self.JOB, self.workStep, temp_intersect, shrunk_layer) ) self.ico.ClearLayer() # 缩小50um self.ico.DispWork(shrunk_layer) self.incam.COM("rv_tab_empty,report=resize_rep,is_empty=yes") self.incam.COM("sel_resize,size=-100,corner_ctl=no") self.incam.COM("rv_tab_view_results_enabled,report=resize_rep,is_enabled=no,serial_num=-1,all_count=-1") selected_shrunk = self.chk_touch(dat_layer, shrunk_layer) if selected_shrunk > 0: result['has_full_match'] = True result['dat_layer'] = dat_layer self.ico.DelLayer(shrunk_layer) found_in_this_layer = True break # 成功即退出 dat_layer 循环 else: result['has_raw_match'] = True result['dat_layer'] = dat_layer self.ico.DelLayer(shrunk_layer) #情况B:原始无 touch,尝试外扩+100um else: expanded_layer = temp_intersect + '+100' self.ico.ClearLayer() self.ico.DispWork(temp_intersect) self.incam.COM( 'copy_layer, source_job = %s, source_step = %s, source_layer = %s, dest = layer_name, ' 'dest_step =, dest_layer = %s, mode = replace, invert = no, copy_notes = no, ' 'copy_attrs = new_layers_only, copy_sr_feat = no' % ( self.JOB, self.workStep, temp_intersect, expanded_layer) ) self.ico.ClearLayer() # 外扩50um self.ico.DispWork(expanded_layer) self.incam.COM("rv_tab_empty,report=resize_rep,is_empty=yes") self.incam.COM("sel_resize,size=+100,corner_ctl=no") self.incam.COM("rv_tab_view_results_enabled,report=resize_rep,is_enabled=no,serial_num=-1,all_count=-1") selected_expanded = self.chk_touch(dat_layer, expanded_layer) if selected_expanded > 0: result['has_expanded_match'] = True result['dat_layer'] = dat_layer result['expanded_layer'] = expanded_layer # 保留用于后续复制 found_in_this_layer = True break # 成功即退出 else: self.ico.DelLayer(expanded_layer) # 保存当前层结果 if found_in_this_layer or result['has_raw_match']: layer_results[sig_layer] = result else: self.ico.DelLayer(temp_intersect) # 无任何匹配,清理 # 二、根据收集结果进行最终输出决策 final_copied = False # 1. 
优先:正面 缩小50um后有匹配 front_sig = sig_out_list[0] if front_sig in layer_results: res = layer_results[front_sig] if res['has_full_match']: temp = res['temp_intersect'] dat = res['dat_layer'] shrunk = temp + '-100' # 重建并使用缩小层 self.ico.ClearLayer() self.ico.DispWork(temp) self.incam.COM( 'copy_layer, source_job = %s, source_step = %s, source_layer = %s, dest = layer_name, ' 'dest_step =, dest_layer = %s, mode = replace, , invert = no, copy_notes = no, ' 'copy_attrs = new_layers_only, copy_sr_feat = no' % (self.JOB, self.workStep, temp, shrunk)) self.incam.COM("sel_resize,size=-100,corner_ctl=no") self.chk_touch(dat, shrunk) self.incam.COM(f"sel_copy_other,dest=layer_name,target_layer={tmp_drill_layer},invert=no,dx=0,dy=0,size=0,x_anchor=-1.36612,y_anchor=-1.03115,subsystem=1-Up-Edit") self.ico.DelLayer(shrunk) final_copied = True # 2. 背面 缩小50um后有匹配 if not final_copied: for i, sig_layer in enumerate(sig_out_list): if i == 0: continue # 跳过正面 if sig_layer in layer_results: res = layer_results[sig_layer] if res['has_full_match']: temp = res['temp_intersect'] dat = res['dat_layer'] shrunk = temp + '-100' self.ico.ClearLayer() self.ico.DispWork(temp) self.incam.COM( 'copy_layer, source_job = %s, source_step = %s, source_layer = %s, dest = layer_name, ' 'dest_step =, dest_layer = %s, mode = replace, invert = no, copy_notes = no, ' 'copy_attrs = new_layers_only, copy_sr_feat = no' % ( self.JOB, self.workStep, temp, shrunk_layer)) self.incam.COM("sel_resize,size=-100,corner_ctl=no") self.chk_touch(dat, shrunk) self.incam.COM(f"sel_copy_other,dest=layer_name,target_layer={tmp_drill_layer},invert=no,dx=0,dy=0,size=0,x_anchor=-1.36612,y_anchor=-1.03115,subsystem=1-Up-Edit") self.ico.DelLayer(shrunk) final_copied = True break # 3. 任意背面 有原始或外扩匹配 if not final_copied: for i, sig_layer in enumerate(sig_out_list): if i == 0: continue if sig_layer in layer_results: res = layer_results[sig_layer] temp = res['temp_intersect'] dat = res['dat_layer'] self.ico.ClearLayer() self.ico.DispWork(temp) if res['has_raw_match']: self.chk_touch(dat, temp) elif res['has_expanded_match']: exp_layer = temp + '+100' self.incam.COM( 'copy_layer, source_job = %s, source_step = %s, source_layer = %s, dest = layer_name, ' 'dest_step =, dest_layer = %s, mode = replace, invert = no, copy_notes = no, ' 'copy_attrs = new_layers_only, copy_sr_feat = no' % ( self.JOB, self.workStep, temp, exp_layer)) self.incam.COM("sel_resize,size=+100,corner_ctl=no") self.chk_touch(dat, exp_layer) self.ico.DelLayer(exp_layer) self.incam.COM(f"sel_copy_other,dest=layer_name,target_layer={tmp_drill_layer},invert=no,dx=0,dy=0,size=0,x_anchor=-1.36612,y_anchor=-1.03115,subsystem=1-Up-Edit") final_copied = True break # 4. 
最后:正面 有原始或外扩匹配(但没有进入缩小成功分支) if not final_copied and front_sig in layer_results: res = layer_results[front_sig] temp = res['temp_intersect'] dat = res['dat_layer'] self.ico.ClearLayer() self.ico.DispWork(temp) if res['has_raw_match']: self.chk_touch(dat, temp) self.incam.COM(f"sel_copy_other,dest=layer_name,target_layer={tmp_drill_layer},invert=no,dx=0,dy=0,size=0,x_anchor=-1.36612,y_anchor=-1.03115,subsystem=1-Up-Edit") final_copied = True elif res['has_expanded_match']: exp_layer = temp + '+100' # self.incam.COM('copy_layer, ..., dest_layer=%s' % exp_layer) self.incam.COM( 'copy_layer, source_job = %s, source_step = %s, source_layer = %s, dest = layer_name, ' 'dest_step =, dest_layer = %s, mode = replace, invert = no, copy_notes = no, ' 'copy_attrs = new_layers_only, copy_sr_feat = no' % ( self.JOB, self.workStep, temp, exp_layer)) self.incam.COM("sel_resize,size=+100,corner_ctl=no") self.chk_touch(dat, exp_layer) self.incam.COM(f"sel_copy_other,dest=layer_name,target_layer={tmp_drill_layer},invert=no,dx=0,dy=0,size=0,x_anchor=-1.36612,y_anchor=-1.03115,subsystem=1-Up-Edit") self.ico.DelLayer(exp_layer) final_copied = True # 5. 完全失败 if not final_copied: messageBox.showDialog( title='提示', text='正面和背面均未找到符合条件的钻孔', buttons=['OK'], defaultButton='OK' ) # 清理所有临时层 for sig_layer in sig_out_list: base = f"int_{sig_layer}" self.ico.DelLayer([base, base+'+100']) self.sigDimension[sig_layer] = {} self.sigDimension[sig_layer]['point_x'] = [] self.sigDimension[sig_layer]['point_y'] = [] self.ico.ClearLayer() # 获取要标注的孔坐标(来自 tmp_drill_layer) self.ico.DispWork(tmp_drill_layer) #获取最终孔层的信息,主要是要其中任意一个孔的中心坐标 padList = self.incam.INFO( '-t layer -e %s/%s/%s -m script -d FEATURES -o consider_origin+feat_index+f0' % ( self.JOB, self.workStep, tmp_drill_layer)) for pad in padList: pad.strip() # 3 #P 0.927 1.915 r261 P 1 0 N;.drill=via,.drill_flag=103,.combined_size=0.000000 strList = pad.split() match1 = re.search(r'#(\d+)\s+#P\s+', pad) if match1: midpointX = '%0.3f' % ( float(strList[2])) # 孔盘中点的X坐标 midpointY = '%0.3f' % ( float(strList[3])) # 孔盘中点的Y坐标 self.sigDimension[sig_layer]['point_x'].append(midpointX) self.sigDimension[sig_layer]['point_y'].append(midpointY) self.ico.ClearLayer() # self.ico.DispWork(sig_layer) # self.ico.DispLayer(sm_layer) # self.ico.DispLayer(dat_layer) self.__renderPDF(sig_layer) #在找到的那一层操作,正面或背面 self.ico.DelLayer(self.tmpLays) mes = f'输出目录:{self.filePath},继续将打开PDF' ans = messageBox.showMessage( bitmap='information', title='PDF输出完成', message=mes, buttons=['退出', '继续']) if ans == '继续': os.system(f"/usr/bin/evince {self.filePath} &") self.incam.COM('disp_on') # TODO 转换成png 放到output里面 self.__pdf2PNG() return 0 def __pdf2PNG(self): cmd = f"convert -density 120 -quality 80 -background white -alpha remove {self.filePath} {self.imgPath}" os.system(cmd) def __renderPDF(self, sig_lay): """ 渲染PDF:设置文档结构并构建内容 """ canv = ReportCanvas(self.filePath, pagesize=self.pageSize) self.canvas = canv self.drawingParams = self.__setTemplateParams() # 设置模板的默认参数 self.bgTemp = DrawingTemplate( canv, self.A5[0], self.A5[1], self.drawingParams) lM = 0 rM = 0 tM = 0 bM = 0 self.doc = SimpleDocTemplate(self.filePath, pagesize=self.pageSize, topMargin=tM, bottomMargin=bM, leftMargin=lM, rightMargin=rM, title="MI-13", author=self.userCN)#filePath:最终存放路径; pageSize:画布大小 self.__setPageFrame(self.doc) story = [] story.append(NextPageTemplate('p1')) g2c = self.__createGerber(sig_lay) story.append(FrameBreak()) story.append(g2c) story.append(PageBreak()) self.doc.build(story) # def __setPageFrame(self, doc: 
SimpleDocTemplate): """设置每一页框架分布 """ frames = [] fh = doc.height / 3 padX = self.drawingParams['padx'] padY = self.drawingParams['pady'] tableFrame = Frame(x1=padX, y1=padY + fh * 2, width=doc.width, height=fh, id='f1') gerberFrame = Frame( x1=padX, y1=padY, width=doc.width, height=fh * 2, id='f2') frames.append(tableFrame) frames.append(gerberFrame) doc.addPageTemplates([PageTemplate(id='p1', frames=frames)]) def getMergeLay(self, lay): """ 将lay备份并将备份层合并为surface """ mergeLay = f'{lay}_merge' self.ico.DelLayer(mergeLay) self.ico.ClearAll() self.ico.DispWork(lay, number=1) self.incam.COM( f'sel_copy_other,dest=layer_name,target_layer={mergeLay},invert=no,dx=0,dy=0,size=0,x_anchor=0,y_anchor=0') self.ico.DispWork(mergeLay, number=1) self.incam.COM( 'sel_cont_resize,accuracy=25.4,break_to_islands=yes,island_size=0,hole_size=0,drill_filter=no,corner_ctl=no') return mergeLay def getDnxSigLayMapping(self, lay: str): """ 获取dnx孔层与其钻带的起始信号层之间的映射关系(1:1) :param dnxLayers:线路层 :return:起始终止是线路层的所有钻孔 """ dnxSigLayMapping = [] for drl in self.ico.GetLayerMatrix()['drlAllLay']: startLay = self.ico.GetLayerMatrix()['drlThrough'][drl]['start'] endLay = self.ico.GetLayerMatrix()['drlThrough'][drl]['end'] if startLay == lay or endLay == lay: dnxSigLayMapping.append(drl) return dnxSigLayMapping # 分析创建光绘的关键部分 def __createGerber(self, sigLay): toRead = [] # 1. 合并信号层为 surface mergeSigLay = self.getMergeLay(sigLay) self.tmpLays.append(mergeSigLay) mergeSigLayFilePath = self.ico.getFeatureFile(self.JOB, self.workStep, mergeSigLay) toRead.append(Feature(mergeSigLayFilePath, layerType='signal')) # 2. 添加钻孔层 drlSet = self.getDnxSigLayMapping(sigLay) for drl in drlSet: drlFilePath = self.ico.getFeatureFile(self.JOB, self.workStep, drl) toRead.append(Feature(drlFilePath, layerType='solder_mask')) # === 创建高亮圆圈层 === highlight_layer = "pofv_highlight_circle" if self.ico.IsLayerExist([highlight_layer]): self.ico.DelLayer([highlight_layer]) self.ico.CreateOrEmptyLay(layer_list=[highlight_layer]) # 获取要高亮的孔坐标(只标第一个) if sigLay not in self.sigDimension or not self.sigDimension[sigLay]['point_x']: # 没有坐标,跳过画圈 pass else: print("111111111111111111111111111") x = float(self.sigDimension[sigLay]['point_x'][0]) y = float(self.sigDimension[sigLay]['point_y'][0]) self.ico.ClearLayer() self.ico.DispWork(highlight_layer) # self.ico.AddPad(x,y,'r120') self.incam.COM(f"add_pad,symbol=r180,polarity=positive,x={x},y={y},mirror=no,angle=0,direction=ccw,resize=0,xscale=1,yscale=1") # self.incam.COM("rv_tab_empty,report=cut_data_rep,is_empty=yes") # self.incam.COM("sel_cut_data,det_tol=25.4,con_tol=25.4,rad_tol=2.54,ignore_width=yes,filter_overlaps=no,delete_doubles=no,use_order=yes,ignore_holes=none,start_positive=yes,polarity_of_touching=same,contourize=yes,simplify=yes,resize_thick_lines=no") # self.incam.COM("rv_tab_view_results_enabled,report=cut_data_rep,is_enabled=no,serial_num=-1,all_count=-1") self.incam.COM("sel_feat2outline,width=3.0,location=on_edge,offset=0.1,polarity=as_feature,keep_original=no,text2limit=no")# 轮廓线 # self.incam.COM(f"display_layer,name={highlight_layer},display=yes,number=3") self.incam.COM("arc2lines,arc_line_tol=1") # self.incam.COM("add_slot,symbol=r20,x_center=0.7836725,y_center=1.85691,len=0.7306875,angle=0.98315767,direction=ccw,dcode=0,drill_type=nplate,attributes=no") # self.incam.COM("add_slot,symbol=r20,x_center=3.7836725,y_center=1.85691,len=0.7306875,angle=0.98315767,direction=ccw,dcode=0,drill_type=nplate,attributes=no") self.ico.DelLayer(highlight_layer + '+++') self.ico.ClearLayer() # sys.exit(0) # 
高亮层也导出为 Gerber Feature self.tmpLays.append(highlight_layer) # 确保后续清理" hlight_path = self.ico.getFeatureFile(self.JOB, self.workStep, highlight_layer) toRead.append(Feature(hlight_path, layerType='signal', strokeColor = "#fcfcf6")) # === 计算尺寸和偏移 === unitSizeX, unitSizeY = self.ico.GetStepSize(self.workStep)[0:2] gbWidth = self.doc.width * 0.6 gbHeigth = self.doc.height * 2 / 3 * 0.6 offsetX = self.getOffsetXY(gbWidth, gbHeigth, unitSizeX, unitSizeY, pagesize=( self.doc.width, self.doc.height * 2 / 3))[0] # 创建绘图对象 g2c = Gerber2Canvas( gbWidth, gbHeigth, unitSizeX, unitSizeY, offsetX, 0.1, Origin.leftdown, self.canvas, toRead, rotate=0 ) # === 添加标注箭头=== dimension = [] if sigLay in self.sigDimension and self.sigDimension[sigLay]['point_x']: pointX = self.sigDimension[sigLay]['point_x'][0] pointY = self.sigDimension[sigLay]['point_y'][0] dimensionSingle = Dimension( x0=unitSizeX / 2, y0=unitSizeY * 1.2, x1=float(pointX), y1=float(pointY), direct=Direct.one_arrow, dist='POFV孔铜厚度测量位置', dimColor="#0400FF", # dimLineColor="#0800FF" # ) dimension.append(dimensionSingle) if dimension: g2c.addDimension(dimension) return g2c @staticmethod def getOffsetXY(gbWidth: float, gbHeight: float, unitSizeX: float, unitSizeY: float, pagesize: tuple = (A5[0], A5[1])): """ 获取使Gerber在PDF中居中显示的偏移量 :param gbWidth: pdf中 gerber宽度 :param gbHeight: pdf中 gerber长度 :param unitSizeX: unit宽 :param unitSizeY: unit长 :param pagesize: PDF宽和长 :return: x,y的偏移量 # 创建绘图对象 g2c = Gerber2Canvas(gbWidth,gbHeight,unitSizeX,unitSizeY,offsetX, offsetY, Origin.leftdown,self.canvas,toRead, rotate=0) """ scale1 = math.ceil(gbWidth / unitSizeX) scale2 = math.ceil(gbHeight / unitSizeY) scale = scale1 if scale1 < scale2 else scale2 offsetX = (pagesize[0] - unitSizeX * scale) / (2 * scale) offsetY = (pagesize[1] - unitSizeY * scale) / (2 * scale) return offsetX, offsetY def __setTemplateParams(self): """设置模板的默认参数""" params = {"layer_side": None, "header": "文档密级:内部公开", "footer": "此资料属广芯基板有限公司所有,未经许可,不得扩散.", "note:": "", "tolerance": "", "workstation": "蚀刻开窗-激光钻-电镀-刷板I", "drawing_name": "90874外层最小环宽监控图纸", "jobname": self.jobName, "drawing_no": "MI-13", "drawing_version": self.drawingVer, "units": "mm", "num_page": "", "total_page": self.totalPage, "create": self.userCN, "confirm": "陈伟", "approved": "刘丹洪", "padx": 5 * mm, "pady": 5 * mm, } return params def drawBackground(self, canv: ReportCanvas, doc: SimpleDocTemplate): """ 画页眉页脚用的函数 """ num = canv.getPageNumber() if num == 1: face = 'Unit Top side' else: face = 'Unit Bottom side' params = self.drawingParams params["num_page"] = str(num) params['layer_side'] = face bgTemp = DrawingTemplate(canv, A5[0], A5[1], params) bgTemp.parser() if __name__ == "__main__": app = QApplication(sys.argv) analyzer = Dr_POFV_Map() 将这份代码中的逻辑表述出来,如果需要你做成PPT进行汇报,你会如何进行重要、关键内容的汇报?
12-01
【电力系统】单机无穷大电力系统短路故障暂态稳定Simulink仿真(带说明文档)内容概要:本文档围绕“单机无穷大电力系统短路故障暂态稳定Simulink仿真”展开,提供了完整的仿真模型与说明文档,重点研究电力系统在发生短路故障后的暂态稳定性问题。通过Simulink搭建单机无穷大系统模型,模拟不同类型的短路故障(如三相短路),分析系统在故障期间及切除后的动态响应,包括发电机转子角度、转速、电压和功率等关键参数的变化,进而评估系统的暂态稳定能力。该仿真有助于理解电力系统稳定性机理,掌握暂态过程分析方法。; 适合人群:电气工程及相关专业的本科生、研究生,以及从事电力系统分析、运行与控制工作的科研人员和工程师。; 使用场景及目标:①学习电力系统暂态稳定的基本概念与分析方法;②掌握利用Simulink进行电力系统建模与仿真的技能;③研究短路故障对系统稳定性的影响及提高稳定性的措施(如故障清除时间优化);④辅助课程设计、毕业设计或科研项目中的系统仿真验证。; 阅读建议:建议结合电力系统稳定性理论知识进行学习,先理解仿真模型各模块的功能与参数设置,再运行仿真并仔细分析输出结果,尝试改变故障类型或系统参数以观察其对稳定性的影响,从而深化对暂态稳定问题的理解。
本研究聚焦于运用MATLAB平台,将支持向量机(SVM)应用于数据预测任务,并引入粒子群优化(PSO)算法对模型的关键参数进行自动调优。该研究属于机器学习领域的典型实践,其核心在于利用SVM构建分类模型,同时借助PSO的全局搜索能力,高效确定SVM的最优超参数配置,从而显著增强模型的整体预测效能。 支持向量机作为一种经典的监督学习方法,其基本原理是通过在高维特征空间中构造一个具有最大间隔的决策边界,以实现对样本数据的分类或回归分析。该算法擅长处理小规模样本集、非线性关系以及高维度特征识别问题,其有效性源于通过核函数将原始数据映射至更高维的空间,使得原本复杂的分类问题变得线性可分。 粒子群优化算法是一种模拟鸟群社会行为的群体智能优化技术。在该算法框架下,每个潜在解被视作一个“粒子”,粒子群在解空间中协同搜索,通过不断迭代更新自身速度与位置,并参考个体历史最优解和群体全局最优解的信息,逐步逼近问题的最优解。在本应用中,PSO被专门用于搜寻SVM中影响模型性能的两个关键参数——正则化参数C与核函数参数γ的最优组合。 项目所提供的实现代码涵盖了从数据加载、预处理(如标准化处理)、基础SVM模型构建到PSO优化流程的完整步骤。优化过程会针对不同的核函数(例如线性核、多项式核及径向基函数核等)进行参数寻优,并系统评估优化前后模型性能的差异。性能对比通常基于准确率、精确率、召回率及F1分数等多项分类指标展开,从而定量验证PSO算法在提升SVM模型分类能力方面的实际效果。 本研究通过一个具体的MATLAB实现案例,旨在演示如何将全局优化算法与机器学习模型相结合,以解决模型参数选择这一关键问题。通过此实践,研究者不仅能够深入理解SVM的工作原理,还能掌握利用智能优化技术提升模型泛化性能的有效方法,这对于机器学习在实际问题中的应用具有重要的参考价值。 资源来源于网络分享,仅用于学习交流使用,请勿用于商业,如有侵权请联系我删除!