Save && Print manual

在此记录各开发语言的打印(print)方法,以及遇到的问题。

iframe 预览+print

 <iframe style="width: 100%; height:100vh;" :src="pdfSrc.toString()"></iframe>

embed 预览+print

<embed width="800" height="600" src="test_pdf.pdf"> </embed> 

<embed v-show="pdfShow" width="800" height="600" src="cant.pdf"> </embed>

object 预览+print

<object classid="clsid:CA8A9780-280D-11CF-A24D-444553540000" width="100%" height="100%" border="0">
<!--IE--> 
      <param name="_Version" value="65539"> 
      <param name="_ExtentX" value="20108"> 
      <param name="_ExtentY" value="10866"> 
      <param name="_StockProps" value="0"> 
      <param name="SRC" value="testing_pdf.pdf"> 
      
<embed src="testing_pdf.pdf" width="100%" height="800" href="testing_pdf.pdf">
</embed>
<!--FF--> 
</object> 

PDFViewer

基于pdfjs-lib,参考

打印时,比例不协调
import PDFViewer from ‘pdf-viewer-vue’

    <div>
      <PDFViewer 
      :source="pdfSrc.toString()" 
      style="height: 70vh;width:75vw;" 
      @download="handleDownload" 
      :settings="{defaultZoom:1000}"/>
    </div>

vueshowpdf

支持JS,有机会再补充。

window 的document print

    // Open a blank window, inject the image, and print it.
    // Fix: the original called print() immediately after document.write(),
    // before the <img> had loaded, which prints a blank page. Printing is
    // moved into the image's onload handler, and the document stream is
    // closed after writing so the load event can actually fire.
    const win = window.open()
    win?.document.write(
      '<img src="http://127.0.0.1:8888/res/a4cf3409335ec8d7f93b74503ed71ce6.png"' +
        ' onload="window.print(); window.close()">'
    )
    win?.document.close()

window canvas print

參考

npm install html2canvas
import html2canvas from 'html2canvas';

需要進一步調試,目前沒有走通。

 // Render page 1 of a PDF onto a canvas and print it in a pop-up window.
 //
 // Fixes vs. the original (which the author noted "never worked"):
 //  - `canvas.toDataUrl()` -> `canvas.toDataURL()`: JavaScript is
 //    case-sensitive, so the original threw a TypeError at runtime.
 //  - pdf.js `page.render()` expects the parameter key `viewport`,
 //    not `viewPort`; with the wrong key the page renders incorrectly.
 //  - The html2canvas pass was redundant: the pdf.js render target is
 //    already a canvas, so toDataURL() can be called on it directly.
 //  - Print only after the <img> has loaded, otherwise the printed
 //    page is blank.
 const canvasPrint = async () => {
    elHelper.alertBox('ssd')
    const pdf = await pdfjsLib.getDocument(
      'http://127.0.0.1:8888/res/INV_SIN2300003699.pdf'
    ).promise
    const page = await pdf.getPage(1)

    const canvas = document.createElement('canvas')
    const context = canvas.getContext('2d')
    const viewport = page.getViewport({ scale: 1 })

    canvas.height = viewport.height
    canvas.width = viewport.width

    await page.render({ canvasContext: context, viewport: viewport }).promise

    const printWindow = window.open('', 'Print')
    if (!printWindow) return // pop-up blocked by the browser
    // Print from onload so the image data is present before printing.
    printWindow.document.write(
      '<img src="' + canvas.toDataURL() + '" onload="window.print(); window.close()">'
    )
    printWindow.document.close()
  }

window 打印当前页

当然也包括pdf,不过基本上不能用于商业化,因为打印的区域需要设计,不专业。

  // Print the current page (PDF included) with the browser's native
  // print dialog. Note from the original author: not suitable for
  // commercial use as-is, because the printable region is the whole page.
  const winPrintPdf = () => window.print()

通过URL保存下载文件

npm install file-saver
  // Fetch the PDF at a fixed URL and save it locally as "file.pdf"
  // via FileSaver's saveAs().
  const savePdf = async () => {
    const objectUrl = await loadPdfData(
      'http://127.0.0.1:8888/res/INV_SIN2300003699.pdf'
    )
    saveAs(objectUrl, 'file.pdf')
  }
  /**
   * Load a PDF via pdf.js and return an object URL for its raw bytes.
   *
   * @param pdfUrl - URL of the PDF document to load.
   * @returns a "blob:" object URL pointing at the PDF data.
   *
   * NOTE(review): callers should eventually URL.revokeObjectURL() the
   * returned URL, otherwise the blob stays alive for the page lifetime.
   *
   * Fix vs. original: the original mixed `await` with a trailing
   * `.then()` chain (and had a typo comment "load paf"); the flow is
   * now a single consistent async/await style with identical behavior.
   */
  const loadPdfData = async (pdfUrl: string) => {
    // Load the PDF document.
    const pdf = await pdfjsLib.getDocument(pdfUrl).promise
    // Get the raw PDF bytes and expose them as an object URL.
    const data = await pdf.getData()
    return URL.createObjectURL(new Blob([data], { type: 'application/pdf' }))
  }
# Interactive tool that clears "compliance" result cells in Word (.docx)
# tables, using python-docx.
#
# NOTE(review): the original source was collapsed onto a few unformatted
# lines; conventional indentation has been reconstructed here. Code tokens
# are unchanged; only whitespace, comments, and docstrings differ. Nesting
# was inferred from the logic — confirm against the original if available.
from docx import Document
import os


def deep_debug_table(file_path):
    """Deep debug: print the structure of every table in the document.

    For each table, prints its row/column counts and, per row, every
    non-empty cell as ``[col]='text'``.
    """
    doc = Document(file_path)
    print("=== 深度表格分析 ===")
    for table_idx, table in enumerate(doc.tables, 1):
        print(f"\n--- 表格 {table_idx} ---")
        print(f"行数: {len(table.rows)}, 列数: {len(table.columns)}")
        # Walk every row and column; show only non-empty cells.
        for row_idx, row in enumerate(table.rows):
            print(f"行 {row_idx}: ", end="")
            row_content = []
            for col_idx, cell in enumerate(row.cells):
                cell_text = cell.text.strip()
                # Collect non-empty cells for display.
                if cell_text:
                    row_content.append(f"[{col_idx}]='{cell_text}'")
            print(" | ".join(row_content) if row_content else "空行")


def advanced_clear_cells(file_path, output_path, compliance_keywords=None):
    """Clear matching cells using multiple column-detection strategies.

    Strategy 1 finds target columns by header keywords; if that fails,
    strategy 2 infers them from cell contents. For each data row whose
    target-column text exactly matches a keyword, the target cell and the
    two cells to its left are cleared.

    :param file_path: input .docx path
    :param output_path: where the modified document is saved
    :param compliance_keywords: exact cell texts that trigger clearing
        (defaults to ["符合", "不适用"])
    :return: number of matched/cleared rows
    """
    if compliance_keywords is None:
        compliance_keywords = ["符合", "不适用"]
    doc = Document(file_path)
    cleared_count = 0
    print(f"\n=== 开始处理文档 ===")
    print(f"目标关键词: {compliance_keywords}")
    for table_idx, table in enumerate(doc.tables, 1):
        print(f"\n处理表格 {table_idx}...")
        # Strategy 1: locate target columns by their header text.
        target_columns = find_target_columns(table)
        if not target_columns:
            print(" ❌ 未找到目标列,尝试策略2...")
            # Strategy 2: infer target columns from cell content patterns.
            target_columns = manual_find_columns(table)
        for col_info in target_columns:
            col_index = col_info['index']
            col_name = col_info['name']
            print(f" ✅ 处理列 [{col_index}]: {col_name}")
            # Process every data row in this table.
            for row_idx, row in enumerate(table.rows):
                if row_idx == 0:  # skip the header row
                    continue
                if col_index < len(row.cells):
                    cell = row.cells[col_index]
                    original_text = cell.text.strip()
                    # Exact match against the keyword list.
                    if original_text in compliance_keywords:
                        print(f" 🎯 行 {row_idx+1}: 匹配 '{original_text}'")
                        # Clear the target column and the two columns before it.
                        clear_adjacent_cells(table, row_idx, col_index)
                        cleared_count += 1
                        print(f" ✅ 已清空行 {row_idx+1} 的列 {col_index-2}-{col_index}")
    # Persist the modified document.
    doc.save(output_path)
    print(f"\n=== 处理完成 ===")
    print(f"共清空了 {cleared_count} 个匹配项")
    return cleared_count


def find_target_columns(table):
    """Find target columns (strategy 1: header-keyword match).

    Returns a list of ``{'index': col, 'name': header_text}`` dicts for
    header cells containing any of the target keywords.
    """
    target_columns = []
    target_keywords = ["符合程度", "符合性", "评估结果", "检查结果", "结果"]
    if len(table.rows) == 0:
        return target_columns
    # Inspect the first row (usually the header row).
    header_row = table.rows[0]
    for col_idx, cell in enumerate(header_row.cells):
        cell_text = cell.text.strip()
        for keyword in target_keywords:
            if keyword in cell_text:
                target_columns.append({
                    'index': col_idx,
                    'name': cell_text
                })
                break
    return target_columns


def manual_find_columns(table):
    """Find target columns (strategy 2: content-pattern match).

    Samples up to 4 data rows and flags any column whose sampled values
    include "符合" or "不适用".
    """
    target_columns = []
    if len(table.rows) <= 1:
        return target_columns
    # Sample data rows, looking for columns containing "符合"/"不适用".
    sample_rows = min(5, len(table.rows))
    column_values = {}
    for col_idx in range(len(table.columns)):
        values = []
        for row_idx in range(1, sample_rows):  # skip the header row
            if row_idx < len(table.rows) and col_idx < len(table.rows[row_idx].cells):
                cell_text = table.rows[row_idx].cells[col_idx].text.strip()
                if cell_text:
                    values.append(cell_text)
        if values:
            column_values[col_idx] = values
    # Keep columns whose sampled values contain a target keyword.
    for col_idx, values in column_values.items():
        if any(val in ["符合", "不适用"] for val in values):
            target_columns.append({
                'index': col_idx,
                'name': f"自动识别列_{col_idx}"
            })
    return target_columns


def clear_adjacent_cells(table, row_idx, target_col):
    """Clear the target cell and the two cells immediately to its left."""
    row = table.rows[row_idx]
    # Clear the target column.
    if target_col < len(row.cells):
        row.cells[target_col].text = ""
    # Clear the column one to the left.
    if target_col - 1 >= 0 and target_col - 1 < len(row.cells):
        row.cells[target_col - 1].text = ""
    # Clear the column two to the left.
    if target_col - 2 >= 0 and target_col - 2 < len(row.cells):
        row.cells[target_col - 2].text = ""


def interactive_processing():
    """Interactive entry point: prompt for a file, analyze, then process."""
    print("Word文档单元格清空工具")
    print("=" * 50)
    # Prompt until an existing input file path is given.
    while True:
        input_file = input("请输入Word文件路径: ").strip().strip('"')
        if os.path.exists(input_file):
            break
        print("❌ 文件不存在,请重新输入")
    # Run the deep structural analysis first.
    print("\n正在进行文档分析...")
    deep_debug_table(input_file)
    # Choose the processing mode.
    print("\n请选择处理模式:")
    print("1. 自动模式(推荐)")
    print("2. 手动指定列索引")
    # NOTE(review): 'choice' is collected but never used — the mode
    # selection currently has no effect on the flow below.
    choice = input("请选择 (1/2): ").strip()
    # Configure the keyword list.
    keywords = ["符合", "不适用"]
    custom_choice = input("使用默认关键词['符合','不适用']? (Y/n): ").strip().lower()
    if custom_choice == 'n':
        custom_keys = input("请输入关键词,用逗号分隔: ").strip()
        keywords = [k.strip() for k in custom_keys.split(",")]
    # Derive the output path: "<name>_已处理<ext>" next to the input file.
    file_dir = os.path.dirname(input_file)
    file_name = os.path.basename(input_file)
    name, ext = os.path.splitext(file_name)
    output_file = os.path.join(file_dir, f"{name}_已处理{ext}")
    # Run the processing.
    print(f"\n开始处理...")
    print(f"输入文件: {input_file}")
    print(f"输出文件: {output_file}")
    print(f"目标关键词: {keywords}")
    try:
        cleared_count = advanced_clear_cells(input_file, output_file, keywords)
        if cleared_count > 0:
            print(f"\n🎉 成功清空了 {cleared_count} 个匹配项!")
            print(f"请查看输出文件: {output_file}")
        else:
            print(f"\n⚠️ 没有找到匹配的项需要清空")
            print("可能的原因:")
            print("1. 列名不匹配")
            print("2. 关键词不匹配")
            print("3. 表格结构特殊")
            # Offer the manual fallback mode.
            manual_choice = input("是否尝试手动指定列索引? (y/N): ").strip().lower()
            if manual_choice == 'y':
                manual_mode_processing(input_file)
    except Exception as e:
        print(f"\n❌ 处理失败: {str(e)}")
        import traceback
        traceback.print_exc()


def manual_mode_processing(input_file):
    """Manual mode: user picks the table and the target column index."""
    print("\n=== 手动模式 ===")
    doc = Document(input_file)
    # Let the user pick a table (1-based).
    print(f"文档中共有 {len(doc.tables)} 个表格")
    table_choice = input("要处理哪个表格? (输入序号,从1开始): ").strip()
    try:
        table_idx = int(table_choice) - 1
        if table_idx < 0 or table_idx >= len(doc.tables):
            print("❌ 表格序号无效")
            return
        table = doc.tables[table_idx]
        print(f"\n表格 {table_choice} 的结构:")
        deep_debug_table_single(table, table_idx)
        # Let the user type the 0-based target column index.
        col_index = input("请输入'符合程度'列的索引 (从0开始): ").strip()
        col_idx = int(col_index)
        # Process each data row of the chosen table.
        cleared = 0
        for row_idx, row in enumerate(table.rows[1:], 1):  # skip the header row
            if col_idx < len(row.cells):
                cell_text = row.cells[col_idx].text.strip()
                if cell_text in ["符合", "不适用"]:
                    # Clear the target cell and the two cells to its left.
                    row.cells[col_idx].text = ""
                    if col_idx - 1 >= 0:
                        row.cells[col_idx - 1].text = ""
                    if col_idx - 2 >= 0:
                        row.cells[col_idx - 2].text = ""
                    cleared += 1
                    print(f"清空行 {row_idx+1}")
        # Save next to the input file with a "_手动处理" suffix.
        output_file = input_file.replace('.docx', '_手动处理.docx')
        doc.save(output_file)
        print(f"\n手动处理完成,清空了 {cleared} 个项")
        print(f"输出文件: {output_file}")
    except ValueError:
        print("❌ 请输入有效的数字")
    except Exception as e:
        print(f"❌ 处理失败: {str(e)}")


def deep_debug_table_single(table, table_idx):
    """Debug-print a single table's dimensions and non-empty cells."""
    print(f"表格 {table_idx+1}: 行数={len(table.rows)}, 列数={len(table.columns)}")
    for row_idx, row in enumerate(table.rows):
        print(f" 行 {row_idx}: ", end="")
        for col_idx, cell in enumerate(row.cells):
            text = cell.text.strip()
            if text:
                print(f"[{col_idx}]='{text}' ", end="")
        print()


if __name__ == "__main__":
    interactive_processing()

# TODO (stray note appended to the original source, kept verbatim):
# "改为保留测评指标中包含(F3)的行" — i.e. change the logic so that rows
# whose assessment-metric column contains "(F3)" are KEPT, rather than
# clearing keyword-matched rows. Not implemented here.
11-08
# Multimodal classification training script (PyTorch).
#
# From the original (non-code) note preceding this script:
#   X modality shape:  torch.Size([1, 1, 35])
#   Y modality shape:  torch.Size([1, 1, 35])
#   label shape:       torch.Size([1, 9])   # presumably one-hot over 9
#                                           # classes — TODO confirm
#   There is also a request to export the test set's true labels and
#   predictions to separate CSV files; that is NOT implemented below.
#
# NOTE(review): the original source was collapsed onto a few unformatted
# lines; conventional indentation has been reconstructed here. Code tokens
# are unchanged; only whitespace, comments, and docstrings differ. Nesting
# was inferred from the logic — confirm against the original if available.
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import os
import time
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from dataset import MultiModalDataset
from model import MultiModalNet, FusionModel
from opt import opt


def train_model(model, dataloader, val_loader, num_epochs=10, lr=0.001, save_dir='results'):
    """Train the model, validating each epoch and checkpointing the best.

    :param model: network taking (x, y) modality tensors, returning logits
    :param dataloader: training DataLoader yielding {'x', 'y', 'label'} dicts
    :param val_loader: validation DataLoader with the same batch structure
    :param num_epochs: number of training epochs
    :param lr: Adam learning rate
    :param save_dir: directory for TensorBoard logs, checkpoints, and plots
    :return: the trained model (final weights, not necessarily the best)
    """
    # Create the output directory if needed.
    os.makedirs(save_dir, exist_ok=True)
    # TensorBoard writer.
    # NOTE(review): writer.close() is commented out below, so the writer
    # is never closed — trailing events may not be flushed.
    writer = SummaryWriter(os.path.join(save_dir, 'logs'))
    # Loss function and optimizer.
    # NOTE(review): CrossEntropyLoss is given the raw 'labels' batch here;
    # with one-hot float targets this uses soft-label cross entropy
    # (supported in recent PyTorch) — confirm the intended label format.
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=lr)
    # Per-epoch history for plots/metrics.
    train_losses = []
    val_losses = []
    train_accs = []
    val_accs = []
    best_val_acc = 0.0
    start_time = time.time()
    print(f"开始训练,共 {num_epochs} 个epoch...")
    print("-" * 50)
    for epoch in range(num_epochs):
        # --- Training phase ---
        model.train()
        running_loss = 0.0
        correct = 0
        total = 0
        for i, batch in enumerate(dataloader):
            # Unpack the batch dict.
            x_data = batch['x']
            y_data = batch['y']
            labels = batch['label']
            # Zero accumulated gradients.
            optimizer.zero_grad()
            # Forward pass.
            outputs = model(x_data, y_data)
            # print(outputs.shape, labels.shape)
            loss = criterion(outputs, labels)
            # Backward pass and parameter update.
            loss.backward()
            optimizer.step()
            # Accumulate running statistics.
            running_loss += loss.item()
            _, predicted = torch.max(outputs.data, 1)
            # Convert one-hot labels to class indices when needed.
            true_labels = labels.argmax(dim=1) if labels.dim() > 1 else labels
            total += true_labels.size(0)
            correct += (predicted == true_labels).sum().item()
            # The every-10-batches condition was commented out in the
            # original, so this prints for every batch:
            # if i % 10 == 9:
            print(f'Epoch [{epoch + 1}/{num_epochs}], Batch [{i + 1}/{len(dataloader)}], Loss: {loss.item():.4f}')
        # Epoch-level training metrics.
        epoch_loss = running_loss / len(dataloader)
        epoch_acc = correct / total
        train_losses.append(epoch_loss)
        train_accs.append(epoch_acc)
        # --- Validation phase ---
        val_loss, val_acc = validate_model(model, val_loader, criterion)
        val_losses.append(val_loss)
        val_accs.append(val_acc)
        # Log scalars to TensorBoard.
        writer.add_scalar('Loss/train', epoch_loss, epoch)
        writer.add_scalar('Loss/val', val_loss, epoch)
        writer.add_scalar('Accuracy/train', epoch_acc, epoch)
        writer.add_scalar('Accuracy/val', val_acc, epoch)
        # Print epoch summary.
        print(f'\nEpoch [{epoch + 1}/{num_epochs}] 完成!')
        print(f'训练损失: {epoch_loss:.4f}, 训练准确率: {epoch_acc:.4f}')
        print(f'验证损失: {val_loss:.4f}, 验证准确率: {val_acc:.4f}')
        print("-" * 50)
        # Checkpoint the best model by validation accuracy.
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            torch.save(model.state_dict(), os.path.join(save_dir, 'best_model.pth'))
            print(f'保存最佳模型,验证准确率: {val_acc:.4f}')
        # NOTE(review): plots are regenerated every epoch here (and again
        # after training) — apparently intentional, so progress charts
        # stay up to date during a long run.
        save_training_results(train_losses, val_losses, train_accs, val_accs, save_dir)
    # Save the final (last-epoch) weights.
    torch.save(model.state_dict(), os.path.join(save_dir, 'final_model.pth'))
    # writer.close()
    # Save the final training result charts/metrics.
    save_training_results(train_losses, val_losses, train_accs, val_accs, save_dir)
    # Report total wall-clock training time.
    total_time = time.time() - start_time
    print(f'训练完成! 总耗时: {total_time // 60:.0f}分 {total_time % 60:.0f}秒')
    print(f'最佳验证准确率: {best_val_acc:.4f}')
    return model


def validate_model(model, dataloader, criterion):
    """Evaluate the model on a loader; return (mean loss, accuracy)."""
    model.eval()
    running_loss = 0.0
    correct = 0
    total = 0
    # No gradients needed during evaluation.
    with torch.no_grad():
        for batch in dataloader:
            x_data = batch['x']
            y_data = batch['y']
            labels = batch['label']
            outputs = model(x_data, y_data)
            loss = criterion(outputs, labels)
            running_loss += loss.item()
            _, predicted = torch.max(outputs.data, 1)
            # Convert one-hot labels to class indices when needed.
            true_labels = labels.argmax(dim=1) if labels.dim() > 1 else labels
            total += true_labels.size(0)
            correct += (predicted == true_labels).sum().item()
    val_loss = running_loss / len(dataloader)
    val_acc = correct / total
    return val_loss, val_acc


def save_training_results(train_losses, val_losses, train_accs, val_accs, save_dir):
    """Save training result charts and metrics"""
    # Set font properties for better rendering.
    plt.rcParams['font.family'] = 'serif'
    plt.rcParams['font.serif'] = ['Times New Roman'] + plt.rcParams['font.serif']
    # Use STIX fonts for math symbols, compatible with Times style.
    plt.rcParams['mathtext.fontset'] = 'stix'
    plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly
    plt.rcParams.update({
        'font.size': 14,        # base font size
        'axes.labelsize': 16,   # axis label font size
        'axes.titlesize': 18,   # title font size
        'xtick.labelsize': 14,  # x-axis tick label font size
        'ytick.labelsize': 14,  # y-axis tick label font size
        'legend.fontsize': 14,  # legend font size
    })
    # Loss curves (left subplot).
    plt.figure(figsize=(12, 5))
    plt.subplot(1, 2, 1)
    plt.plot(train_losses, label='Training Loss')
    plt.plot(val_losses, label='Validation Loss')
    plt.title('Training and Validation Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    # Accuracy curves (right subplot).
    plt.subplot(1, 2, 2)
    plt.plot(train_accs, label='Training Accuracy')
    plt.plot(val_accs, label='Validation Accuracy')
    plt.title('Training and Validation Accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.legend()
    plt.tight_layout()
    plt.savefig(os.path.join(save_dir, 'training_results.png'))
    plt.close()
    # Write final/best metrics to a text file.
    with open(os.path.join(save_dir, 'metrics.txt'), 'w') as f:
        f.write(f'Final Training Loss: {train_losses[-1]:.6f}\n')
        f.write(f'Final Training Accuracy: {train_accs[-1]:.6f}\n')
        f.write(f'Final Validation Loss: {val_losses[-1]:.6f}\n')
        f.write(f'Final Validation Accuracy: {val_accs[-1]:.6f}\n')
        f.write(f'Best Validation Accuracy: {max(val_accs):.6f}\n')
    print(f'Training results saved to {save_dir}')


# Main program.
if __name__ == "__main__":
    # Fix RNG seeds for reproducibility.
    torch.manual_seed(42)
    np.random.seed(42)
    train_dataset = MultiModalDataset(root_dir=opt.dataset_name, mode='train', num_classes=opt.n_class)
    val_dataset = MultiModalDataset(root_dir=opt.dataset_name, mode='val', num_classes=opt.n_class)
    train_loader = DataLoader(
        train_dataset,
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=2,
    )
    val_loader = DataLoader(
        val_dataset,
        batch_size=1,
        shuffle=False,
        num_workers=2,
    )
    print(f"训练集样本数: {len(train_dataset)}")
    print(f"验证集样本数: {len(val_dataset)}")
    # Instantiate the requested architecture; unknown names fall back to
    # MultiModalNet.
    # NOTE(review): num_classes is derived by collecting every sample's
    # 'label' into a set — this assumes labels are hashable (e.g. ints),
    # which seems inconsistent with the one-hot label tensors described
    # in the header note; verify against MultiModalDataset.
    if opt.which_model_net == "FusionModel":
        model = FusionModel(input_dim=opt.size,
                            num_classes=len(set(train_dataset.samples[i]['label'] for i in range(len(train_dataset))))
                            )
    elif opt.which_model_net == "MultiModalNet":
        model = MultiModalNet(input_dim=opt.size,
                              num_classes=len(set(train_dataset.samples[i]['label'] for i in range(len(train_dataset))))
                              )
    else:
        model = MultiModalNet(input_dim=opt.size,
                              num_classes=len(set(train_dataset.samples[i]['label'] for i in range(len(train_dataset))))
                              )
    print(f"模型参数数量: {sum(p.numel() for p in model.parameters() if p.requires_grad)}")
    # NOTE(review): 'trained_model' is assigned but not used afterwards.
    trained_model = train_model(
        model,
        train_loader,
        val_loader,
        num_epochs=opt.n_epochs,
        lr=opt.lr,
        save_dir=opt.exp_path
    )
12-13
基于可靠性评估序贯蒙特卡洛模拟法的配电网可靠性评估研究(Matlab代码实现)内容概要:本文围绕“基于可靠性评估序贯蒙特卡洛模拟法的配电网可靠性评估研究”,介绍了利用Matlab代码实现配电网可靠性的仿真分析方法。重点采用序贯蒙特卡洛模拟法对配电网进行长时间段的状态抽样与统计,通过模拟系统元件的故障与修复过程,评估配电网的关键可靠性指标,如系统停电频率、停电持续时间、负荷点可靠性等。该方法能够有效处理复杂网络结构与设备时序特性,提升评估精度,适用于含分布式电源、电动汽车等新型负荷接入的现代配电网。文中提供了完整的Matlab实现代码与案例分析,便于复现和扩展应用。; 适合人群:具备电力系统基础知识和Matlab编程能力的高校研究生、科研人员及电力行业技术人员,尤其适合从事配电网规划、运行与可靠性分析相关工作的人员; 使用场景及目标:①掌握序贯蒙特卡洛模拟法在电力系统可靠性评估中的基本原理与实现流程;②学习如何通过Matlab构建配电网仿真模型并进行状态转移模拟;③应用于含新能源接入的复杂配电网可靠性定量评估与优化设计; 阅读建议:建议结合文中提供的Matlab代码逐段调试运行,理解状态抽样、故障判断、修复逻辑及指标统计的具体实现方式,同时可扩展至不同网络结构或加入更多不确定性因素进行深化研究。
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值