sample_fork.c

The post shows a short C program that includes the stdio.h, stdlib.h, and unistd.h headers, calls fork() in main to create a child process, uses the return value to tell the parent from the child, prints each process's information, and prints an error message if fork fails.
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
    pid_t pid = 0;

    if ((pid = fork()) > 0)          /* parent: fork() returns the child's PID */
        printf("I am the parent, my pid=%d, my child's pid=%d\n", (int)getpid(), (int)pid);
    else if (pid == 0)               /* child: fork() returns 0 */
        printf("I am the child, my pid=%d, my parent's pid=%d\n", (int)getpid(), (int)getppid());
    else                             /* fork() failed and returned -1 */
    {
        perror("fork");
        return 1;
    }
    return 0;
}
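For comparison, the same parent/child split can be sketched in Python, where os.fork() wraps the same system call (POSIX-only; a fork failure surfaces as an OSError rather than a negative return value):

import os
import sys

def main():
    try:
        pid = os.fork()              # returns the child's PID in the parent, 0 in the child
    except OSError as e:
        print("fork:", e, file=sys.stderr)
        sys.exit(1)
    if pid > 0:
        print(f"I am the parent, my pid={os.getpid()}, my child's pid={pid}")
        os.waitpid(pid, 0)           # reap the child so it does not linger as a zombie
    else:
        print(f"I am the child, my pid={os.getpid()}, my parent's pid={os.getppid()}")

if __name__ == "__main__":
    main()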
2025-08-31 13:33:24.392 | ERROR | PID:13409 | standard_model_wrapper_pytorch.train:391 - learner train() Exception, 'SampleData' object has no attribute 'next_obs'
Traceback (most recent call last):
  File "/data/projects/intelligent_traffic_lights_v2/train_test.py", line 179, in <module>
    train()
  File "/data/projects/intelligent_traffic_lights_v2/train_test.py", line 109, in train
    proc.start()
  File "/usr/lib64/python3.11/multiprocessing/process.py", line 121, in start
    self._popen = self._Popen(self)
  File "/usr/lib64/python3.11/multiprocessing/context.py", line 224, in _Popen
    return _default_context.get_context().Process._Popen(process_obj)
  File "/usr/lib64/python3.11/multiprocessing/context.py", line 281, in _Popen
    return Popen(process_obj)
  File "/usr/lib64/python3.11/multiprocessing/popen_fork.py", line 19, in __init__
    self._launch(process_obj)
  File "/usr/lib64/python3.11/multiprocessing/popen_fork.py", line 71, in _launch
    code = process_obj._bootstrap(parent_sentinel=child_r)
  File "/usr/lib64/python3.11/multiprocessing/process.py", line 314, in _bootstrap
    self.run()
  File "/usr/lib64/python3.11/multiprocessing/process.py", line 108, in run
    self._target(*self._args, **self._kwargs)
  File "/data/projects/intelligent_traffic_lights_v2/kaiwudrl/server/learner/learner.py", line 154, in main
    train_loop()
  File "/data/projects/intelligent_traffic_lights_v2/kaiwudrl/server/learner/learner.py", line 131, in train_loop
    train.loop()
  File "/data/projects/intelligent_traffic_lights_v2/kaiwudrl/server/learner/on_policy_trainer.py", line 1278, in loop
    self.run_once()
  File "/data/projects/intelligent_traffic_lights_v2/kaiwudrl/server/learner/on_policy_trainer.py", line 1251, in run_once
    self.train()
  File "/data/projects/intelligent_traffic_lights_v2/kaiwudrl/server/learner/on_policy_trainer.py", line 1217, in train
    self.train_detail()
  File "/data/projects/intelligent_traffic_lights_v2/kaiwudrl/server/learner/on_policy_trainer.py", line 125, in train_detail
    ) = self.model_wrapper.train(self.current_sync_model_version_from_learner)
  File "/data/projects/intelligent_traffic_lights_v2/kaiwudrl/common/algorithms/standard_model_wrapper_pytorch.py", line 374, in train
    values = self.model.learn(data, train=True)
  File "/data/projects/intelligent_traffic_lights_v2/kaiwudrl/interface/base_agent_kaiwudrl_remote.py", line 73, in wrapper
    return func(agent, datas, *args, **kargs)
  File "/workspace/code/agent_target_dqn/agent.py", line 117, in learn
    return self.algorithm.learn(list_sample_data)
  File "/workspace/code/agent_target_dqn/algorithm/algorithm.py", line 109, in learn
    next_obs_list.append(sample.next_obs)
AttributeError: 'SampleData' object has no attribute 'next_obs'
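The last frame is the real clue: algorithm.py line 109 reads sample.next_obs, so every SampleData object reaching the learner must define that attribute. A hypothetical, minimal sketch of the shape the learner expects (all field names other than next_obs are stand-ins; in the real project SampleData is produced inside kaiwu_agent, so the actual fix is to declare next_obs wherever that class and the sample-building code are defined):

# Hypothetical sketch: the sample type must actually declare next_obs.
from dataclasses import dataclass, field
from typing import List

@dataclass
class SampleData:                     # stand-in for the framework-generated class
    obs: List[float] = field(default_factory=list)
    action: int = 0
    reward: float = 0.0
    next_obs: List[float] = field(default_factory=list)  # the missing attribute
    done: bool = False

def learn(list_sample_data):
    next_obs_list = []
    for sample in list_sample_data:   # mirrors algorithm.py line 109
        next_obs_list.append(sample.next_obs)
    return next_obs_list

If next_obs is absent where samples are built or serialized on the actor side, the learner-side read fails exactly as in the log above.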
09-01
My send/receive size is always 1024 — is that right?

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

/* A→B message structure */
struct msg_send {
    long msg_type;
    struct {
        int seq;
        size_t size;
        int crc;
        char data[1024];
    } content_send;
};

/* B→A acknowledgement structure */
struct msg_ack {
    long msg_type;
    struct {
        int seq;
        size_t size;
        int crc_ack;
    } content_ack;
};

/* Transfer statistics */
struct status {
    int total_count;
    int success_count;
    size_t total_bytes;
};

/***************************************************************
 * @brief  CRC-32 checksum calculation
 * @param  data pointer to the bytes, size number of bytes
 * @return CRC value
 **************************************************************/
unsigned int calculate_crc(const char *data, size_t size)
{
    unsigned int crc = 0xFFFFFFFF;            /* initial CRC value */
    for (size_t i = 0; i < size; i++) {
        crc ^= (unsigned char)data[i];        /* XOR in the next byte */
        /* process the 8 bits of the current byte */
        for (int j = 0; j < 8; j++) {
            int lsb = crc & 1;                /* test the lowest bit */
            crc = crc >> 1;                   /* unconditional right shift by 1 */
            /* if the LSB was 1, XOR with the polynomial */
            if (lsb) {
                crc ^= 0xEDB88320;
            }
        }
    }
    return ~crc;
}

/* Print the statistics */
void print_status(const struct status *s)
{
    printf("Total sends:  %d\n", s->total_count);
    printf("Successes:    %d\n", s->success_count);
    printf("Failures:     %d\n", s->total_count - s->success_count);
    printf("Total bytes:  %zu\n", s->total_bytes);
}

int main()
{
    key_t key = ftok("ipc_queue", 100);          /* unique IPC key identifying the queue */
    int msgid = msgget(key, 0666 | IPC_CREAT);   /* create the message queue */
    pid_t pid = fork();

    if (pid > 0) { /* parent process: A sends */
        struct status sender_stats = {0};
        srand(time(NULL));                       /* seed the random generator */
        for (int i = 1; i <= 6; i++) {
            sleep(1);
            struct msg_send msg;
            msg.msg_type = 1;
            msg.content_send.seq = i;
            msg.content_send.size = sizeof(msg.content_send.data);
            for (size_t j = 0; j < msg.content_send.size; j++) {
                msg.content_send.data[j] = rand() % 256;
            }
            /* CRC over the randomly generated bytes (0-255) */
            msg.content_send.crc = calculate_crc(msg.content_send.data, msg.content_send.size);
            /* corrupt the CRC on even rounds to force a failure */
            if (i % 2 == 0) {
                msg.content_send.crc ^= 0x12345678;  /* flip bits in the CRC */
            }
            /* send the message */
            msgsnd(msgid, &msg, sizeof(msg.content_send), 0);
            time_t now = time(NULL);
            printf("time_%ld: A sent %zu bytes to B (seq %d)\n", now, msg.content_send.size, i);
            sender_stats.total_count++;
            sender_stats.total_bytes += msg.content_send.size;
            /* receive the acknowledgement */
            struct msg_ack ack;
            msgrcv(msgid, &ack, sizeof(ack.content_ack), 2, 0);
            now = time(NULL);
            if (ack.content_ack.crc_ack) {
                printf("time_%ld: B received %zu bytes, CRC OK (seq %d)\n",
                       now, ack.content_ack.size, ack.content_ack.seq);
                sender_stats.success_count++;
            } else {
                printf("time_%ld: B received %zu bytes, CRC mismatch (seq %d)\n",
                       now, ack.content_ack.size, ack.content_ack.seq);
            }
        }
        printf("\nSender A statistics:\n");
        print_status(&sender_stats);
        msgctl(msgid, IPC_RMID, NULL);           /* remove the message queue */
    } else if (pid == 0) { /* child process: B receives */
        struct status receiver_status = {0};
        for (int i = 1; i <= 6; i++) {
            struct msg_send msg;
            msgrcv(msgid, &msg, sizeof(msg.content_send), 1, 0);
            receiver_status.total_count++;
            receiver_status.total_bytes += msg.content_send.size;
            /* verify the CRC */
            unsigned int calculated_crc = calculate_crc(msg.content_send.data, msg.content_send.size);
            int crc_ack = (calculated_crc == msg.content_send.crc);
            if (crc_ack)
                receiver_status.success_count++;
            /* send the acknowledgement */
            struct msg_ack ack;
            ack.msg_type = 2;
            ack.content_ack.seq = msg.content_send.seq;
            ack.content_ack.crc_ack = crc_ack;
            ack.content_ack.size = msg.content_send.size;
            msgsnd(msgid, &ack, sizeof(ack.content_ack), 0);
        }
        printf("\nReceiver B statistics:\n");
        print_status(&receiver_status);
        exit(0);
    }
    return 0;
}
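Two observations on the question itself. First, the constant 1024 is expected: content_send.size is always set to sizeof(msg.content_send.data), the full 1024-byte buffer, so every round sends and acknowledges exactly 1024 bytes; to exercise variable sizes, size would have to be chosen per round and only that many bytes filled and checksummed. Second, calculate_crc above is the standard CRC-32 (polynomial 0xEDB88320, initial value 0xFFFFFFFF, final complement), which can be cross-checked against Python's zlib.crc32; a small self-contained sketch (the payload is an arbitrary illustrative buffer):

import zlib

def calculate_crc(data: bytes) -> int:
    """Pure-Python mirror of the C routine above (CRC-32, poly 0xEDB88320)."""
    crc = 0xFFFFFFFF
    for byte in data:
        crc ^= byte
        for _ in range(8):
            lsb = crc & 1
            crc >>= 1
            if lsb:
                crc ^= 0xEDB88320
    return ~crc & 0xFFFFFFFF

payload = bytes(range(256)) * 4          # 1024 bytes, like the C buffer
assert calculate_crc(payload) == zlib.crc32(payload)
print("CRC-32 matches zlib:", hex(zlib.crc32(payload)))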
08-19
ERROR (EX3863) : Syntax error near 'wait'("D:\GowinFPGA\Project\serial_protocol_converter\src\serial_protocol_converter_tb.v":126)
ERROR (EX2656) : Verilog 2001 keyword 'wait' used in incorrect context("D:\GowinFPGA\Project\serial_protocol_converter\src\serial_protocol_converter_tb.v":126)
ERROR (EX3863) : Syntax error near 'end'("D:\GowinFPGA\Project\serial_protocol_converter\src\serial_protocol_converter_tb.v":134)
ERROR (EX2656) : Verilog 2001 keyword 'end' used in incorrect context("D:\GowinFPGA\Project\serial_protocol_converter\src\serial_protocol_converter_tb.v":134)
ERROR (EX3863) : Syntax error near 'i'("D:\GowinFPGA\Project\serial_protocol_converter\src\serial_protocol_converter_tb.v":139)
ERROR (EX3863) : Syntax error near '['("D:\GowinFPGA\Project\serial_protocol_converter\src\serial_protocol_converter_tb.v":140)
ERROR (EX3863) : Syntax error near '['("D:\GowinFPGA\Project\serial_protocol_converter\src\serial_protocol_converter_tb.v":141)
ERROR (EX3863) : Syntax error near 'endtask'("D:\GowinFPGA\Project\serial_protocol_converter\src\serial_protocol_converter_tb.v":174)
ERROR (EX2656) : Verilog 2001 keyword 'endtask' used in incorrect context("D:\GowinFPGA\Project\serial_protocol_converter\src\serial_protocol_converter_tb.v":174)
ERROR (EX3863) : Syntax error near '['("D:\GowinFPGA\Project\serial_protocol_converter\src\serial_protocol_converter_tb.v":178)
ERROR (EX3863) : Syntax error near 'endtask'("D:\GowinFPGA\Project\serial_protocol_converter\src\serial_protocol_converter_tb.v":202)
ERROR (EX2656) : Verilog 2001 keyword 'endtask' used in incorrect context("D:\GowinFPGA\Project\serial_protocol_converter\src\serial_protocol_converter_tb.v":202)
ERROR (EX3863) : Syntax error near 'endtask'("D:\GowinFPGA\Project\serial_protocol_converter\src\serial_protocol_converter_tb.v":215)
ERROR (EX2656) : Verilog 2001 keyword 'endtask' used in incorrect context("D:\GowinFPGA\Project\serial_protocol_converter\src\serial_protocol_converter_tb.v":215)
ERROR (EX3863) : Syntax error near 'endmodule'("D:\GowinFPGA\Project\serial_protocol_converter\src\serial_protocol_converter_tb.v":223)
ERROR (EX2656) : Verilog 2001 keyword 'endmodule' used in incorrect context("D:\GowinFPGA\Project\serial_protocol_converter\src\serial_protocol_converter_tb.v":223)
07-30
Traceback (most recent call last):
  File "/home/chrismon/anaconda3/envs/gfmrag/lib/python3.12/multiprocessing/process.py", line 314, in _bootstrap
    self.run()
  File "/home/chrismon/anaconda3/envs/gfmrag/lib/python3.12/multiprocessing/process.py", line 108, in run
    self._target(*self._args, **self._kwargs)
  File "/home/chrismon/anaconda3/envs/gfmrag/lib/python3.12/site-packages/colbert/infra/launcher.py", line 134, in setup_new_process
    return_val = callee(config, *args)
                 ^^^^^^^^^^^^^^^^^^^^^
  File "/home/chrismon/anaconda3/envs/gfmrag/lib/python3.12/site-packages/colbert/indexing/collection_indexer.py", line 33, in encode
    encoder.run(shared_lists)
  File "/home/chrismon/anaconda3/envs/gfmrag/lib/python3.12/site-packages/colbert/indexing/collection_indexer.py", line 63, in run
    self.setup()  # Computes and saves plan for whole collection
    ^^^^^^^^^^^^
  File "/home/chrismon/anaconda3/envs/gfmrag/lib/python3.12/site-packages/colbert/indexing/collection_indexer.py", line 101, in setup
    avg_doclen_est = self._sample_embeddings(sampled_pids)
                     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/chrismon/anaconda3/envs/gfmrag/lib/python3.12/site-packages/colbert/indexing/collection_indexer.py", line 142, in _sample_embeddings
    torch.distributed.all_reduce(self.num_sample_embs)
  File "/home/chrismon/anaconda3/envs/gfmrag/lib/python3.12/site-packages/torch/distributed/c10d_logger.py", line 81, in wrapper
    return func(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^
  File "/home/chrismon/anaconda3/envs/gfmrag/lib/python3.12/site-packages/torch/distributed/distributed_c10d.py", line 2935, in all_reduce
    work = group.allreduce([tensor], opts)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
torch.distributed.DistBackendError: NCCL error in: /pytorch/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:3690, unhandled cuda error (run with NCCL_DEBUG=INFO for details), NCCL version 2.27.5
ncclUnhandledCudaError: Call to CUDA function failed.
Last error: Cuda failure 2 'out of memory'
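The final line ("Cuda failure 2 'out of memory'") indicates the NCCL error is secondary to GPU memory exhaustion, so freeing GPU memory (fewer indexing processes per GPU, or removing other processes from the card) is the likely direction. For the diagnostic the message itself asks for, NCCL_DEBUG=INFO must be set before torch.distributed initializes, e.g. at the top of the launching script (a minimal sketch):

import os
os.environ.setdefault("NCCL_DEBUG", "INFO")   # NCCL reads this when the process group initializes
# ...then start the ColBERT indexing run as before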
11-03
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
import argparse
import xlwt
from collections import defaultdict

# Partition name map (prefix -> friendly name)
PARTITION_NAME_MAP = {
    '02_': 'system',
    '03_': 'vendor',
    '04_': 'product',
    '05_': 'odm',
    '06_': 'my_product',
    '07_': 'my_engineering',
    '08_': 'my_stock',
    '09_': 'my_heytap',
    '10_': 'my_company',
    '11_': 'my_carrier',
    '12_': 'my_region',
    '13_': 'my_preload',
    '14_': 'data',
    '15_': 'my_bigball',
    '16_': 'my_manifest',
    '17_system_dlkm': 'system_dlkm',
    '17_vendor_dlkm': 'vendor_dlkm',
    '17_cache': 'cache',
    '18_': 'system_ext'
}

def parse_du_file(file_path):
    """Parse a du output file, converting sizes to MB."""
    data = {}
    try:
        with open(file_path, 'r') as f:
            for line in f:
                if 'Permission denied' in line or 'No such file' in line or not line.strip():
                    continue
                match = re.match(r'(\d+\.?\d*)\s*([KMG]?)[Bb]?\s+(.*)', line.strip())
                if match:
                    size, unit, path = match.groups()
                    size = float(size)
                    # convert the unit to MB
                    if unit == 'K':
                        size = size / 1024.0
                    elif unit == '':
                        size = size / (1024 * 1024.0)
                    elif unit == 'M':
                        pass
                    elif unit == 'G':
                        size = size * 1024.0
                    data[path] = round(size, 4)
    except IOError as e:
        print("warning: cannot read file {}: {}".format(file_path, str(e)))
    return data

def extract_file_prefix(filename):
    """Extract the numeric prefix of a file name."""
    if filename.startswith('17_'):
        return filename.replace('.txt', '')
    match = re.match(r'^(\d+_)', filename)
    return match.group(1) if match else "other_"

def is_main_partition_file(filename, prefix):
    """Check whether this is a main partition file."""
    if prefix.startswith('17_'):
        return True
    expected_name = prefix + PARTITION_NAME_MAP[prefix] + ".txt"
    return filename == expected_name

def parse_lpdump_file(file_path):
    """Parse lpdump output to get per-partition sizes and the size of the super physical partition."""
    sector_size = 512  # bytes per sector
    partition_sizes = {}
    super_size = 0.0   # size of the super physical partition
    try:
        with open(file_path, 'r') as f:
            lines = f.readlines()
        in_super_layout = False
        for line in lines:
            if "Super partition layout:" in line:
                in_super_layout = True
                continue
            if not in_super_layout or not line.strip():
                continue
            # match the "Size:" line
            match = re.search(r'(?<!\S)Size:\s*(\d+)\s+bytes', line, re.IGNORECASE)
            if match:
                try:
                    size_bytes = int(match.group(1))
                    if super_size == 0.0:
                        super_size = size_bytes / float(1024 * 1024)  # convert to MB
                        print("super partition size: %.2f MB" % super_size)
                except Exception as e:
                    print("cannot parse super partition size: %s" % str(e))
                continue
            match = re.search(r'super:\s*(\d+)\s*.. (\d+):\s*([a-zA-Z0-9_]+)_a\s*\((\d+)\s+sectors\)', line)
            if match:
                name = match.group(3) + '_a'
                size_sectors = int(match.group(4))  # use the sector count in parentheses
                size_mb = round(size_sectors * sector_size / (1024 * 1024), 4)
                partition_sizes[name] = size_mb
                # specifically capture the size of the super partition
                if name == 'super_a':
                    super_size = size_mb
    except Exception as e:
        print("error parsing lpdump file: %s" % e)
    return partition_sizes, super_size

def generate_dual_report(folder1, folder2, output_xlsx, is_os_project,
                         upgrade_generations, maintenance_generations,
                         reserve_per_generation, commercial_max_usage):
    """Generate a two-device comparison report."""
    folder1_name = os.path.basename(os.path.normpath(folder1))
    folder2_name = os.path.basename(os.path.normpath(folder2))
    for folder in [folder1, folder2]:
        if not os.path.exists(folder):
            print("error: directory does not exist - %s" % folder)
            return "directory %s does not exist, please check the path" % folder
        if not os.path.isdir(folder):
            print("error: path is not a directory - %s" % folder)
            return "%s is not a valid directory" % folder

    # initialize data structures
    machine1_main_data = {}
    machine2_main_data = {}
    machine1_all_files = defaultdict(dict)
    machine2_all_files = defaultdict(dict)

    # parse the lpdump files
    machine1_lpdump, machine1_super_size = parse_lpdump_file(os.path.join(folder1, '18_lpdump.txt'))
    machine2_lpdump, machine2_super_size = parse_lpdump_file(os.path.join(folder2, '18_lpdump.txt'))

    # collect data
    for folder_path, main_dict, all_dict in [
        (folder1, machine1_main_data, machine1_all_files),
        (folder2, machine2_main_data, machine2_all_files)
    ]:
        print("processing directory: %s" % folder_path)
        try:
            for filename in os.listdir(folder_path):
                if not filename.endswith('.txt'):
                    continue
                prefix = extract_file_prefix(filename)
                if prefix == '01_' or prefix not in PARTITION_NAME_MAP:
                    continue
                file_path = os.path.join(folder_path, filename)
                partition_name = PARTITION_NAME_MAP[prefix]
                file_data = parse_du_file(file_path)
                all_dict[filename] = file_data
                if is_main_partition_file(filename, prefix):
                    print("parsing main partition file: %s" % file_path)
                    main_dict[prefix] = file_data
        except OSError as e:
            print("directory access error: %s" % str(e))
            return "cannot access directory %s: %s" % (folder_path, str(e))

    # create the Excel workbook
    try:
        wb = xlwt.Workbook(encoding='utf-8')

        # ====== style definitions ======
        header_style = xlwt.easyxf('font: bold on')
        title_style = xlwt.easyxf('font: bold on, height 280; align: wrap on, vert centre')
        normal_style = xlwt.easyxf()
        added_style = xlwt.easyxf('pattern: pattern solid, fore_colour light_green;')
        removed_style = xlwt.easyxf('pattern: pattern solid, fore_colour rose;')
        summary_style = xlwt.easyxf('font: bold on, color blue;')
        wrap_style = xlwt.easyxf('align: wrap on, vert centre')  # notes column style

        # ====== overview sheet ======
        ws_overview = wb.add_sheet('Overview')
        current_row = 0
        # overview title
        ws_overview.write_merge(current_row, current_row, 0, 5,
                                "Storage usage overview (main partition files only)", title_style)
        current_row += 1
        # folder names
        ws_overview.write(current_row, 1, folder1_name, header_style)
        ws_overview.write(current_row, 2, folder2_name, header_style)
        current_row += 1
        # header row (with an added notes column)
        headers = ['Partition', 'Total (MB)', 'Total (MB)', 'Diff (MB)', 'Mark', 'Notes (top 3 growth)']
        for col, header in enumerate(headers):
            ws_overview.write(current_row, col, header, header_style)
        current_row += 1

        # per-partition summary data
        overview_data = []
        total_machine1 = 0.0
        total_machine2 = 0.0

        # process partitions in order
        for prefix in sorted(PARTITION_NAME_MAP.keys()):
            partition_name = PARTITION_NAME_MAP[prefix]
            # skip the data partition
            if partition_name == 'data':
                continue
            # use the lpdump size instead of the du total
            key = partition_name + '_a'
            size1 = machine1_lpdump.get(key, 0.0)
            size2 = machine2_lpdump.get(key, 0.0)
            diff = round(size1 - size2, 4)
            # update totals
            total_machine1 += size1
            total_machine2 += size2
            # pick mark and style
            if diff > 0:
                mark = "increased"
                style = added_style
            elif diff < 0:
                mark = "decreased"
                style = removed_style
            else:
                mark = "unchanged"
                style = normal_style

            # collect paths that grew by more than 5 MB
            top_notes = []
            if diff > 0:  # only compute top paths when the partition grew
                path_diffs = []
                all_paths = set(machine1_main_data.get(prefix, {}).keys()) | set(machine2_main_data.get(prefix, {}).keys())
                for path in all_paths:
                    size1_path = machine1_main_data.get(prefix, {}).get(path, 0.0)
                    size2_path = machine2_main_data.get(prefix, {}).get(path, 0.0)
                    path_diff = size1_path - size2_path
                    if path_diff > 5:  # only record paths that grew by more than 5 MB
                        path_diffs.append((path, path_diff))
                # sort by growth, descending
                path_diffs.sort(key=lambda x: x[1], reverse=True)
                for i, (path, diff_val) in enumerate(path_diffs):
                    # truncate overly long paths
                    if len(path) > 50:
                        path = "..." + path[-47:]
                    top_notes.append("%d. %s: +%.2fMB" % (i + 1, path, diff_val))

            # save the partition row
            overview_data.append({
                'name': partition_name,
                'machine1': size1,
                'machine2': size2,
                'diff': diff,
                'style': style,
                'mark': mark,
                'notes': "\n".join(top_notes) if top_notes else "no significant growth paths"
            })

            # write the row to the overview sheet (with notes column)
            print("overview row: %s, test device: %.2f MB, reference device: %.2f MB" % (partition_name, size1, size2))  # debug output
            ws_overview.write(current_row, 0, partition_name, style)
            ws_overview.write(current_row, 1, size1, style)
            ws_overview.write(current_row, 2, size2, style)
            ws_overview.write(current_row, 3, diff, style)
            ws_overview.write(current_row, 4, mark, style)
            ws_overview.write(current_row, 5, overview_data[-1]['notes'], wrap_style)  # use the wrap style defined above
            current_row += 1

        # blank row
        current_row += 1

        # totals row
        total_diff = total_machine1 - total_machine2
        if total_diff > 0:
            total_mark = "net increase"
            total_style = added_style
        elif total_diff < 0:
            total_mark = "net decrease"
            total_style = removed_style
        else:
            total_mark = "unchanged"
            total_style = normal_style
        ws_overview.write(current_row, 0, "Total", header_style)
        ws_overview.write(current_row, 1, total_machine1, header_style)
        ws_overview.write(current_row, 2, total_machine2, header_style)
        ws_overview.write(current_row, 3, total_diff, header_style)
        ws_overview.write(current_row, 4, total_mark, header_style)
        ws_overview.write(current_row, 5, "", header_style)  # leave notes empty

        # check whether the reserve requirement is met
        cr = 0  # commercial reserve
        current_row += 1
        my_preload1 = machine1_lpdump.get('my_preload_a', 0.0)
        if is_os_project:
            required_space = (total_machine1 + upgrade_generations * reserve_per_generation
                              + maintenance_generations * 200 + commercial_max_usage * 1024 - my_preload1)
            cr = commercial_max_usage * 1024 - my_preload1
        else:
            required_space = total_machine1 + upgrade_generations * reserve_per_generation + maintenance_generations * 200
        space_difference = machine1_super_size - required_space
        reserved_space = machine1_super_size - total_machine1
        symbol = ">"
        if required_space > machine1_super_size:
            status = "not met"
            status_style = removed_style
            print("insufficient reserve! used: %.2fMB | reserved: %.2fMB | super size: %.2fMB | short by %.2fMB"
                  % (total_machine1, reserved_space, machine1_super_size, abs(space_difference)))
        else:
            status = "met"
            status_style = added_style
            symbol = "<"
            print("sufficient reserve! used: %.2fMB | reserved: %.2fMB | super size: %.2fMB | %.2fMB left"
                  % (total_machine1, reserved_space, machine1_super_size, space_difference))
        # write the verdict
        ws_overview.write(current_row, 0, "Reserve check", header_style)
        ws_overview.write(current_row, 1, "", normal_style)
        ws_overview.write(current_row, 2, "", normal_style)
        ws_overview.write(current_row, 3, "", normal_style)
        ws_overview.write(current_row, 4, status, status_style)
        ws_overview.write(current_row, 5,
                          "used: %.2fMB | reserved: %.2fMB | super size: %.2fMB"
                          % (total_machine1, reserved_space, machine1_super_size), wrap_style)
        current_row += 1
        ws_overview.write(current_row, 0, "Formula", header_style)
        ws_overview.write(current_row, 5,
                          "required: %.2fMB = used: %.2fMB + upgrade reserve: %.2f*%.2f + %.2f*200 + commercial reserve: %.2f %s super size: %.2f"
                          % (required_space, total_machine1, upgrade_generations, reserve_per_generation,
                             maintenance_generations, cr, symbol, machine1_super_size), wrap_style)
        # notes column width (100 characters)
        ws_overview.col(5).width = 256 * 100

        # ====== one sheet per file ======
        # all unique file names (union of both folders)
        all_filenames = sorted(set(machine1_all_files.keys()) | set(machine2_all_files.keys()))
        for filename in all_filenames:
            # extract the file prefix
            prefix = extract_file_prefix(filename)
            # skip invalid prefixes
            if prefix not in PARTITION_NAME_MAP:
                continue
            partition_name = PARTITION_NAME_MAP[prefix]
            # sheet name = file name without extension
            sheet_name = filename.replace('.txt', '')
            if len(sheet_name) > 31:  # Excel sheet-name length limit
                sheet_name = sheet_name[:31]
            ws = wb.add_sheet(sheet_name)
            # print("creating sheet: %s" % sheet_name)
            current_row = 0
            # partition title
            title = "Partition: %s - File: %s" % (partition_name, filename)
            ws.write_merge(current_row, current_row, 0, 5, title, title_style)
            current_row += 1
            # folder names
            ws.write_merge(current_row, current_row, 0, 1, folder1_name, header_style)
            ws.write_merge(current_row, current_row, 2, 3, folder2_name, header_style)
            ws.write(current_row, 4, "Diff (M)", header_style)
            ws.write(current_row, 5, "Mark", header_style)
            current_row += 1
            # header row
            headers = ['Path', 'Size (M)', 'Path', 'Size (M)', 'Diff (M)', 'Mark']
            for col, header in enumerate(headers):
                ws.write(current_row, col, header, header_style)
            current_row += 1
            # file data
            data1 = machine1_all_files.get(filename, {})
            data2 = machine2_all_files.get(filename, {})
            # union of paths from both folders
            all_paths = sorted(set(data1.keys()) | set(data2.keys()))
            # change statistics
            total_increase = 0.0  # total growth
            total_decrease = 0.0  # total shrinkage
            total_added = 0.0     # total size of added files
            total_removed = 0.0   # total size of removed files
            # data rows
            for path in all_paths:
                size1 = data1.get(path, 0.0)
                size2 = data2.get(path, 0.0)
                # compute the difference
                diff = size1 - size2
                # mark and style
                if size1 == 0 and size2 > 0:
                    mark = "removed"
                    cell_style = removed_style
                    total_removed += size2
                elif size1 > 0 and size2 == 0:
                    mark = "added"
                    cell_style = added_style
                    total_added += size1
                else:
                    if diff > 0:
                        mark = "grew"
                        cell_style = added_style
                        total_increase += diff
                    elif diff < 0:
                        mark = "shrank"
                        cell_style = removed_style
                        total_decrease += abs(diff)
                    else:
                        mark = "same"
                        cell_style = normal_style
                # folder1 columns
                if size1 > 0:
                    ws.write(current_row, 0, path, cell_style)
                    ws.write(current_row, 1, size1, cell_style)
                else:
                    ws.write(current_row, 0, "", cell_style)
                    ws.write(current_row, 1, "", cell_style)
                # folder2 columns
                if size2 > 0:
                    ws.write(current_row, 2, path, cell_style)
                    ws.write(current_row, 3, size2, cell_style)
                else:
                    ws.write(current_row, 2, "", cell_style)
                    ws.write(current_row, 3, "", cell_style)
                # diff and mark columns
                ws.write(current_row, 4, diff, cell_style)
                ws.write(current_row, 5, mark, cell_style)
                current_row += 1
            # file totals row
            file_total1 = sum(data1.values())
            file_total2 = sum(data2.values())
            file_diff = file_total1 - file_total2
            ws.write(current_row, 0, "File total", header_style)
            ws.write(current_row, 1, file_total1, header_style)
            ws.write(current_row, 2, "", header_style)
            ws.write(current_row, 3, file_total2, header_style)
            ws.write(current_row, 4, file_diff, header_style)
            ws.write(current_row, 5, "", header_style)
            current_row += 1
            # change-summary row
            message = (u"under %s: shrank %.2fM, grew %.2fM, added files %.2fM, removed files %.2fM") % (
                partition_name, total_decrease, total_increase, total_added, total_removed)
            ws.write_merge(current_row, current_row, 0, 5, message, summary_style)

        # save the workbook
        wb.save(output_xlsx)
        return "comparison report generated: %s" % output_xlsx
    except Exception as e:
        import traceback
        traceback.print_exc()
        return "error generating Excel file: %s" % str(e)

def generate_single_report(folder, output_xlsx, is_os_project=False, upgrade_generations=0,
                           maintenance_generations=0, reserve_per_generation=0, commercial_max_usage=0):
    """Single-device breakdown mode."""
    folder_name = os.path.basename(os.path.normpath(folder))
    added_style = xlwt.easyxf('pattern: pattern solid, fore_colour light_green;')
    removed_style = xlwt.easyxf('pattern: pattern solid, fore_colour rose;')
    if not os.path.exists(folder):
        print("error: directory does not exist - %s" % folder)
        return "directory %s does not exist, please check the path" % folder
    if not os.path.isdir(folder):
        print("error: path is not a directory - %s" % folder)
        return "%s is not a valid directory" % folder

    # initialize data structures
    machine_data = {}
    all_files = defaultdict(dict)

    # parse the lpdump file
    lpdump_data, super_size = parse_lpdump_file(os.path.join(folder, '18_lpdump.txt'))
    my_preload_size = lpdump_data.get('my_preload_a', 0.0)

    # collect data
    try:
        for filename in os.listdir(folder):
            if not filename.endswith('.txt'):
                continue
            prefix = extract_file_prefix(filename)
            if prefix == '01_' or prefix not in PARTITION_NAME_MAP:
                continue
            file_path = os.path.join(folder, filename)
            partition_name = PARTITION_NAME_MAP[prefix]
            file_data = parse_du_file(file_path)
            all_files[filename] = file_data
            if is_main_partition_file(filename, prefix):
                machine_data[prefix] = file_data
    except OSError as e:
        print("directory access error: %s" % str(e))
        return "cannot access directory %s: %s" % (folder, str(e))

    # create the Excel workbook
    try:
        wb = xlwt.Workbook(encoding='utf-8')
        # ====== style definitions ======
        header_style = xlwt.easyxf('font: bold on')
        title_style = xlwt.easyxf('font: bold on, height 280; align: wrap on, vert centre')
        normal_style = xlwt.easyxf()
        summary_style = xlwt.easyxf('font: bold on, color blue;')
        wrap_style = xlwt.easyxf('align: wrap on, vert centre')  # notes column style

        # ====== overview sheet ======
        ws_overview = wb.add_sheet('Overview')
        current_row = 0
        # overview title
        ws_overview.write_merge(current_row, current_row, 0, 4, "Storage usage overview", title_style)
        current_row += 1
        # header row (with a notes column)
        headers = ['Partition', 'Total (MB)', 'Mark', 'Diff (MB)', 'Notes (top 3)']
        for col, header in enumerate(headers):
            ws_overview.write(current_row, col, header, header_style)
        current_row += 1

        # per-partition summary data
        overview_data = []
        total_machine = 0.0
        # process partitions in order
        for prefix in sorted(PARTITION_NAME_MAP.keys()):
            partition_name = PARTITION_NAME_MAP[prefix]
            # skip the data partition
            if partition_name == 'data':
                continue
            # use the lpdump size instead of the du total
            key = partition_name + '_a'
            size = lpdump_data.get(key, 0.0)
            # update total
            total_machine += size
            # single device, so everything is marked unchanged
            mark = "unchanged"
            style = normal_style
            # list the partition's paths by size
            top_notes = []
            path_diffs = []
            all_paths = set(machine_data.get(prefix, {}).keys())
            for path in all_paths:
                size_path = machine_data.get(prefix, {}).get(path, 0.0)
                path_diffs.append((path, size_path))
            # sort by size, descending
            path_diffs.sort(key=lambda x: x[1], reverse=True)
            for i, (path, diff_val) in enumerate(path_diffs):
                # truncate overly long paths
                if len(path) > 50:
                    path = "..." + path[-47:]
                top_notes.append("%d. %s: %.2fMB" % (i + 1, path, diff_val))
            # save the partition row
            overview_data.append({
                'name': partition_name,
                'machine': size,
                'mark': mark,
                'style': style,
                'notes': "\n".join(top_notes) if top_notes else "no significant growth paths"
            })
            # write the row to the overview sheet
            ws_overview.write(current_row, 0, partition_name, style)
            ws_overview.write(current_row, 1, size, style)
            ws_overview.write(current_row, 2, mark, style)
            ws_overview.write(current_row, 3, "", style)
            current_row += 1

        # blank row
        current_row += 1
        # totals row
        ws_overview.write(current_row, 0, "Total", header_style)
        ws_overview.write(current_row, 1, total_machine, header_style)
        ws_overview.write(current_row, 2, "", header_style)
        ws_overview.write(current_row, 3, "", header_style)
        ws_overview.write(current_row, 4, "", header_style)

        # check whether the reserve requirement is met
        cr = 0  # commercial reserve
        current_row += 1
        if is_os_project:
            required_space = (total_machine + (upgrade_generations * reserve_per_generation)
                              + (maintenance_generations * 200) + (commercial_max_usage * 1024) - my_preload_size)
            cr = (commercial_max_usage * 1024) - my_preload_size
        else:
            required_space = total_machine + upgrade_generations * reserve_per_generation + maintenance_generations * 200
        space_difference = super_size - required_space
        reserved_space = super_size - total_machine
        symbol = ">"
        if required_space > super_size:
            status = "not met"
            status_style = removed_style
            print("insufficient reserve! used: %.2fMB | reserved: %.2fMB | super size: %.2fMB | short by %.2fMB | required: %.2fMB"
                  % (total_machine, reserved_space, super_size, abs(space_difference), required_space))
        else:
            status = "met"
            status_style = added_style
            symbol = "<"
            print("sufficient reserve! used: %.2fMB | reserved: %.2fMB | super size: %.2fMB | %.2fMB left | required: %.2fMB"
                  % (total_machine, reserved_space, super_size, space_difference, required_space))
        print("total_machine: %.2fMB | upgrade_generations: %.2f | reserve_per_generation: %.2fMB | commercial_max_usage: %.2f | my_preload_size: %.2fMB"
              % (total_machine, upgrade_generations, reserve_per_generation, commercial_max_usage, my_preload_size))
        # write the verdict
        ws_overview.write(current_row, 0, "Reserve check", header_style)
        ws_overview.write(current_row, 1, "", normal_style)
        ws_overview.write(current_row, 2, status, status_style)
        ws_overview.write(current_row, 3, "", normal_style)
        ws_overview.write(current_row, 4,
                          "used: %.2fMB | reserved: %.2fMB | super size: %.2fMB"
                          % (total_machine, reserved_space, super_size), wrap_style)
        current_row += 1
        ws_overview.write(current_row, 0, "Formula", header_style)
        ws_overview.write(current_row, 4,
                          "required: %.2fMB = used: %.2fMB + upgrade reserve: %.2f*%.2f + %.2f*200 + commercial reserve: %.2f %s super size: %.2f"
                          % (required_space, total_machine, upgrade_generations, reserve_per_generation,
                             maintenance_generations, cr, symbol, super_size), wrap_style)
        # notes column width (100 characters)
        ws_overview.col(4).width = 256 * 100

        # ====== one sheet per file ======
        all_filenames = sorted(set(all_files.keys()))
        for filename in all_filenames:
            # extract the file prefix
            prefix = extract_file_prefix(filename)
            # skip invalid prefixes
            if prefix not in PARTITION_NAME_MAP:
                continue
            partition_name = PARTITION_NAME_MAP[prefix]
            # sheet name = file name without extension
            sheet_name = filename.replace('.txt', '')
            if len(sheet_name) > 31:  # Excel sheet-name length limit
                sheet_name = sheet_name[:31]
            ws = wb.add_sheet(sheet_name)
            # print("creating sheet: %s" % sheet_name)
            current_row = 0
            # partition title
            title = "Partition: %s - File: %s" % (partition_name, filename)
            ws.write_merge(current_row, current_row, 0, 3, title, title_style)
            current_row += 1
            # folder name
            ws.write_merge(current_row, current_row, 0, 1, folder_name, header_style)
            ws.write(current_row, 2, "Diff (M)", header_style)
            ws.write(current_row, 3, "Mark", header_style)
            current_row += 1
            # header row
            headers = ['Path', 'Size (M)', 'Diff (M)', 'Mark']
            for col, header in enumerate(headers):
                ws.write(current_row, col, header, header_style)
            current_row += 1
            # file data
            data = all_files.get(filename, {})
            all_paths = sorted(set(data.keys()))
            # data rows
            for path in all_paths:
                size = data.get(path, 0.0)
                # single device, so mark as unchanged
                mark = "unchanged"
                cell_style = normal_style
                ws.write(current_row, 0, path, cell_style)
                ws.write(current_row, 1, size, cell_style)
                ws.write(current_row, 2, "", cell_style)
                ws.write(current_row, 3, mark, cell_style)
                current_row += 1
            # file totals row
            file_total = sum(data.values())
            ws.write(current_row, 0, "File total", header_style)
            ws.write(current_row, 1, file_total, header_style)
            ws.write(current_row, 2, "", header_style)
            ws.write(current_row, 3, "", header_style)
            current_row += 1
            # summary row
            message = (u"under %s: files total %.2fM") % (partition_name, file_total)
            ws.write_merge(current_row, current_row, 0, 3, message, summary_style)

        # save the workbook
        wb.save(output_xlsx)
        return "single-device report generated: %s" % output_xlsx
    except Exception as e:
        import traceback
        traceback.print_exc()
        return "error generating Excel file: %s" % str(e)

if __name__ == "__main__":
    # argument parser
    parser = argparse.ArgumentParser(description='storage usage analysis tool')
    subparsers = parser.add_subparsers(dest='mode', help='run mode')
    # two-device comparison mode
    dual_parser = subparsers.add_parser('dual', help='two-device comparison mode')
    dual_parser.add_argument('folder1', help='first folder path')
    dual_parser.add_argument('folder2', help='second folder path')
    dual_parser.add_argument('output', help='output Excel file path')
    dual_parser.add_argument('--is-os-project', action='store_true', default=True, help='whether this is an OS project')
    dual_parser.add_argument('--upgrade-generations', type=int, default=0, help='number of upgrade generations')
    dual_parser.add_argument('--maintenance-generations', type=int, default=1, help='number of maintenance generations')
    dual_parser.add_argument('--reserve-per-generation', type=float, default=700, help='reserve per generation (MB)')
    dual_parser.add_argument('--commercial-max-usage', type=float, default=2.2, help='historical max commercial usage')
    # single-device breakdown mode
    single_parser = subparsers.add_parser('single', help='single-device breakdown mode')
    single_parser.add_argument('folder', help='folder path to analyze')
    single_parser.add_argument('output', help='output Excel file path')
    single_parser.add_argument('--is-os-project', action='store_true', default=True, help='whether this is an OS project')
    single_parser.add_argument('--upgrade-generations', type=int, default=0, help='number of upgrade generations')
    single_parser.add_argument('--maintenance-generations', type=int, default=1, help='number of maintenance generations')
    single_parser.add_argument('--reserve-per-generation', type=float, default=700, help='reserve per generation (MB)')
    single_parser.add_argument('--commercial-max-usage', type=float, default=2.2, help='historical max commercial usage')
    # parse arguments
    args = parser.parse_args()
    if args.mode == 'dual':
        print("running two-device comparison mode...")
        result = generate_dual_report(
            args.folder1, args.folder2, args.output,
            is_os_project=args.is_os_project,
            upgrade_generations=args.upgrade_generations,
            maintenance_generations=args.maintenance_generations,
            reserve_per_generation=args.reserve_per_generation,
            commercial_max_usage=args.commercial_max_usage
        )
    elif args.mode == 'single':
        print("running single-device breakdown mode...")
        result = generate_single_report(
            args.folder, args.output,
            is_os_project=args.is_os_project,
            upgrade_generations=args.upgrade_generations,
            maintenance_generations=args.maintenance_generations,
            reserve_per_generation=args.reserve_per_generation,
            commercial_max_usage=args.commercial_max_usage
        )
    else:
        result = "error: choose 'dual' or 'single' mode"
    print(result)

How can the script above be packaged into an .exe that runs on Windows?
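One common route is PyInstaller (a sketch, not the only way; it assumes Python 3 and pip are installed on the Windows machine, and storage_report.py below is a placeholder for whatever the script file is actually named). From a Windows command prompt:

pip install pyinstaller xlwt
pyinstaller --onefile storage_report.py

PyInstaller does not cross-compile, so the build must run on Windows to produce a Windows .exe. The result lands in dist\storage_report.exe and keeps the same command-line interface, for example:

dist\storage_report.exe dual C:\data\machine1 C:\data\machine2 report.xls

Since xlwt is pure Python, PyInstaller normally bundles it without extra hooks; if the import were somehow missed, adding --hidden-import xlwt to the pyinstaller command is the usual workaround.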
11-22
[Power Systems] Simulink simulation of transient stability under short-circuit faults in a single-machine infinite-bus system (with documentation). Summary: this resource provides a complete Simulink model plus an accompanying document for studying the transient stability of a power system after a short-circuit fault. A single-machine infinite-bus (SMIB) system is built in Simulink, different fault types (such as a three-phase short circuit) are simulated, and the system's dynamic response during the fault and after clearing is analyzed, including changes in the generator's rotor angle, speed, voltage, and power, in order to assess the system's transient stability. The simulation helps in understanding stability mechanisms and in mastering transient-analysis methods. Intended audience: undergraduate and graduate students in electrical engineering and related fields, plus researchers and engineers working on power-system analysis, operation, and control. Usage scenarios and goals: (1) learn the basic concepts and analysis methods of power-system transient stability; (2) acquire the skills to model and simulate power systems in Simulink; (3) study how short-circuit faults affect system stability and what measures improve it (e.g., optimizing the fault-clearing time); (4) support simulation work in course projects, theses, or research. Reading suggestions: study this alongside power-system stability theory; first understand the function and parameter settings of each block in the model, then run the simulation and analyze the outputs carefully, and try varying the fault type or system parameters to observe their effect on stability, deepening the understanding of transient stability.
This work applies support vector machines (SVM) to a data-prediction task on the MATLAB platform and introduces particle swarm optimization (PSO) to tune the model's key parameters automatically. It is a typical machine-learning exercise: an SVM classifier is built, and PSO's global search capability is used to find the optimal hyperparameter configuration efficiently, noticeably improving overall predictive performance. SVM is a classic supervised learning method: it constructs a maximum-margin decision boundary in a high-dimensional feature space to classify samples or perform regression. It handles small sample sets, nonlinear relationships, and high-dimensional features well, because a kernel function maps the original data into a higher-dimensional space where an otherwise complex classification problem becomes linearly separable. PSO is a swarm-intelligence optimization technique inspired by the social behavior of bird flocks. Each candidate solution is treated as a "particle"; the swarm searches the solution space cooperatively, iteratively updating each particle's velocity and position based on its own best-known solution and the swarm's global best, gradually converging on an optimum. In this application, PSO searches for the best combination of the two parameters that most affect SVM performance: the regularization parameter C and the kernel parameter γ. The provided code covers the full pipeline: data loading, preprocessing (e.g., standardization), building a baseline SVM, and the PSO optimization loop. The optimization is run for different kernels (linear, polynomial, radial basis function, etc.), and model performance before and after tuning is compared systematically using accuracy, precision, recall, and F1 score, quantifying how much PSO improves the SVM classifier. Through a concrete MATLAB implementation, the study demonstrates how to combine a global optimization algorithm with a machine-learning model to solve the key problem of parameter selection; readers gain both a deeper understanding of how SVM works and a practical method for improving generalization with intelligent optimization. The resource is shared from the internet for learning and exchange only; do not use it commercially, and contact me for removal in case of infringement.
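To make the pipeline concrete, here is an illustrative sketch of PSO-tuned SVM hyperparameters in Python with scikit-learn (the post's actual implementation is in MATLAB; the dataset, swarm size, coefficient values, and log-scale search ranges below are illustrative assumptions, not taken from the original project):

# Minimal global-best PSO over (log10 C, log10 gamma) for an RBF SVM.
import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

X, y = load_breast_cancer(return_X_y=True)
X = StandardScaler().fit_transform(X)          # standardize the features

def fitness(pos):
    """Mean 3-fold cross-validated accuracy at a given (log10 C, log10 gamma)."""
    C, gamma = 10.0 ** pos[0], 10.0 ** pos[1]
    return cross_val_score(SVC(C=C, gamma=gamma), X, y, cv=3).mean()

rng = np.random.default_rng(0)
n_particles, n_iters = 10, 15
lo, hi = np.array([-2.0, -4.0]), np.array([3.0, 1.0])   # search in log10 space
pos = rng.uniform(lo, hi, size=(n_particles, 2))
vel = np.zeros_like(pos)
pbest = pos.copy()                              # per-particle best positions
pbest_val = np.array([fitness(p) for p in pos])
gbest = pbest[pbest_val.argmax()].copy()        # swarm-wide best position

w, c1, c2 = 0.7, 1.5, 1.5                       # inertia, cognitive, social weights
for _ in range(n_iters):
    r1, r2 = rng.random((n_particles, 2)), rng.random((n_particles, 2))
    vel = w * vel + c1 * r1 * (pbest - pos) + c2 * r2 * (gbest - pos)
    pos = np.clip(pos + vel, lo, hi)            # keep particles inside the bounds
    vals = np.array([fitness(p) for p in pos])
    improved = vals > pbest_val
    pbest[improved], pbest_val[improved] = pos[improved], vals[improved]
    gbest = pbest[pbest_val.argmax()].copy()

print("best C=%.4g, gamma=%.4g, CV accuracy=%.4f"
      % (10.0 ** gbest[0], 10.0 ** gbest[1], pbest_val.max()))

The structure mirrors the description above: each particle's velocity is pulled toward its personal best and the global best, and the fitness function is simply the cross-validated score of an SVM trained at that particle's (C, γ).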