No18:Maximum path sum I

本文探讨了通过递归及动态规划方法解决寻找三角形路径最大总和的问题,并给出了具体的Python实现代码。最终找到了从顶点到底部的最大路径及其值。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >

By starting at the top of the triangle below and moving to adjacent numbers on the row below, the maximum total from top to bottom is 23.

3
7 4
2 4 6
8 5 9 3

That is, 3 + 7 + 4 + 9 = 23.

Find the maximum total from top to bottom of the triangle below:

75
95 64
17 47 82
18 35 87 10
20 04 82 47 65
19 01 23 75 03 34
88 02 77 73 07 63 67
99 65 04 28 06 16 70 92
41 41 26 56 83 40 80 70 33
41 48 72 33 47 32 37 16 94 29
53 71 44 65 25 43 91 52 97 51 14
70 11 33 28 77 73 17 78 39 68 17 57
91 71 52 38 17 14 91 43 58 50 27 29 48
63 66 04 68 89 53 67 30 73 16 69 87 40 31
04 62 98 27 23 09 70 98 73 93 38 53 60 04 23

NOTE: As there are only 16384 routes, it is possible to solve this problem by trying every route. However, Problem 67, is the same challenge with a triangle containing one-hundred rows; it cannot be solved by brute force, and requires a clever method! ;o)

我起初的想法是每一步只选取较大的那个孩子——这就是贪心法,结果也证明是错的。

#!/usr/bin/python
#encoding=utf-8
import operator 
import datetime
import os
############### greedy search ###################
def searchpath (a,li,gra,height=15):
    """Greedy best-first descent through the spread triangle grid (Python 2).

    a      -- current node as a tuple (accumulated sum, row, col)
    li     -- frontier list of generated-but-unexpanded nodes
    gra    -- 2-D "spread" grid holding the triangle values
    height -- number of rows in gra

    Returns the accumulated sum of the first node that reaches the last row.
    NOTE(review): this greedy/best-first strategy is NOT guaranteed to find
    the maximum path sum -- the author later replaces it with memoized
    recursion and dynamic programming (it yields 1064 instead of 1074).
    """
    now_row=a[1]
    now_col=a[2]
    # In the spread grid the two children of (row, col) live at
    # (row+1, col+1) and (row+1, col-1).
    nextinfo1=(gra[now_row+1][now_col+1]+a[0],now_row+1,now_col+1)
    nextinfo2=(gra[now_row+1][now_col-1]+a[0],now_row+1,now_col-1)
    for info_in in (nextinfo1,nextinfo2) :
         if info_in[1]==height-1:
              # A child sits on the last row: stop and return its total.
              print info_in
              return info_in[0]
         else:
              li.append(info_in)
    # Expand the frontier node with the largest accumulated sum next.
    li.sort(reverse=True)
    nextinfo=li.pop(0)
    print nextinfo
    return searchpath(nextinfo,li,gra,height)
def calList (in_str):
    """Parse a triangle string into a 2-D "spread" grid (Python 2).

    in_str -- triangle text, one row per line, numbers separated by spaces.
    Returns a height x (2*width-1) grid where the triangle values occupy
    alternating columns (zeros elsewhere), so that the children of a cell
    (row, col) are at (row+1, col-1) and (row+1, col+1).
    """
    lines=[ [ int(x) for x in line.split() if x.strip() ]  for line in in_str.split('\n')     ]
    height,width=len(lines),len(lines[-1])
    print height,width
    empty_list=[[ 0 for x  in xrange(2*width-1)  ] for y in xrange(height)]     
    for row in xrange(height,0,-1):
         # Rows nearer the top are indented further into the grid.
         indent=height-row
         for  col in xrange(indent,2*width-1-indent) :
              i=col-indent
              if not  i%2:
                   # Only even offsets carry triangle values; (i+1)/2 is the
                   # source index within the row (Python 2 integer division).
                   print row-1,col,(i+1)/2,lines[row-1][(i+1)/2] 
                   empty_list[row-1][col]=lines[row-1][(i+1)/2]  
    return empty_list   
# Project Euler problem 18 triangle, one row of numbers per line.
strgot="""75
95 64
17 47 82
18 35 87 10
20 04 82 47 65
19 01 23 75 03 34
88 02 77 73 07 63 67
99 65 04 28 06 16 70 92
41 41 26 56 83 40 80 70 33
41 48 72 33 47 32 37 16 94 29
53 71 44 65 25 43 91 52 97 51 14
70 11 33 28 77 73 17 78 39 68 17 57
91 71 52 38 17 14 91 43 58 50 27 29 48
63 66 04 68 89 53 67 30 73 16 69 87 40 31
04 62 98 27 23 09 70 98 73 93 38 53 60 04 23"""

# Parse the triangle into a list of int rows.
lines=[ [ int(x) for x in line.split() if x.strip() ]  for line in strgot.split('\n')     ]
height,width=len(lines),len(lines[-1])
print height,width
# Empty "spread" grid: height rows x (2*width-1) columns of zeros.
empty_list=[[ 0 for x  in xrange(2*width-1)  ] for y in xrange(height)]
     
# Copy each triangle value onto alternating columns so the children of a
# cell (row, col) land at (row+1, col-1) and (row+1, col+1).
for row in xrange(height,0,-1):
    indent=height-row
    for  col in xrange(indent,2*width-1-indent) :
         i=col-indent
         if not  i%2:
              print row-1,col,(i+1)/2,lines[row-1][(i+1)/2] 
              empty_list[row-1][col]=lines[row-1][(i+1)/2]               
for innerlist in  empty_list:
    print innerlist
s_li=[]

# The apex value 75 sits at row 0, column 14 of the spread grid.
print empty_list[0][14]     
# NOTE(review): greedy search returns 1064 here, not the true maximum 1074.
mxv=searchpath((75,0,14),s_li,empty_list,15)
print mxv

# Small 4-row sample triangle used for hand-checking.
in_str="""3
7 4
2 4 6
8 5 9 3"""
# The triple-quoted string below merely disables this test snippet.
"""
s_list=[]
gra=calList(in_str)
mxv=searchpath ((3,0,3),s_list,gra,height=4)
print mxv
"""
#print f_list
#print mxv
#(value,row,col)

          
#print lines     
我算出的结果为 1064,提交后却提示错误。

....发现自己不会做了,Fuck了,艹了。。

今天04/16了差不过1个多月了。
过了一个月,我再看我的代码,我只想说,这是我写的代码吗?这是给人看的代码吗。。一点注释都没有
。。。。
。。一定要养成注释的习惯了。。。
然后讨论一下这个问题吧。
过了一个月,再看这个问题,感觉不是很难。
递归或者动态规划都可以做出来啊。
这里先写出递归的吧。动态规划明天再做。

#encoding=utf-8
import operator
import datetime
import os

def searchpath (a,li,gra,height=15):
    """Greedy best-first descent through the spread triangle grid (Python 2).

    a      -- the element currently visited: (accumulated sum, row, col)
    li     -- list storing the not-yet-expanded frontier elements
    gra    -- 2-D list holding all the triangle elements
    height -- height (number of rows) of gra

    NOTE(review): greedy expansion is not guaranteed to find the true
    maximum path (it returns 1064 here instead of the correct 1074).
    """
    now_row=a[1]# current row index
    now_col=a[2]# current column index
    nextinfo1=(gra[now_row+1][now_col+1]+a[0],now_row+1,now_col+1)# right child on the next row
    nextinfo2=(gra[now_row+1][now_col-1]+a[0],now_row+1,now_col-1)# left child
    for info_in in (nextinfo1,nextinfo2) :
         if info_in[1]==height-1:# the row index is the last row
              print info_in
              return info_in[0]
         else:# not the last row: keep it in the frontier list
              li.append(info_in)
    # Sort li in descending order and expand the largest accumulated sum.
    li.sort(reverse=True)
    nextinfo=li.pop(0)
    print nextinfo
    return searchpath(nextinfo,li,gra,height)
def calList (in_str):
    """Convert the triangle string into a 2-D "spread" list (Python 2).

    The triangle values occupy alternating columns of a
    height x (2*width-1) grid, zeros elsewhere, so the children of
    (row, col) are at (row+1, col-1) and (row+1, col+1).
    """
    lines=[ [ int(x) for x in line.split() if x.strip() ]  for line in in_str.split('\n')     ]# parse the input string into a 2-D list of ints
    height,width=len(lines),len(lines[-1])
    print height,width
    empty_list=[[ 0 for x  in xrange(2*width-1)  ] for y in xrange(height)]
    for row in xrange(height,0,-1):
         indent=height-row
         for  col in xrange(indent,2*width-1-indent) :
              i=col-indent
              if not  i%2:
                   # Even offsets carry values; (i+1)/2 indexes the source
                   # row (Python 2 integer division).
                   print row-1,col,(i+1)/2,lines[row-1][(i+1)/2]
                   empty_list[row-1][col]=lines[row-1][(i+1)/2]
    return empty_list
#递归法
def recursive_path(search_ele,gra,session,height):
    """Memoized top-down search for the best path value from one cell.

    search_ele -- the current cell as a (value, row, col) tuple
    gra        -- 2-D "spread" grid holding all triangle values
    session    -- memo table; -1 marks a cell not yet computed
    height     -- number of rows of gra

    Returns the maximum total obtainable from this cell down to the
    bottom row (cell value included).
    """
    row, col = search_ele[1], search_ele[2]
    cached = session[row][col]
    if cached != -1:
        # This subproblem was already solved once -- reuse it.
        return cached
    # Children of (row, col) in the spread grid sit at col-1 and col+1.
    left = (gra[row + 1][col - 1], row + 1, col - 1)
    right = (gra[row + 1][col + 1], row + 1, col + 1)
    if row == height - 2:
        # Children lie on the last row: no further recursion needed.
        best_below = max(left[0], right[0])
    else:
        best_below = max(
            recursive_path(left, gra, session, height),
            recursive_path(right, gra, session, height),
        )
    result = best_below + search_ele[0]
    session[row][col] = result
    return result
starttime=datetime.datetime.now()
# Project Euler problem 18 triangle, one row of numbers per line.
strgot="""75
95 64
17 47 82
18 35 87 10
20 04 82 47 65
19 01 23 75 03 34
88 02 77 73 07 63 67
99 65 04 28 06 16 70 92
41 41 26 56 83 40 80 70 33
41 48 72 33 47 32 37 16 94 29
53 71 44 65 25 43 91 52 97 51 14
70 11 33 28 77 73 17 78 39 68 17 57
91 71 52 38 17 14 91 43 58 50 27 29 48
63 66 04 68 89 53 67 30 73 16 69 87 40 31
04 62 98 27 23 09 70 98 73 93 38 53 60 04 23"""
s_list=[]
# Spread the triangle into a rectangular grid (see calList).
gra=calList(strgot)
height,width=len(gra),len(gra[0])# height and width of the spread grid
# Column of the apex value 75 in the top row of the grid.
searchindex=gra[0].index(75)
session=[ [-1 for col in xrange(width) ] for row in  xrange(height)]# memo table; -1 means "not computed yet"

# Maximum top-to-bottom path sum via memoized recursion (expected: 1074).
mxv=recursive_path((75,0,searchindex),gra,session,height)
print mxv
endtime=datetime.datetime.now()
print (endtime-starttime)
输出的结果为:


动态规划法如下:
#encoding=utf-8
import operator
import datetime
import os
#动态规划法
def getmaxvalue_path(gra,height,width):
    """Bottom-up dynamic programming over the spread triangle grid.

    gra    -- height x width "spread" grid (triangle values on alternating
              columns, zeros elsewhere)
    height -- number of rows of gra
    width  -- number of columns of gra

    Returns a height x width table in which cell (row, col) holds a tuple
    (maximum total from that cell down to the last row,
     list of (row, col) steps of that best path).
    The overall answer is the entry at the apex cell of row 0.

    Fixes vs. the original:
    * BUG FIX: the child comparison used ``temptuple2[0] > temptuple``
      (an int compared against a (value, path) tuple -- always False in
      Python 2), so the right child was never chosen and the result was
      794 instead of 1074; it now compares ``temptuple2[0] > temptuple[0]``.
    * ``xrange`` replaced by ``range`` (identical iteration behaviour on
      Python 2, and the function now also runs on Python 3).
    """
    # (0, []) means (best value so far, path taken).
    maxvalue_path=[[(0,[]) for col in range(width)] for row in range(height)]
    for row in range(height-1,-1,-1):
        if row==height-1:
            # Base case: on the last row the best path is the cell itself.
            maxvalue_path[row]=[(gra[row][col],[(row,col)]) for col in range(width)]
        else:
            for col in range(width):
                if col==0:
                    # First column: only the right child exists.
                    temptuple=maxvalue_path[row+1][col+1]
                elif col==width-1:
                    # Last column: only the left child exists.
                    temptuple=maxvalue_path[row+1][col-1]
                else:
                    # Interior column: take the better of the two children.
                    temptuple=maxvalue_path[row+1][col-1]
                    temptuple2=maxvalue_path[row+1][col+1]
                    if temptuple2[0]>temptuple[0]:
                        temptuple=temptuple2
                # Prepend the current cell to a copy of the child's path.
                path_list=temptuple[1][:]
                path_list.insert(0,(row,col))
                maxvalue_path[row][col]=(temptuple[0]+gra[row][col],path_list)
    return maxvalue_path
def calList (in_str):
    """Convert a whitespace-separated triangle string into a 2-D "spread" grid.

    in_str -- triangle text, one row per line, numbers separated by spaces.
    Returns a height x (2*width-1) list of lists: the k-th value of triangle
    row ``r`` is stored at column ``(height-1-r) + 2*k`` and all other cells
    are 0, so the two children of a cell (row, col) sit at (row+1, col-1)
    and (row+1, col+1).

    Fixes vs. the original: ``xrange`` -> ``range`` and ``(i+1)/2`` ->
    floor division ``//`` so the function behaves identically on Python 2
    and also runs on Python 3; stale commented-out debug prints removed.
    """
    # Parse the input string into a 2-D list of ints, skipping blanks.
    lines=[[int(x) for x in line.split() if x.strip()] for line in in_str.split('\n')]
    height,width=len(lines),len(lines[-1])
    empty_list=[[0 for x in range(2*width-1)] for y in range(height)]
    for row in range(height,0,-1):
        indent=height-row          # left padding grows toward the top row
        for col in range(indent,2*width-1-indent):
            i=col-indent
            if not i%2:            # only even offsets carry triangle values
                empty_list[row-1][col]=lines[row-1][(i+1)//2]
    return empty_list

starttime=datetime.datetime.now()
# Project Euler problem 18 triangle, one row of numbers per line.
strgot="""75
95 64
17 47 82
18 35 87 10
20 04 82 47 65
19 01 23 75 03 34
88 02 77 73 07 63 67
99 65 04 28 06 16 70 92
41 41 26 56 83 40 80 70 33
41 48 72 33 47 32 37 16 94 29
53 71 44 65 25 43 91 52 97 51 14
70 11 33 28 77 73 17 78 39 68 17 57
91 71 52 38 17 14 91 43 58 50 27 29 48
63 66 04 68 89 53 67 30 73 16 69 87 40 31
04 62 98 27 23 09 70 98 73 93 38 53 60 04 23"""
s_list=[]
# Spread the triangle into a rectangular grid (see calList).
gra=calList(strgot)
height,width=len(gra),len(gra[0])# height and width of the spread grid
# Column of the apex value 75 in the top row of the grid.
searchindex=gra[0].index(75)
session=[ [-1 for col in xrange(width) ] for row in  xrange(height)]# memo table (unused by the DP version)

# Stale call left over from the recursive version:
#mxv=getmaxvalue_path((75,0,searchindex),gra,session,height)
# DP table; the apex entry holds (best total, best path).
maxvalue_path=getmaxvalue_path(gra,height,width)
print maxvalue_path[0][searchindex]
endtime=datetime.datetime.now()
print (endtime-starttime)

但是有问题啊。
15 15
(794, [(0, 14), (1, 13), (2, 12), (3, 11), (4, 10), (5, 9), (6, 8), (7, 7), (8, 6), (9, 5), (10, 4), (11, 3), (12, 2), (13, 1), (14, 0)])
0:00:00.002000

为什么输出结果会这个样子呢?

========================================================================================

知道是哪里了
if temptuple2[0]>temptuple:
                        temptuple=temptuple2

这里写错了。。一不小心就写错了。。

改为
if temptuple2[0]>temptuple[0]:
                        temptuple=temptuple2

就好了,
结果如下:

#encoding=utf-8
import operator
import datetime
import os
#动态规划法
def getmaxvalue_path(gra,height,width):
    """Bottom-up dynamic programming over the spread triangle grid.

    gra    -- height x width "spread" grid (triangle values on alternating
              columns, zeros elsewhere)
    height -- number of rows of gra
    width  -- number of columns of gra

    Returns a height x width table in which cell (row, col) holds a tuple
    (maximum total from that cell down to the last row,
     list of (row, col) steps of that best path).
    The overall answer is the entry at the apex cell of row 0.

    Fix vs. the original: ``xrange`` replaced by ``range`` (identical
    iteration behaviour on Python 2, and the function now also runs on
    Python 3).
    """
    # (0, []) means (best value so far, path taken).
    maxvalue_path=[[(0,[]) for col in range(width)] for row in range(height)]
    for row in range(height-1,-1,-1):
        if row==height-1:
            # Base case: on the last row the best path is the cell itself.
            maxvalue_path[row]=[(gra[row][col],[(row,col)]) for col in range(width)]
        else:
            for col in range(width):
                if col==0:
                    # First column: only the right child exists.
                    temptuple=maxvalue_path[row+1][col+1]
                elif col==width-1:
                    # Last column: only the left child exists.
                    temptuple=maxvalue_path[row+1][col-1]
                else:
                    # Interior column: take the better of the two children.
                    temptuple=maxvalue_path[row+1][col-1]
                    temptuple2=maxvalue_path[row+1][col+1]
                    if temptuple2[0]>temptuple[0]:
                        temptuple=temptuple2
                # Prepend the current cell to a copy of the child's path.
                path_list=temptuple[1][:]
                path_list.insert(0,(row,col))
                maxvalue_path[row][col]=(temptuple[0]+gra[row][col],path_list)
    return maxvalue_path
def calList (in_str):
    """Convert a whitespace-separated triangle string into a 2-D "spread" grid.

    in_str -- triangle text, one row per line, numbers separated by spaces.
    Returns a height x (2*width-1) list of lists: the k-th value of triangle
    row ``r`` is stored at column ``(height-1-r) + 2*k`` and all other cells
    are 0, so the two children of a cell (row, col) sit at (row+1, col-1)
    and (row+1, col+1).

    Fixes vs. the original: ``xrange`` -> ``range`` and ``(i+1)/2`` ->
    floor division ``//`` so the function behaves identically on Python 2
    and also runs on Python 3; stale commented-out debug prints removed.
    """
    # Parse the input string into a 2-D list of ints, skipping blanks.
    lines=[[int(x) for x in line.split() if x.strip()] for line in in_str.split('\n')]
    height,width=len(lines),len(lines[-1])
    empty_list=[[0 for x in range(2*width-1)] for y in range(height)]
    for row in range(height,0,-1):
        indent=height-row          # left padding grows toward the top row
        for col in range(indent,2*width-1-indent):
            i=col-indent
            if not i%2:            # only even offsets carry triangle values
                empty_list[row-1][col]=lines[row-1][(i+1)//2]
    return empty_list

starttime=datetime.datetime.now()
# Project Euler problem 18 triangle, one row of numbers per line.
strgot="""75
95 64
17 47 82
18 35 87 10
20 04 82 47 65
19 01 23 75 03 34
88 02 77 73 07 63 67
99 65 04 28 06 16 70 92
41 41 26 56 83 40 80 70 33
41 48 72 33 47 32 37 16 94 29
53 71 44 65 25 43 91 52 97 51 14
70 11 33 28 77 73 17 78 39 68 17 57
91 71 52 38 17 14 91 43 58 50 27 29 48
63 66 04 68 89 53 67 30 73 16 69 87 40 31
04 62 98 27 23 09 70 98 73 93 38 53 60 04 23"""
s_list=[]
# Spread the triangle into a rectangular grid (see calList).
gra=calList(strgot)
height,width=len(gra),len(gra[0])# height and width of the spread grid
# Column of the apex value 75 in the top row of the grid.
searchindex=gra[0].index(75)
session=[ [-1 for col in xrange(width) ] for row in  xrange(height)]# memo table (unused by the DP version)

# Stale call left over from the recursive version:
#mxv=getmaxvalue_path((75,0,searchindex),gra,session,height)
# DP table; the apex entry holds (best total 1074, best path).
maxvalue_path=getmaxvalue_path(gra,height,width)
print maxvalue_path[0][searchindex]
endtime=datetime.datetime.now()
print (endtime-starttime)
运行结果为:
(1074, [(0, 14), (1, 15), (2, 16), (3, 15), (4, 14), (5, 15), (6, 14), (7, 13), (8, 14), (9, 15), (10, 16), (11, 17), (12, 18), (13, 17), (14, 18)])
0:00:00.001000

#!/usr/bin/env python -- coding: utf-8 -- import os import re import sys import argparse import xlwt from collections import defaultdict 分区名称映射表(前缀 → 友好名称) PARTITION_NAME_MAP = { ‘02_’: ‘system’, ‘03_’: ‘vendor’, ‘04_’: ‘product’, ‘05_’: ‘odm’, ‘06_’: ‘my_product’, ‘07_’: ‘my_engineering’, ‘08_’: ‘my_stock’, ‘09_’: ‘my_heytap’, ‘10_’: ‘my_company’, ‘11_’: ‘my_carrier’, ‘12_’: ‘my_region’, ‘13_’: ‘my_preload’, ‘14_’: ‘data’, ‘15_’: ‘my_bigball’, ‘16_’: ‘my_manifest’, ‘17_system_dlkm’: ‘system_dlkm’, ‘17_vendor_dlkm’: ‘vendor_dlkm’, ‘17_cache’: ‘cache’ } def parse_du_file(file_path): “”“解析du命令输出文件并转换为MB”“” data = {} try: with open(file_path, ‘r’) as f: for line in f: if ‘Permission denied’ in line or ‘No such file’ in line or not line.strip(): continue match = re.match(r'(\d+\.?\d*)\s*([KMG]?)[Bb]?\s+(.*)', line.strip()) if match: size, unit, path = match.groups() size = float(size) # 单位转换到MB if unit == 'K': size = size / 1024.0 elif unit == '': size = size / (1024*1024.0) elif unit == 'M': pass elif unit == 'G': size = size * 1024.0 data[path] = round(size, 4) except IOError as e: print("警告: 无法读取文件 {}: {}".format(file_path, str(e))) return data def extract_file_prefix(filename): “”“提取文件前缀”“” if filename.startswith(‘17_’): return filename.replace(‘.txt’, ‘’) match = re.match(r’^(\d+)', filename) return match.group(1) if match else "other" def is_main_partition_file(filename, prefix): “”“检查是否为主分区文件”“” if prefix.startswith(‘17_’): return True expected_name = prefix + PARTITION_NAME_MAP[prefix] + “.txt” return filename == expected_name def generate_dual_report(folder1, folder2, output_xlsx): “”“生成双机对比报告”“” folder1_name = os.path.basename(os.path.normpath(folder1)) folder2_name = os.path.basename(os.path.normpath(folder2)) for folder in [folder1, folder2]: if not os.path.exists(folder): print("错误: 目录不存在 - {}".format(folder)) return "目录 {} 不存在,请检查路径".format(folder) if not os.path.isdir(folder): print("错误: 路径不是目录 - {}".format(folder)) return "{} 不是有效目录".format(folder) # 
初始化数据结构 machine1_main_data = {} machine2_main_data = {} machine1_all_files = defaultdict(dict) machine2_all_files = defaultdict(dict) # 收集数据 for folder_path, main_dict, all_dict in [ (folder1, machine1_main_data, machine1_all_files), (folder2, machine2_main_data, machine2_all_files) ]: print("处理目录: {}".format(folder_path)) try: for filename in os.listdir(folder_path): if not filename.endswith('.txt'): continue prefix = extract_file_prefix(filename) if prefix == '01_' or prefix not in PARTITION_NAME_MAP: continue file_path = os.path.join(folder_path, filename) partition_name = PARTITION_NAME_MAP[prefix] file_data = parse_du_file(file_path) all_dict[filename] = file_data if is_main_partition_file(filename, prefix): print("解析主分区文件: {}".format(file_path)) main_dict[prefix] = file_data except OSError as e: print("目录访问错误: {}".format(str(e))) return "无法访问目录 {}: {}".format(folder_path, str(e)) # 创建Excel工作簿 try: wb = xlwt.Workbook(encoding='utf-8') # ====== 修复:正确定义所有样式变量 ====== header_style = xlwt.easyxf('font: bold on') title_style = xlwt.easyxf('font: bold on, height 280; align: wrap on, vert centre') normal_style = xlwt.easyxf() added_style = xlwt.easyxf('pattern: pattern solid, fore_colour light_green;') removed_style = xlwt.easyxf('pattern: pattern solid, fore_colour rose;') summary_style = xlwt.easyxf('font: bold on, color blue;') wrap_style = xlwt.easyxf('align: wrap on, vert centre') # 修复:正确定义wrap_style # ====== 创建总览Sheet页 ====== ws_overview = wb.add_sheet('总览') print("创建总览Sheet页(仅主文件数据)") current_row = 0 # 写入总览标题 ws_overview.write_merge( current_row, current_row, 0, 5, "存储使用总览(仅主分区文件)", title_style ) current_row += 1 # 写入文件夹名称 ws_overview.write(current_row, 1, folder1_name, header_style) ws_overview.write(current_row, 2, folder2_name, header_style) current_row += 1 # 写入表头(增加备注列) headers = ['分区', '总大小(MB)', '总大小(MB)', '差值(MB)', '标记', '备注(增大TOP3)'] for col, header in enumerate(headers): ws_overview.write(current_row, col, header, header_style) current_row += 1 # 
存储各分区汇总数据 overview_data = [] total_machine1 = 0.0 total_machine2 = 0.0 # 按分区顺序处理数据 for prefix in sorted(PARTITION_NAME_MAP.keys()): partition_name = PARTITION_NAME_MAP[prefix] # 跳过data分区 if partition_name == 'data': continue # 获取主文件数据 data1 = machine1_main_data.get(prefix, {}) data2 = machine2_main_data.get(prefix, {}) # 计算主文件总大小 partition_total1 = round(sum(data1.values()), 2) partition_total2 = round(sum(data2.values()), 2) diff = partition_total1 - partition_total2 # 更新总计 total_machine1 += partition_total1 total_machine2 += partition_total2 # 确定标记样式 if diff > 0: mark = "增加" style = added_style elif diff < 0: mark = "减少" style = removed_style else: mark = "无变化" style = normal_style # 计算分区中增大的TOP3路径 top_notes = [] if diff > 0: # 只在分区增大时计算TOP路径 path_diffs = [] all_paths = set(data1.keys()) | set(data2.keys()) for path in all_paths: size1 = data1.get(path, 0.0) size2 = data2.get(path, 0.0) path_diff = size1 - size2 if path_diff > 0: # 只记录增大的路径 path_diffs.append((path, path_diff)) # 按增大值降序排序,取TOP3 path_diffs.sort(key=lambda x: x[1], reverse=True) for i, (path, diff_val) in enumerate(path_diffs[:3]): # 截断过长的路径名 if len(path) > 50: path = "..." + path[-47:] top_notes.append("{}. 
{}: +{:.2f}MB".format(i+1, path, diff_val)) # 保存分区数据 overview_data.append({ 'name': partition_name, 'machine1': partition_total1, 'machine2': partition_total2, 'diff': diff, 'style': style, 'mark': mark, 'notes': "\n".join(top_notes) if top_notes else "无显著增大路径" }) # 写入行数据到总览页(增加备注列) ws_overview.write(current_row, 0, partition_name, style) ws_overview.write(current_row, 1, partition_total1, style) ws_overview.write(current_row, 2, partition_total2, style) ws_overview.write(current_row, 3, diff, style) ws_overview.write(current_row, 4, mark, style) ws_overview.write(current_row, 5, overview_data[-1]['notes'], wrap_style) # 使用已定义的wrap_style current_row += 1 # 添加空行 current_row += 1 # 写入总计行 total_diff = total_machine1 - total_machine2 if total_diff > 0: total_mark = "总增加" total_style = added_style elif total_diff < 0: total_mark = "总减少" total_style = removed_style else: total_mark = "无变化" total_style = normal_style ws_overview.write(current_row, 0, "总计", header_style) ws_overview.write(current_row, 1, total_machine1, header_style) ws_overview.write(current_row, 2, total_machine2, header_style) ws_overview.write(current_row, 3, total_diff, header_style) ws_overview.write(current_row, 4, total_mark, header_style) ws_overview.write(current_row, 5, "", header_style) # 备注列留空 # 设置备注列宽度(100字符) ws_overview.col(5).width = 256 * 100 # ====== 为每个文件创建单独的Sheet页(保持不变) ====== # 获取所有唯一的文件名(两个文件夹的并集) all_filenames = sorted(set(machine1_all_files.keys()) | set(machine2_all_files.keys())) for filename in all_filenames: # 提取文件前缀 prefix = extract_file_prefix(filename) # 跳过无效前缀 if prefix not in PARTITION_NAME_MAP: continue # 获取分区名称 partition_name = PARTITION_NAME_MAP[prefix] # 创建Sheet页名称(文件名不带扩展名) sheet_name = filename.replace('.txt', '') if len(sheet_name) > 31: # Excel sheet名称长度限制 sheet_name = sheet_name[:31] # 创建Sheet页 ws = wb.add_sheet(sheet_name) print("创建文件Sheet页: {}".format(sheet_name)) # 当前行指针 current_row = 0 # 写入分区标题 title = "分区: {} - 文件: {}".format(partition_name, filename) 
ws.write_merge( current_row, current_row, 0, 5, title, title_style ) current_row += 1 # 写入文件夹名称 ws.write_merge(current_row, current_row, 0, 1, folder1_name, header_style) ws.write_merge(current_row, current_row, 2, 3, folder2_name, header_style) ws.write(current_row, 4, "差异(M)", header_style) ws.write(current_row, 5, "标记", header_style) current_row += 1 # 写入表头 headers = ['路径', '大小(M)', '路径', '大小(M)', '差异(M)', '标记'] for col, header in enumerate(headers): ws.write(current_row, col, header, header_style) current_row += 1 # 获取文件数据 data1 = machine1_all_files.get(filename, {}) data2 = machine2_all_files.get(filename, {}) # 获取所有路径(合并两个文件夹的路径) all_paths = sorted(set(data1.keys()) | set(data2.keys())) # 初始化变化统计数据 total_increase = 0.0 # 增大总和 total_decrease = 0.0 # 减小总和 total_added = 0.0 # 新增文件总和 total_removed = 0.0 # 去除文件总和 # 写入数据行 for path in all_paths: size1 = data1.get(path, 0.0) size2 = data2.get(path, 0.0) # 计算差值 diff = size1 - size2 # 确定标记和样式 if size1 == 0 and size2 > 0: mark = "除去" cell_style = removed_style total_removed += size2 elif size1 > 0 and size2 == 0: mark = "新增" cell_style = added_style total_added += size1 else: if diff > 0: mark = "增大" cell_style = added_style total_increase += diff elif diff < 0: mark = "减小" cell_style = removed_style total_decrease += abs(diff) else: mark = "相同" cell_style = normal_style # 写入行数据 # folder1列 if size1 > 0: ws.write(current_row, 0, path, cell_style) ws.write(current_row, 1, size1, cell_style) else: ws.write(current_row, 0, "", cell_style) ws.write(current_row, 1, "", cell_style) # folder2列 if size2 > 0: ws.write(current_row, 2, path, cell_style) ws.write(current_row, 3, size2, cell_style) else: ws.write(current_row, 2, "", cell_style) ws.write(current_row, 3, "", cell_style) # 差异和标记列 ws.write(current_row, 4, diff, cell_style) ws.write(current_row, 5, mark, cell_style) current_row += 1 # 添加文件汇总行 file_total1 = sum(data1.values()) file_total2 = sum(data2.values()) file_diff = file_total1 - file_total2 # 写入汇总行 
ws.write(current_row, 0, "文件汇总", header_style) ws.write(current_row, 1, file_total1, header_style) ws.write(current_row, 2, "", header_style) ws.write(current_row, 3, file_total2, header_style) ws.write(current_row, 4, file_diff, header_style) ws.write(current_row, 5, "", header_style) current_row += 1 # 添加变化分类统计行 message = ( u"{partition_name}路径下: " u"减小{total_decrease:.2f}M " u"增大{total_increase:.2f}M " u"新增文件{total_added:.2f}M " u"减少文件{total_removed:.2f}M" ).format( partition_name=partition_name, total_decrease=total_decrease, total_increase=total_increase, total_added=total_added, total_removed=total_removed ) ws.write_merge( current_row, current_row, 0, 5, message, summary_style ) # 保存文件 wb.save(output_xlsx) return "对比报告已成功生成: {}".format(output_xlsx) except Exception as e: import traceback traceback.print_exc() return "生成Excel文件时出错: {}".format(str(e)) 单机拆解模式保持不变 def generate_single_report(folder, output_xlsx): “”“单机拆解模式(保持不变)”“” # …(原有单机拆解模式实现)… if name == “main”: # 创建参数解析器 parser = argparse.ArgumentParser(description=‘存储空间分析工具’) subparsers = parser.add_subparsers(dest=‘mode’, help=‘运行模式’) # 双机对比模式 dual_parser = subparsers.add_parser('dual', help='双机对比模式') dual_parser.add_argument('folder1', help='第一个文件夹路径') dual_parser.add_argument('folder2', help='第二个文件夹路径') dual_parser.add_argument('output', help='输出Excel文件路径') # 单机拆解模式 single_parser = subparsers.add_parser('single', help='单机拆解模式') single_parser.add_argument('folder', help='待分析文件夹路径') single_parser.add_argument('output', help='输出Excel文件路径') # 解析参数 args = parser.parse_args() if args.mode == 'dual': print("运行双机对比模式...") result = generate_dual_report(args.folder1, args.folder2, args.output) elif args.mode == 'single': print("运行单机拆解模式...") result = generate_single_report(args.folder, args.output) else: result = "错误:请选择 'dual' 或 'single' 模式" print(result)基于这个脚本修改总览页输出根据18_lpdump.txt文件生成总大小,差值和标记列 备注的to3差异大小改为大于5M的差异都列出Slot 0: Metadata version: 10.2 Metadata size: 2640 bytes Metadata max size: 65536 bytes 
Metadata slot count: 3 Header flags: virtual_ab_device Partition table: Name: system_a Group: qti_dynamic_partitions_a Attributes: readonly Extents: 0 … 1527575 linear super 2048 Name: system_b Group: qti_dynamic_partitions_b Attributes: readonly Extents: Name: system_ext_a Group: qti_dynamic_partitions_a Attributes: readonly Extents: 0 … 2482807 linear super 1529856 Name: system_ext_b Group: qti_dynamic_partitions_b Attributes: readonly Extents: Name: vendor_a Group: qti_dynamic_partitions_a Attributes: readonly Extents: 0 … 715543 linear super 4014080 Name: vendor_b Group: qti_dynamic_partitions_b Attributes: readonly Extents: Name: product_a Group: qti_dynamic_partitions_a Attributes: readonly Extents: 0 … 16159 linear super 4730880 Name: product_b Group: qti_dynamic_partitions_b Attributes: readonly Extents: Name: my_product_a Group: qti_dynamic_partitions_a Attributes: readonly Extents: 0 … 4165551 linear super 4747264 Name: my_product_b Group: qti_dynamic_partitions_b Attributes: readonly Extents: Name: odm_a Group: qti_dynamic_partitions_a Attributes: readonly Extents: 0 … 2075367 linear super 8912896 Name: odm_b Group: qti_dynamic_partitions_b Attributes: readonly Extents: Name: my_engineering_a Group: qti_dynamic_partitions_a Attributes: readonly Extents: 0 … 655 linear super 10989568 Name: my_engineering_b Group: qti_dynamic_partitions_b Attributes: readonly Extents: Name: vendor_dlkm_a Group: qti_dynamic_partitions_a Attributes: readonly Extents: 0 … 164735 linear super 10991616 Name: vendor_dlkm_b Group: qti_dynamic_partitions_b Attributes: readonly Extents: Name: system_dlkm_a Group: qti_dynamic_partitions_a Attributes: readonly Extents: 0 … 839 linear super 11157504 Name: system_dlkm_b Group: qti_dynamic_partitions_b Attributes: readonly Extents: Name: my_stock_a Group: qti_dynamic_partitions_a Attributes: readonly Extents: 0 … 4617279 linear super 11159552 Name: my_stock_b Group: qti_dynamic_partitions_b Attributes: readonly Extents: Name: 
my_heytap_a Group: qti_dynamic_partitions_a Attributes: readonly Extents: 0 … 655 linear super 15777792 Name: my_heytap_b Group: qti_dynamic_partitions_b Attributes: readonly Extents: Name: my_carrier_a Group: qti_dynamic_partitions_a Attributes: readonly Extents: 0 … 655 linear super 15779840 Name: my_carrier_b Group: qti_dynamic_partitions_b Attributes: readonly Extents: Name: my_region_a Group: qti_dynamic_partitions_a Attributes: readonly Extents: 0 … 168575 linear super 15781888 Name: my_region_b Group: qti_dynamic_partitions_b Attributes: readonly Extents: Name: my_company_a Group: qti_dynamic_partitions_a Attributes: readonly Extents: 0 … 655 linear super 15951872 Name: my_company_b Group: qti_dynamic_partitions_b Attributes: readonly Extents: Name: my_preload_a Group: qti_dynamic_partitions_a Attributes: readonly Extents: 0 … 3191663 linear super 15953920 Name: my_preload_b Group: qti_dynamic_partitions_b Attributes: readonly Extents: Name: my_bigball_a Group: qti_dynamic_partitions_a Attributes: readonly Extents: 0 … 655 linear super 19146752 Name: my_bigball_b Group: qti_dynamic_partitions_b Attributes: readonly Extents: Name: my_manifest_a Group: qti_dynamic_partitions_a Attributes: readonly Extents: 0 … 911 linear super 19148800 Name: my_manifest_b Group: qti_dynamic_partitions_b Attributes: readonly Extents: Super partition layout: super: 2048 … 1529624: system_a (1527576 sectors) super: 1529856 … 4012664: system_ext_a (2482808 sectors) super: 4014080 … 4729624: vendor_a (715544 sectors) super: 4730880 … 4747040: product_a (16160 sectors) super: 4747264 … 8912816: my_product_a (4165552 sectors) super: 8912896 … 10988264: odm_a (2075368 sectors) super: 10989568 … 10990224: my_engineering_a (656 sectors) super: 10991616 … 11156352: vendor_dlkm_a (164736 sectors) super: 11157504 … 11158344: system_dlkm_a (840 sectors) super: 11159552 … 15776832: my_stock_a (4617280 sectors) super: 15777792 … 15778448: my_heytap_a (656 sectors) super: 15779840 … 15780496: 
my_carrier_a (656 sectors) super: 15781888 … 15950464: my_region_a (168576 sectors) super: 15951872 … 15952528: my_company_a (656 sectors) super: 15953920 … 19145584: my_preload_a (3191664 sectors) super: 19146752 … 19147408: my_bigball_a (656 sectors) super: 19148800 … 19149712: my_manifest_a (912 sectors) Block device table: Partition name: super First sector: 2048 Size: 12348030976 bytes Flags: none Group table: Name: default Maximum size: 0 bytes Flags: none Name: qti_dynamic_partitions_a Maximum size: 12343836672 bytes Flags: none Name: qti_dynamic_partitions_b Maximum size: 12343836672 bytes Flags: none Snapshot state: Update state: none Using snapuserd: 0 Using userspace snapshots: 0 Using io_uring: 0 Using o_direct: 0 Using XOR compression: 1 Current slot: _a Boot indicator: booting from unknown slot Rollback indicator: No such file or directory Forward merge indicator: No such file or directory Source build fingerprint:
最新发布
08-05
lass RRTStar3D: def __init__(self, start, goal, builds, bounds, max_iter=RRT_MAX_ITER, step_size=RRT_STEP, neighbor_radius=RRT_NEIGHBOR_RADIUS): self.start = np.array(start) self.goal = np.array(goal) # Pre-calculate builds with safety height buffer self.builds_with_safety = builds.copy() self.builds_with_safety[:, 4] -= SAFE_HEIGHT # Decrease zmin self.builds_with_safety[:, 5] += SAFE_HEIGHT # Increase zmax # Ensure zmin is not negative if SAFE_HEIGHT is large self.builds_with_safety[:, 4] = np.maximum(0, self.builds_with_safety[:, 4]) self.bounds = np.array(bounds) # Ensure bounds is numpy array self.max_iter = max_iter self.step_size = step_size self.neighbor_radius = neighbor_radius self.nodes = [self.start] self.parent = {tuple(self.start): None} self.cost = {tuple(self.start): 0.0} # Initialize KDTree with the start node self.kdtree = cKDTree(np.array([self.start])) # Ensure it's a 2D array def sample(self): # Biased sampling towards goal occasionally if np.random.rand() < 0.1: # 10% chance to sample goal return self.goal # Sample within bounds return np.random.uniform(self.bounds[:, 0], self.bounds[:, 1]) def nearest(self, q): _, idx = self.kdtree.query(q) # Handle case where KDTree might have only one node initially if isinstance(idx, (int, np.integer)): return self.nodes[idx] else: # Should not happen if tree has >= 1 node, but safety check return self.nodes[0] def steer(self, q_near, q_rand): delta = q_rand - q_near dist = np.linalg.norm(delta) if dist == 0: # Avoid division by zero return q_near ratio = self.step_size / dist if ratio >= 1.0: return q_rand return q_near + delta * ratio def near_neighbors(self, q_new): # Ensure nodes list is not empty before querying if not self.nodes: return [] # Ensure kdtree has points before querying if self.kdtree.n == 0: return [] # Use query_ball_point which is efficient for radius searches indices = self.kdtree.query_ball_point(q_new, self.neighbor_radius) # Filter out the index of q_new itself if it's already in 
nodes (might happen during rewiring) q_new_tuple = tuple(q_new) neighbors = [] for i in indices: # Check bounds and ensure it's not the node itself if already added # This check might be redundant if q_new isn't added before calling this if i < len(self.nodes): # Ensure index is valid node = self.nodes[i] if tuple(node) != q_new_tuple: neighbors.append(node) return neighbors def plan(self): for i in range(self.max_iter): q_rand = self.sample() # Check if nodes list is empty (shouldn't happen after init) if not self.nodes: print("Warning: Node list empty during planning.") continue # Or handle appropriately q_near = self.nearest(q_rand) q_new = self.steer(q_near, q_rand) # Check collision for the new segment using the pre-calculated safe builds if check_segment_collision(q_near, q_new, self.builds_with_safety) > 0: continue # If collision-free, add the node and update KD-Tree periodically q_new_tuple = tuple(q_new) q_near_tuple = tuple(q_near) # Choose parent with minimum cost among neighbors min_cost = self.cost[q_near_tuple] + np.linalg.norm(q_new - q_near) best_parent_node = q_near neighbors = self.near_neighbors(q_new) # Find neighbors first for q_neighbor in neighbors: q_neighbor_tuple = tuple(q_neighbor) # Check connectivity collision if check_segment_collision(q_neighbor, q_new, self.builds_with_safety) == 0: new_cost = self.cost[q_neighbor_tuple] + np.linalg.norm(q_new - q_neighbor) if new_cost < min_cost: min_cost = new_cost best_parent_node = q_neighbor # Add the new node with the best parent found self.nodes.append(q_new) q_best_parent_tuple = tuple(best_parent_node) self.parent[q_new_tuple] = q_best_parent_tuple self.cost[q_new_tuple] = min_cost # Rebuild KDTree periodically if len(self.nodes) % KD_REBUILD_EVERY == 0 or i == self.max_iter - 1: # Important: Ensure nodes is a list of arrays before creating KDTree if self.nodes: # Check if nodes is not empty self.kdtree = cKDTree(np.array(self.nodes)) # Rewire neighbors to go through q_new if it provides a 
shorter path for q_neighbor in neighbors: q_neighbor_tuple = tuple(q_neighbor) # Check if rewiring through q_new is shorter and collision-free cost_via_new = min_cost + np.linalg.norm(q_neighbor - q_new) if cost_via_new < self.cost[q_neighbor_tuple]: if check_segment_collision(q_new, q_neighbor, self.builds_with_safety) == 0: self.parent[q_neighbor_tuple] = q_new_tuple self.cost[q_neighbor_tuple] = cost_via_new # Check if goal is reached if np.linalg.norm(q_new - self.goal) < self.step_size: # Check final segment collision if check_segment_collision(q_new, self.goal, self.builds_with_safety) == 0: goal_tuple = tuple(self.goal) self.nodes.append(self.goal) # Add goal node self.parent[goal_tuple] = q_new_tuple self.cost[goal_tuple] = min_cost + np.linalg.norm(self.goal - q_new) print(f"RRT*: Goal reached at iteration {i+1}") # Rebuild KDTree one last time if goal is reached self.kdtree = cKDTree(np.array(self.nodes)) break # Exit planning loop else: # Loop finished without reaching goal condition print(f"RRT*: Max iterations ({self.max_iter}) reached. Connecting nearest node to goal.") # Find node closest to goal among existing nodes if not self.nodes: print("Error: No nodes generated by RRT*.") return None # Or raise error nodes_arr = np.array(self.nodes) distances_to_goal = np.linalg.norm(nodes_arr - self.goal, axis=1) nearest_node_idx = np.argmin(distances_to_goal) q_final_near = self.nodes[nearest_node_idx] q_final_near_tuple = tuple(q_final_near) goal_tuple = tuple(self.goal) # Try connecting nearest found node to goal if check_segment_collision(q_final_near, self.goal, self.builds_with_safety) == 0: self.nodes.append(self.goal) self.parent[goal_tuple] = q_final_near_tuple self.cost[goal_tuple] = self.cost[q_final_near_tuple] + np.linalg.norm(self.goal - q_final_near) print("RRT*: Connected nearest node to goal.") else: print("RRT*: Could not connect nearest node to goal collision-free. 
Returning path to nearest node.") # Path will be constructed to q_final_near instead of goal goal_tuple = q_final_near_tuple # Target for path reconstruction # Backtrack path from goal (or nearest reachable node) path = [] # Start backtracking from the actual last node added (goal or nearest) curr_tuple = goal_tuple if curr_tuple not in self.parent and curr_tuple != tuple(self.start): print(f"Warning: Target node {curr_tuple} not found in parent dict. Path reconstruction might fail.") # Fallback to the last added node if goal wasn't reachable/added correctly if self.nodes: curr_tuple = tuple(self.nodes[-1]) else: return None # No path possible while curr_tuple is not None: # Ensure the node corresponding to the tuple exists # This requires searching self.nodes, which is inefficient. # A better approach is to store nodes in the dict or use indices. # For now, let's assume tuple keys match numpy arrays. path.append(np.array(curr_tuple)) curr_tuple = self.parent.get(curr_tuple, None) if not path: print("Error: Path reconstruction failed.") return None if tuple(path[-1]) != tuple(self.start): print("Warning: Path does not end at start node.") return np.array(path[::-1]) # Reverse to get start -> goal order # --------------------- Path Cost Function (Use safe builds) --------------------- def path_cost(path_pts, builds_with_safety, drone_speed=DRONE_SPEED, penalty_k=PENALTY_K): total_time = 0.0 total_penalty = 0.0 num_segments = len(path_pts) - 1 if num_segments < 1: return 0.0 # No cost for a single point path # Vectorized calculations where possible p = path_pts[:-1] # Start points of segments q = path_pts[1:] # End points of segments segments = q - p distances = np.linalg.norm(segments, axis=1) # Avoid division by zero for zero-length segments valid_segments = distances > 1e-6 if not np.any(valid_segments): return 0.0 # Path has no length p = p[valid_segments] q = q[valid_segments] segments = segments[valid_segments] distances = distances[valid_segments] dir_unit = 
segments / distances[:, np.newaxis] # Interpolate wind at midpoints for better average (optional, could use start/end) midpoints = p + segments / 2.0 # try: # wind_vectors = interp(midpoints) # except ValueError as e: # print(f"Interpolation error: {e}") # print(f"Midpoints shape: {midpoints.shape}") # # Handle error, e.g., return a large cost or use zero wind # wind_vectors = np.zeros_like(midpoints) # Calculate ground speed component along each segment # Ensure wind_vectors and dir_unit have compatible shapes for dot product # np.einsum is efficient for row-wise dot products # wind_along_path = np.einsum('ij,ij->i', wind_vectors, dir_unit) ground_speeds = np.maximum(drone_speed , 1e-3) # Avoid zero/negative speed # Calculate time for each segment segment_times = distances / ground_speeds total_time = np.sum(segment_times) # Calculate collision penalty (iterate segments as check_segment_collision is per-segment) for i in range(len(p)): # Pass pre-calculated builds_with_safety penetration = check_segment_collision(p[i], q[i], builds_with_safety) if penetration > 0: total_penalty += penalty_k * penetration**2 # Quadratic penalty return total_time + total_penalty生成注释
05-14
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值