from utools.tools import expand_topo, sag_generate, load_data, clear_dir
from utools.tools import create_csv, append_csv
from utools.planner.SgplanPlanner_Phycial import SgplanPlanner_Phycial
from utools.planner.PopfPlanner_Phycial import PopfPlanner_Pyhcial
from utools.planner.OpticPlanner_Phycial import OpticPlanner_Phycial
from utools.planner.FfPlanner_Phycial import FfPlanner_Phycial
from utools.planner.FdPlanner_Phycial import FdPlanner_Phycial
from utools.planner.SymkPlanner_Phycial import SymkPlanner_Phycial
from utools.generateInput.generatePDDLInput_Phycial import GenPDDLInputFile_Phycial
from utools.planner.JshopPlanner import JshopPlanner
from utools.generateInput.generateJSHOPInput_Phycial import GenJSHOPInputFile_Phycial
from utools.planner.PyhopPlanner import PyhopPlanner
from utools.generateInput.generatePYHOPInput_Phycial import GenPYHOPInputFile_Phycial
from time import time
import os
# Timeout support: eventlet.Timeout is used below to bound each planning run.
import eventlet
eventlet.monkey_patch() # required so eventlet timeouts can interrupt blocking stdlib calls
# Logging setup
import logging
# Root log level
logging.basicConfig(level=logging.INFO)
# Module-level logger
logger = logging.getLogger(__name__)
# Log record format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Log output goes to a file, truncated on each run
# NOTE(review): assumes the result/log/ directory already exists — confirm, FileHandler will not create it
file_handler = logging.FileHandler("result/log/planner.log", mode='w')
# Attach the formatter to the handler
file_handler.setFormatter(formatter)
# Attach the handler to the logger
logger.addHandler(file_handler)
def static_planner_sence_topo_type(planner, sence, topo_type, topo_multis, logger = logger, parent_dir = None, domainfile_path=None, goaltasks=None):
    """Benchmark one planner on one scene/topology type over increasing multipliers.

    For every multiplier in ``topo_multis`` the matching expanded topology is
    loaded, a problem file is generated, the planner runs under a 3600 s
    eventlet timeout, and per-run statistics are appended to a CSV file under
    ``parent_dir``.

    Parameters
    ----------
    planner : str
        Planner identifier, e.g. "pddl/sgplan" or "htn/pyhop"; must be a key
        of the internal planner table.
    sence : str
        Scene name containing "Topo<N>" and "Complex"/"Simple", e.g. "ComplexTopo5".
    topo_type : str
        Topology expansion type (e.g. "ring", "star" or "mesh").
    topo_multis : iterable of int
        Topology expansion multipliers to iterate over.
    logger : logging.Logger, optional
        Logger used for progress messages.
    parent_dir : str, optional
        Base output directory; defaults to "result/<planner>/<sence>/<topo_type>/".
    domainfile_path : str, optional
        Custom domain file; defaults to the per-planner file under "input/".
    goaltasks : list, optional
        Goal task list for the pyhop planner; defaults depend on the topology.
    """
    # Map each planner identifier to its concrete planner class.
    planner_dict = {
        "pddl/sgplan":SgplanPlanner_Phycial,
        "htn/jshop": JshopPlanner,
        "htn/pyhop": PyhopPlanner,
        "pddl/popf2": PopfPlanner_Pyhcial,
        "pddl/optic": OpticPlanner_Phycial,
        "pddl/ff": FfPlanner_Phycial,
        "pddl/fd": FdPlanner_Phycial,
        "pddl/symk": SymkPlanner_Phycial
    }
    # Derive the topology name ("Topo<N>") from the scene name.
    topo_name = "Topo" + sence.split("Topo")[1]
    if parent_dir is None:
        parent_dir = f"result/{planner}/{sence}/{topo_type}/"
    else:
        parent_dir = f"{parent_dir}/{planner}/{sence}/{topo_type}/"
    if "pddl" in planner:
        if domainfile_path is None:
            domainfile_path = f"input/{planner}/{sence}/domain1.pddl"
        # BUG FIX: this assignment used to live inside the branch above, so a
        # caller-supplied domainfile_path left problemfile_path unbound and the
        # pddl_paths() call below raised UnboundLocalError.
        problemfile_path = f"input/{planner}/{sence}/problem1.pddl"
        # Scene / problem-file generator.
        gpddl = GenPDDLInputFile_Phycial()
        # Concrete planner instance.
        planner_obj = planner_dict[planner]()
        # File that receives the raw plan produced by the planner.
        result_path = parent_dir + "result.soln2"
        # Start from an empty output directory.
        clear_dir(parent_dir)
        static_file = parent_dir + "static.csv"
        create_csv(static_file, ["multi", "host_num", "edge_num", "plan_counter", "path_num", "sag_node", "sag_edge","enum_time", "max_mem"])
        for multi in topo_multis:
            logger.info(f"当前拓扑为:input/expand_topo/{topo_name}/{topo_type}/Host0_width_{multi}.json")
            try:
                # BUG FIX: this was Timeout(3600, False), which silently swallows
                # the timeout so the except-handler below (symk cleanup + break)
                # was unreachable; True matches the htn branches.
                with eventlet.Timeout(3600, True):
                    pddl_paths(planner,sence, topo_type,gpddl, planner_obj, multi,result_path, static_file, domainfile_path, problemfile_path, parent_dir)
                    # Inspect the newest CSV rows: stop when no path was found,
                    # or when the path count stopped growing vs. the previous row.
                    with open(static_file, "r") as f:
                        lines = f.readlines()
                        last_line = lines[-1].strip().split(",")
                        last_last_line = lines[-2].strip().split(",")
                        if int(last_line[4]) == 0:
                            logger.info(f"规划器在倍数{multi}无法找到路径,停止运行")
                            break
                        # The previous row may still be the CSV header, hence the
                        # isdigit guard before comparing path counts.
                        elif last_last_line[4].isdigit():
                            if int(last_line[4]) <= int(last_last_line[4]):
                                logger.info(f"规划器在倍数{multi}多规划时间超过限制时间,停止运行")
                                break
            except eventlet.timeout.Timeout:
                logger.info(f"程序运行超时在倍数{multi}")
                # symk spawns a "downward" subprocess that must be killed explicitly.
                if "symk" in planner:
                    os.system("kill -9 $(ps -ef | grep downward| awk '{print $2}')")
                break
    else:
        # HTN planners: either jshop or pyhop.
        if "jshop" in planner:
            if domainfile_path is None:
                domainfile_path = f"input/{planner}/{sence}/domain1.jshop"
            # BUG FIX: moved out of the branch above (same issue as the pddl case).
            problemfile_path = f"input/{planner}/{sence}/problem1.jshop"
            # Scene / problem-file generator.
            gjshop = GenJSHOPInputFile_Phycial()
            # Concrete planner instance.
            planner_obj = planner_dict[planner]()
            result_path = parent_dir + "result.soln2"
            # Two runs per multiplier: "all" = improved planner,
            # "filter" = original planner with length filtering.
            clear_dir(parent_dir + "all")
            clear_dir(parent_dir + "filter")
            static_file = parent_dir + "all/static.csv"
            create_csv(static_file, ["multi", "host_num", "edge_num","path_num", "sag_node", "sag_edge","enum_time", "max_mem"])
            static_file2 = parent_dir + "filter/static.csv"
            create_csv(static_file2, ["multi", "host_num", "edge_num","path_num", "sag_node", "sag_edge","enum_time", "max_mem"])
            # The jshop runner chdirs; remember the cwd to restore it on timeout.
            current_dir = os.getcwd()
            for multi in topo_multis:
                logger.info(f"当前拓扑为:input/expand_topo/{topo_name}/{topo_type}/Host0_width_{multi}.json")
                try:
                    with eventlet.Timeout(3600, True):
                        logger.info("当前使用的是改进后的jshop规划器")
                        jshop_paths(planner,sence,topo_type, gjshop, planner_obj, multi,result_path, static_file, domainfile_path, problemfile_path, parent_dir)
                except eventlet.timeout.Timeout:
                    logger.info(f"程序运行超时在倍数{multi}")
                    # Kill the orphaned JVM still solving the problem.
                    os.system("kill -9 $(ps -ef | grep 'java -Xss2048K -Xmx1024M problem'| awk '{print $2}')")
                    os.chdir(current_dir)
                    break
            for multi in topo_multis:
                logger.info(f"当前拓扑为:input/expand_topo/{topo_name}/{topo_type}/Host0_width_{multi}.json")
                try:
                    with eventlet.Timeout(3600, True):
                        logger.info("当前使用的是原始的jshop规划器,启用长度过滤")
                        jshop_paths_filter(planner,sence,topo_type, gjshop, planner_obj, multi,result_path, static_file2, domainfile_path, problemfile_path, parent_dir)
                except eventlet.timeout.Timeout:
                    logger.info(f"程序运行超时在倍数{multi}")
                    os.system("kill -9 $(ps -ef | grep 'java -Xss2048K -Xmx1024M problem'| awk '{print $2}')")
                    os.chdir(current_dir)
                    break
        elif "pyhop" in planner:
            if domainfile_path is None:
                domainfile_path = f"input/{planner}/{sence}/domain1.py"
            if goaltasks is None:
                # Default attack goal depends on the base topology.
                # NOTE(review): goaltasks stays None for other topologies
                # (e.g. Topo5) unless supplied by the caller — confirm intent.
                if "Topo1" in sence:
                    goaltasks = [('AttackGoal', 'Attacker', 'Attacker', 'Host3')]
                elif "Topo2" in sence:
                    goaltasks = [('AttackGoal', 'Attacker', 'Attacker', 'Host6')]
                elif "Topo3" in sence:
                    goaltasks = [('AttackGoal', 'Attacker', 'Attacker', 'Splc')]
            # Decomposition-scheme name, taken from the domain file stem.
            domain_name = domainfile_path.split("/")[-1].split(".")[0]
            problemfile_path = f"input/{planner}/{sence}/problem.py"
            # Scene / problem-file generator.
            gpyhop = GenPYHOPInputFile_Phycial()
            # Concrete planner instance.
            planner_obj = planner_dict[planner]()
            result_path = parent_dir + "result.soln2"
            # Per-variant output directories: all / filter / enum.
            clear_dir(parent_dir + f"{domain_name}/all")
            clear_dir(parent_dir + f"{domain_name}/filter")
            clear_dir(parent_dir + f"{domain_name}/enum")
            static_file = parent_dir + f"{domain_name}/all/static.csv"
            create_csv(static_file, ["multi", "host_num", "edge_num","path_num", "sag_node", "sag_edge","enum_time", "max_mem"])
            static_file2 = parent_dir + f"{domain_name}/filter/static.csv"
            create_csv(static_file2, ["multi", "host_num", "edge_num","path_num", "sag_node", "sag_edge","enum_time", "max_mem"])
            static_file3 = parent_dir + f"{domain_name}/enum/static.csv"
            create_csv(static_file3, ["multi", "host_num", "edge_num","plan_counter", "path_num", "sag_node", "sag_edge","enum_time", "max_mem"])
            for multi in topo_multis:
                logger.info(f"当前拓扑为:input/expand_topo/{topo_name}/{topo_type}/Host0_width_{multi}.json")
                try:
                    with eventlet.Timeout(3600, True):
                        logger.info("当前使用的是改进后的pyhop规划器")
                        pyhop_paths(planner,sence,topo_type, gpyhop, planner_obj, multi,result_path, static_file, domainfile_path, goaltasks, domain_name, parent_dir)
                except eventlet.timeout.Timeout:
                    logger.info(f"程序运行超时在倍数{multi}")
                    os.system("kill -9 $(ps -ef | grep 'utools/planner/PyhopG.py'| awk '{print $2}')")
                    break
            # NOTE: the enum run (pyhop_paths_enum -> static_file3) and the
            # filter run (pyhop_paths_filter -> static_file2) are currently
            # disabled; their CSV files are still created for compatibility.
def pddl_paths(planner,sence, topo_type,gpddl, planner_obj, multi,result_path, static_file, domainfile_path, problemfile_path, parent_dir):
    """Run one PDDL planning round for a single topology multiplier.

    Loads the expanded topology, generates the problem file for the scene
    variant ("Complex" or "Simple"), runs the planner, writes the found paths
    to "<parent_dir>/<multi>/paths.txt", builds the attack graph, and appends
    one statistics row to ``static_file``.
    """
    topo_name = "Topo" + sence.split("Topo")[1]
    topo_path = f"input/expand_topo/{topo_name}/{topo_type}/Host0_width_{multi}.json"
    # Load the topology and grab its size for the statistics row.
    gpddl.loadTopo(topo_path)
    host_num, edge_num = gpddl.get_host_edge()
    clear_dir(parent_dir + f"{multi}")
    # Dispatch on the scene variant; "Complex" takes precedence.
    for variant in ("Complex", "Simple"):
        if variant not in sence:
            continue
        getattr(gpddl, f"genProblemFile_{variant}")(problemfile_path)
        if "symk" not in planner:
            # Non-symk planners need per-variant initialisation and entry point.
            getattr(planner_obj, f"Init_{variant}")(
                topo_path,
                f"input/info/{variant}{topo_name}/action_map.json",
                f"input/info/{variant}{topo_name}/non_Critical.json",
            )
            paths, plan_counter, enum_time, max_time, max_mem = getattr(
                planner_obj, f"path_generator_{variant}"
            )(domainfile_path, problemfile_path, result_path)
        else:
            # symk enumerates all paths through the generic entry point.
            paths, plan_counter, enum_time, max_time, max_mem = planner_obj.path_generator(
                domainfile_path, problemfile_path, result_path
            )
        out_filepath = parent_dir + f"{multi}/paths.txt"
        planner_obj.output_paths(paths, out_filepath)
        sag_node, sag_edge = sag_generate(out_filepath, host_num, edge_num)
        append_csv(static_file, [multi, host_num, edge_num, plan_counter, len(paths), sag_node, sag_edge, enum_time, max_mem])
        # Make sure the log record reaches disk before a possible timeout kill.
        file_handler.flush()
        break
def jshop_paths(planner,sence, topo_type,gjshop, planner_obj, multi,result_path, static_file,domainfile_path, problemfile_path, parent_dir):
    """Run one JSHOP planning round for a single topology multiplier.

    Generates the problem file for the scene variant, runs the planner, writes
    the paths to "<parent_dir>/all/<multi>/paths.txt", builds the attack graph
    and appends one statistics row to ``static_file``.
    """
    topo_name = "Topo" + sence.split("Topo")[1]
    topo_path = f"input/expand_topo/{topo_name}/{topo_type}/Host0_width_{multi}.json"
    # Load the topology and grab its size for the statistics row.
    gjshop.loadTopo(topo_path)
    host_num, edge_num = gjshop.get_host_edge()
    clear_dir(parent_dir + f"all/{multi}")
    # Complex and Simple differ only in the problem-file generator used.
    for variant in ("Complex", "Simple"):
        if variant not in sence:
            continue
        getattr(gjshop, f"genProblemFile_{variant}")(problemfile_path)
        paths, enum_time, max_mem = planner_obj.path_generator(domainfile_path, problemfile_path, result_path)
        out_filepath = parent_dir + f"all/{multi}/paths.txt"
        planner_obj.output_paths(paths, out_filepath)
        sag_node, sag_edge = sag_generate(out_filepath, host_num, edge_num)
        append_csv(static_file, [multi, host_num, edge_num, len(paths), sag_node, sag_edge, enum_time, max_mem])
        # Make sure the log record reaches disk before a possible timeout kill.
        file_handler.flush()
        break
def jshop_paths_filter(planner,sence, topo_type,gjshop, planner_obj, multi,result_path, static_file,domainfile_path, problemfile_path, parent_dir):
    """Run the original JSHOP planner with length-based edge filtering.

    Groups candidate edges by path length, runs one planning pass per group
    with filtering enabled, accumulates total enumeration time and peak
    memory, deduplicates the paths, and records one statistics row under
    "<parent_dir>/filter/<multi>".
    """
    topo_name = "Topo" + sence.split("Topo")[1]
    topo_path = f"input/expand_topo/{topo_name}/{topo_type}/Host0_width_{multi}.json"
    # Load the topology and grab its size for the statistics row.
    gjshop.loadTopo(topo_path)
    host_num, edge_num = gjshop.get_host_edge()
    clear_dir(parent_dir + f"filter/{multi}")
    # Complex and Simple differ only in the problem-file generator used.
    for variant in ("Complex", "Simple"):
        if variant not in sence:
            continue
        # Include the filter-edge computation in the total timing.
        start = time()
        len_paths = gjshop.get_filter_edges()
        end = time()
        gen_problem = getattr(gjshop, f"genProblemFile_{variant}")
        paths = []
        all_enum_time, all_max_mem = 0, 0
        for length, filter_edges in len_paths.items():
            # The trailing True switches on edge filtering in the problem file.
            gen_problem(problemfile_path, filter_edges, True)
            path, enum_time, max_mem = planner_obj.path_generator(domainfile_path, problemfile_path, result_path,jshop2="origion")
            all_enum_time += enum_time
            all_max_mem = max(all_max_mem, max_mem)
            # Deduplicate while preserving first-seen order.
            for p in path:
                if p not in paths:
                    paths.append(p)
        all_enum_time = round(all_enum_time + end - start, 3)
        out_filepath = parent_dir + f"filter/{multi}/paths.txt"
        planner_obj.output_paths(paths, out_filepath)
        sag_node, sag_edge = sag_generate(out_filepath, host_num, edge_num)
        append_csv(static_file, [multi, host_num, edge_num, len(paths), sag_node, sag_edge, all_enum_time, all_max_mem])
        # Make sure the log record reaches disk before a possible timeout kill.
        file_handler.flush()
        break
def pyhop_paths(planner,sence,topo_type, gpyhop, planner_obj, multi,result_path, static_file, domainfile_path, goaltasks, domain_name, parent_dir):
    """Run one pyhop planning round (improved planner) for a single multiplier.

    The Complex and Simple scene variants are handled identically here: the
    planner is invoked with ``goaltasks``, paths are written to
    "<parent_dir>/<domain_name>/all/<multi>/paths.txt", the attack graph is
    built, and one statistics row is appended to ``static_file``.
    """
    topo_name = "Topo" + sence.split("Topo")[1]
    topo_path = f"input/expand_topo/{topo_name}/{topo_type}/Host0_width_{multi}.json"
    # Only needed to obtain the topology size; release it before planning.
    gpyhop.loadTopo(topo_path)
    host_num, edge_num = gpyhop.get_host_edge()
    del gpyhop
    clear_dir(parent_dir + f"{domain_name}/all/{multi}")
    # Destination for the discovered paths.
    out_filepath = parent_dir + f"{domain_name}/all/{multi}/paths.txt"
    # Both scene variants follow exactly the same flow.
    if "Complex" in sence or "Simple" in sence:
        paths, enum_time, max_mem = planner_obj.path_generator(domainfile_path, topo_path, goaltasks, result_path)
        planner_obj.output_paths(paths, out_filepath)
        sag_node, sag_edge = sag_generate(out_filepath, host_num, edge_num)
        append_csv(static_file, [multi, host_num, edge_num, len(paths), sag_node, sag_edge, enum_time, max_mem])
        # Make sure the log record reaches disk before a possible timeout kill.
        file_handler.flush()
def pyhop_paths_filter(planner,sence,topo_type, gpyhop, planner_obj, multi,result_path, static_file, domainfile_path, goaltasks, domain_name, parent_dir):
    """Run one pyhop planning round in "filter" mode for a single multiplier.

    Identical flow to pyhop_paths, except the planner is invoked with the
    "filter" mode flag and output goes under
    "<parent_dir>/<domain_name>/filter/<multi>".
    """
    topo_name = "Topo" + sence.split("Topo")[1]
    topo_path = f"input/expand_topo/{topo_name}/{topo_type}/Host0_width_{multi}.json"
    # Only needed to obtain the topology size; release it before planning.
    gpyhop.loadTopo(topo_path)
    host_num, edge_num = gpyhop.get_host_edge()
    del gpyhop
    clear_dir(parent_dir + f"{domain_name}/filter/{multi}")
    # Destination for the discovered paths.
    out_filepath = parent_dir + f"{domain_name}/filter/{multi}/paths.txt"
    # Both scene variants follow exactly the same flow.
    if "Complex" in sence or "Simple" in sence:
        paths, enum_time, max_mem = planner_obj.path_generator(domainfile_path, topo_path, goaltasks, result_path, "filter")
        planner_obj.output_paths(paths, out_filepath)
        sag_node, sag_edge = sag_generate(out_filepath, host_num, edge_num)
        append_csv(static_file, [multi, host_num, edge_num, len(paths), sag_node, sag_edge, enum_time, max_mem])
        # Make sure the log record reaches disk before a possible timeout kill.
        file_handler.flush()
def pyhop_paths_enum(planner,sence,topo_type, gpyhop, planner_obj, multi,result_path, static_file, domainfile_path, goaltasks, domain_name, parent_dir):
    """Run one pyhop planning round in "enum" mode for a single multiplier.

    Identical flow to pyhop_paths, except the planner runs in "enum" mode
    (which additionally reports a plan counter) and output goes under
    "<parent_dir>/<domain_name>/enum/<multi>".
    """
    topo_name = "Topo" + sence.split("Topo")[1]
    topo_path = f"input/expand_topo/{topo_name}/{topo_type}/Host0_width_{multi}.json"
    # Only needed to obtain the topology size; release it before planning.
    gpyhop.loadTopo(topo_path)
    host_num, edge_num = gpyhop.get_host_edge()
    del gpyhop
    clear_dir(parent_dir + f"{domain_name}/enum/{multi}")
    # Destination for the discovered paths.
    out_filepath = parent_dir + f"{domain_name}/enum/{multi}/paths.txt"
    # Both scene variants follow exactly the same flow.
    if "Complex" in sence or "Simple" in sence:
        paths, plan_counter, enum_time, max_mem = planner_obj.path_generator(domainfile_path, topo_path, goaltasks, result_path, "enum")
        planner_obj.output_paths(paths, out_filepath)
        sag_node, sag_edge = sag_generate(out_filepath, host_num, edge_num)
        append_csv(static_file, [multi, host_num, edge_num, plan_counter, len(paths), sag_node, sag_edge, enum_time, max_mem])
        # Make sure the log record reaches disk before a possible timeout kill.
        file_handler.flush()
if __name__ == "__main__":
    # Planners to benchmark.
    planners = ["htn/pyhop"]
    # planners = ["pddl/sgplan", "pddl/popf2", "pddl/optic", "pddl/ff", "pddl/fd"]
    # planners = ["htn/pyhop", "pddl/sgplan", "pddl/popf2", "pddl/optic", "pddl/ff", "pddl/fd", "pddl/symk", "htn/jshop"]
    # Scene(s) to run.
    sences = ["ComplexTopo5"]
    # sences = ["SimpleTopo3"]
    # Topology expansion type(s).
    # topo_types = ["ring","star","mesh"]
    topo_types = ["mesh"]
    # Topology multipliers to sweep.
    # NOTE(review): range(0, 1, 20) yields only [0]; presumably something like
    # range(0, 20, 1) was intended — confirm with the experiment design.
    topo_multis = range(0, 1, 20)
    goaltasks = [('AttackGoal', 'Attacker', 'Attacker', 'Splc')]
    # Run every planner on every scene / topology-type combination.
    for planner in planners:
        # Wipe any previous results for this planner.
        clear_dir(f"result/{planner}")
        # Iterate over the selected scenes.
        for sence in sences:
            # Iterate over the selected topology types.
            for topo_type in topo_types:
                logger.info(f"当前规划器为:{planner}, 场景为:{sence}, 拓扑类型为:{topo_type}")
                static_planner_sence_topo_type(planner, sence, topo_type, topo_multis, logger, goaltasks=goaltasks)
# NOTE(review): the line below was a bare prose note ("This is the static_all_phycial
# code; analyze it together as well") that made the file a SyntaxError; kept as a comment.