资源规划 create_simple_plan

本文深入探讨了Oracle资源规划组的功能与应用,包括如何通过Dbms_resource_manager和dbms_resource_manager_privs进行资源管理,提供了创建简单和复杂资源计划的实例,并详细解释了资源计划的组成部分。
资源规划组的使用。

资源规划组的使用。
资源规划组可以给ORACLE提供更多的控制资源管理决策的权利。
可以解决系统开销过大,效率低下,不适当的资源分配等与资源相关的问题。
资源规划组的管理主要是由两个包来完成的:DBMS_RESOURCE_MANAGER和DBMS_RESOURCE_MANAGER_PRIVS。
为scott用户授权:
-- Grant user scott the ADMINISTER_RESOURCE_MANAGER system privilege,
-- without the ability to pass it on (ADMIN_OPTION => FALSE).
-- Note: the trailing "-" is the SQL*Plus line-continuation character.
EXEC DBMS_RESOURCE_MANAGER_PRIVS.GRANT_SYSTEM_PRIVILEGE -
    (GRANTEE_NAME => 'scott', PRIVILEGE_NAME => 'ADMINISTER_RESOURCE_MANAGER', -
     ADMIN_OPTION => FALSE);
可用过程REVOKE_SYSTEM_PRIVILEGE来收回权限。
权限ADMINISTER_RESOURCE_MANAGER的授予和回收只能通过上述包中的过程来完成,不能通过grant和revoke这两个命令来完成。

来看一个例子。创建一个简单的资源计划。
-- Create a simple two-group resource plan: mygroup1 gets 80% and
-- mygroup2 gets 20% of level-2 CPU (SYS_GROUP takes level 1,
-- OTHER_GROUPS takes level 3 automatically).
BEGIN
DBMS_RESOURCE_MANAGER.CREATE_SIMPLE_PLAN(SIMPLE_PLAN => 'simple_plan1',
   CONSUMER_GROUP1 => 'mygroup1', GROUP1_CPU => 80,
   CONSUMER_GROUP2 => 'mygroup2', GROUP2_CPU => 20);
END;
/

上面的过程创建了如下的资源规划:
Consumer Group    Level 1    Level 2    Level 3
SYS_GROUP         100%       -          -
mygroup1          -          80%        -
mygroup2          -          20%        -
OTHER_GROUPS      -          -          100%
 

下面再来一个复杂的资源计划,包括下面的步骤:
- Using the Pending Area for Creating Plan Schemas
- Creating Resource Plans
- Creating Resource Consumer Groups
- Specifying Resource Plan Directives
-- Build a complex resource plan inside a pending area:
-- create pending area -> plan -> consumer groups -> plan directives
-- -> validate -> submit.
BEGIN
DBMS_RESOURCE_MANAGER.CREATE_PENDING_AREA();
DBMS_RESOURCE_MANAGER.CREATE_PLAN(PLAN => 'erp_plan', 
  COMMENT => 'Resource plan/method for ERP Database');
DBMS_RESOURCE_MANAGER.CREATE_CONSUMER_GROUP(CONSUMER_GROUP => 'oltp', 
  COMMENT => 'Resource consumer group/method for OLTP jobs');
DBMS_RESOURCE_MANAGER.CREATE_CONSUMER_GROUP(CONSUMER_GROUP => 'batch', 
  COMMENT => 'Resource consumer group/method for BATCH jobs');
-- OLTP: 80% of level-1 CPU; sessions switch to group 'batch' when a call
-- is estimated (SWITCH_ESTIMATE => TRUE) to exceed 3 seconds; undo capped
-- at 200K.
DBMS_RESOURCE_MANAGER.CREATE_PLAN_DIRECTIVE(PLAN => 'erp_plan', 
  GROUP_OR_SUBPLAN => 'oltp', COMMENT => 'OLTP sessions', CPU_P1 => 80, 
  SWITCH_GROUP => 'batch', SWITCH_TIME => 3,SWITCH_ESTIMATE => TRUE, 
  UNDO_POOL => 200);
-- BATCH: 100% of level-2 CPU; at most 5 active sessions, queue timeout
-- 600 s; reject operations estimated to run longer than 3600 s.
DBMS_RESOURCE_MANAGER.CREATE_PLAN_DIRECTIVE(PLAN => 'erp_plan', 
  GROUP_OR_SUBPLAN => 'batch', COMMENT => 'BATCH sessions', CPU_P2 => 100, 
  ACTIVE_SESS_POOL_P1 => 5, QUEUEING_P1 => 600, 
  MAX_EST_EXEC_TIME => 3600);
-- Every plan must include a directive for OTHER_GROUPS.
DBMS_RESOURCE_MANAGER.CREATE_PLAN_DIRECTIVE(PLAN => 'erp_plan', 
  GROUP_OR_SUBPLAN => 'OTHER_GROUPS', COMMENT => 'mandatory', CPU_P3 => 100);
DBMS_RESOURCE_MANAGER.VALIDATE_PENDING_AREA();
DBMS_RESOURCE_MANAGER.SUBMIT_PENDING_AREA();
END;
/

上面过程创建的资源规划如下:

Group         CPU Resource Allocation %   Active Session Pool          Automatic Switching                                          Max Estimated Exec Time   Undo Pool
oltp          Level 1: 80%                --                           Switch to group: batch; Switch time: 3; Use estimate: TRUE   --                        Size: 200K
batch         Level 2: 100%               Pool size: 5; Timeout: 600   --                                                           Time: 3600                --
OTHER_GROUPS  Level 3: 100%               --                           --                                                           --                        --

来自 “ ITPUB博客 ” ,链接:http://blog.itpub.net/24104518/viewspace-713001/,如需转载,请注明出处,否则将追究法律责任。

转载于:http://blog.itpub.net/24104518/viewspace-713001/

"""Batch benchmark driver: run PDDL / HTN (JSHOP, Pyhop) planners over
expanded network topologies and record attack-path statistics to CSV files.

NOTE(review): this module was recovered from a whitespace-mangled paste;
line breaks and indentation were reconstructed from the syntax, so the
exact statement grouping (especially what sits inside each
``eventlet.Timeout`` block) should be confirmed against the original
``static_all_phycial`` source.
"""
from utools.tools import expand_topo, sag_generate, load_data, clear_dir
from utools.tools import create_csv, append_csv
from utools.planner.SgplanPlanner_Phycial import SgplanPlanner_Phycial
from utools.planner.PopfPlanner_Phycial import PopfPlanner_Pyhcial
from utools.planner.OpticPlanner_Phycial import OpticPlanner_Phycial
from utools.planner.FfPlanner_Phycial import FfPlanner_Phycial
from utools.planner.FdPlanner_Phycial import FdPlanner_Phycial
from utools.planner.SymkPlanner_Phycial import SymkPlanner_Phycial
from utools.generateInput.generatePDDLInput_Phycial import GenPDDLInputFile_Phycial
from utools.planner.JshopPlanner import JshopPlanner
from utools.generateInput.generateJSHOPInput_Phycial import GenJSHOPInputFile_Phycial
from utools.planner.PyhopPlanner import PyhopPlanner
from utools.generateInput.generatePYHOPInput_Phycial import GenPYHOPInputFile_Phycial
from time import time
import os

# Timeout support: monkey_patch() must run before any blocking call is made.
import eventlet
eventlet.monkey_patch()

# Logging: INFO level, mirrored to result/log/planner.log (truncated per run).
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler = logging.FileHandler("result/log/planner.log", mode='w')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)


def static_planner_sence_topo_type(planner, sence, topo_type, topo_multis, logger=logger,
                                   parent_dir=None, domainfile_path=None, goaltasks=None):
    """Sweep one planner over one scene/topology type for every expansion multiple.

    planner         planner id, e.g. "pddl/sgplan", "htn/jshop", "htn/pyhop"
    sence           scene name containing "Topo<N>", e.g. "ComplexTopo5"
    topo_type       topology expansion shape, e.g. "ring" / "star" / "mesh"
    topo_multis     iterable of expansion multiples to sweep
    parent_dir      result root; defaults to result/<planner>/<sence>/<topo_type>/
    domainfile_path planner domain file; derived from planner/sence when None
    goaltasks       Pyhop goal tasks; derived from the scene's topo when None
    """
    # Map planner ids to their concrete planner classes.
    planner_dict = {
        "pddl/sgplan": SgplanPlanner_Phycial,
        "htn/jshop": JshopPlanner,
        "htn/pyhop": PyhopPlanner,
        "pddl/popf2": PopfPlanner_Pyhcial,
        "pddl/optic": OpticPlanner_Phycial,
        "pddl/ff": FfPlanner_Phycial,
        "pddl/fd": FdPlanner_Phycial,
        "pddl/symk": SymkPlanner_Phycial,
    }
    # Topology name is the "Topo..." suffix of the scene name.
    topo_name = "Topo" + sence.split("Topo")[1]
    if parent_dir is None:
        parent_dir = f"result/{planner}/{sence}/{topo_type}/"
    else:
        parent_dir = f"{parent_dir}/{planner}/{sence}/{topo_type}/"

    if "pddl" in planner:
        if domainfile_path is None:
            domainfile_path = f"input/{planner}/{sence}/domain1.pddl"
        problemfile_path = f"input/{planner}/{sence}/problem1.pddl"
        gpddl = GenPDDLInputFile_Phycial()          # scene/problem generator
        planner_obj = planner_dict[planner]()
        result_path = parent_dir + "result.soln2"   # raw planner solution output
        clear_dir(parent_dir)
        static_file = parent_dir + "static.csv"
        create_csv(static_file, ["multi", "host_num", "edge_num", "plan_counter",
                                 "path_num", "sag_node", "sag_edge", "enum_time", "max_mem"])
        for multi in topo_multis:
            logger.info(f"当前拓扑为:input/expand_topo/{topo_name}/{topo_type}/Host0_width_{multi}.json")
            try:
                # NOTE(review): exception=False makes the timeout silent, so
                # the except branch below never fires in this branch — confirm
                # whether True was intended (the HTN branches use True).
                with eventlet.Timeout(3600, False):
                    pddl_paths(planner, sence, topo_type, gpddl, planner_obj, multi,
                               result_path, static_file, domainfile_path,
                               problemfile_path, parent_dir)
                # Stop the sweep when the newest CSV row reports zero paths, or
                # no growth versus the previous row.  On the first pass the
                # previous row is the CSV header, hence the isdigit() guard.
                with open(static_file, "r") as f:
                    lines = f.readlines()
                last_line = lines[-1].strip().split(",")
                last_last_line = lines[-2].strip().split(",")
                if int(last_line[4]) == 0:
                    logger.info(f"规划器在倍数{multi}无法找到路径,停止运行")
                    break
                elif last_last_line[4].isdigit():
                    if int(last_line[4]) <= int(last_last_line[4]):
                        logger.info(f"规划器在倍数{multi}多规划时间超过限制时间,停止运行")
                        break
            except eventlet.timeout.Timeout:
                logger.info(f"程序运行超时在倍数{multi}")
                # symk spawns a "downward" subprocess that must be killed by hand.
                if "symk" in planner:
                    os.system("kill -9 $(ps -ef | grep downward| awk '{print $2}')")
                break
    else:
        # HTN planners: either JSHOP or Pyhop.
        if "jshop" in planner:
            if domainfile_path is None:
                domainfile_path = f"input/{planner}/{sence}/domain1.jshop"
            problemfile_path = f"input/{planner}/{sence}/problem1.jshop"
            gjshop = GenJSHOPInputFile_Phycial()
            planner_obj = planner_dict[planner]()
            result_path = parent_dir + "result.soln2"
            # "all" holds the improved-JSHOP sweep, "filter" the original
            # JSHOP with length-based edge filtering.
            clear_dir(parent_dir + "all")
            clear_dir(parent_dir + "filter")
            static_file = parent_dir + "all/static.csv"
            create_csv(static_file, ["multi", "host_num", "edge_num", "path_num",
                                     "sag_node", "sag_edge", "enum_time", "max_mem"])
            static_file2 = parent_dir + "filter/static.csv"
            create_csv(static_file2, ["multi", "host_num", "edge_num", "path_num",
                                      "sag_node", "sag_edge", "enum_time", "max_mem"])
            # JSHOP changes the working directory; remember it so the timeout
            # handler can restore it after killing the Java subprocess.
            current_dir = os.getcwd()
            for multi in topo_multis:
                logger.info(f"当前拓扑为:input/expand_topo/{topo_name}/{topo_type}/Host0_width_{multi}.json")
                try:
                    with eventlet.Timeout(3600, True):
                        logger.info("当前使用的是改进后的jshop规划器")
                        jshop_paths(planner, sence, topo_type, gjshop, planner_obj, multi,
                                    result_path, static_file, domainfile_path,
                                    problemfile_path, parent_dir)
                except eventlet.timeout.Timeout:
                    logger.info(f"程序运行超时在倍数{multi}")
                    os.system("kill -9 $(ps -ef | grep 'java -Xss2048K -Xmx1024M problem'| awk '{print $2}')")
                    os.chdir(current_dir)
                    break
            for multi in topo_multis:
                logger.info(f"当前拓扑为:input/expand_topo/{topo_name}/{topo_type}/Host0_width_{multi}.json")
                try:
                    with eventlet.Timeout(3600, True):
                        logger.info("当前使用的是原始的jshop规划器,启用长度过滤")
                        jshop_paths_filter(planner, sence, topo_type, gjshop, planner_obj, multi,
                                           result_path, static_file2, domainfile_path,
                                           problemfile_path, parent_dir)
                except eventlet.timeout.Timeout:
                    logger.info(f"程序运行超时在倍数{multi}")
                    os.system("kill -9 $(ps -ef | grep 'java -Xss2048K -Xmx1024M problem'| awk '{print $2}')")
                    os.chdir(current_dir)
                    break
        elif "pyhop" in planner:
            if domainfile_path is None:
                domainfile_path = f"input/{planner}/{sence}/domain1.py"
            # Default goal task depends on which topology the scene uses.
            if goaltasks is None:
                if "Topo1" in sence:
                    goaltasks = [('AttackGoal', 'Attacker', 'Attacker', 'Host3')]
                elif "Topo2" in sence:
                    goaltasks = [('AttackGoal', 'Attacker', 'Attacker', 'Host6')]
                elif "Topo3" in sence:
                    goaltasks = [('AttackGoal', 'Attacker', 'Attacker', 'Splc')]
            # Decomposition-scheme name = domain file basename without extension.
            domain_name = domainfile_path.split("/")[-1].split(".")[0]
            problemfile_path = f"input/{planner}/{sence}/problem.py"
            gpyhop = GenPYHOPInputFile_Phycial()
            planner_obj = planner_dict[planner]()
            result_path = parent_dir + "result.soln2"
            clear_dir(parent_dir + f"{domain_name}/all")
            clear_dir(parent_dir + f"{domain_name}/filter")
            clear_dir(parent_dir + f"{domain_name}/enum")
            static_file = parent_dir + f"{domain_name}/all/static.csv"
            create_csv(static_file, ["multi", "host_num", "edge_num", "path_num",
                                     "sag_node", "sag_edge", "enum_time", "max_mem"])
            static_file2 = parent_dir + f"{domain_name}/filter/static.csv"
            create_csv(static_file2, ["multi", "host_num", "edge_num", "path_num",
                                      "sag_node", "sag_edge", "enum_time", "max_mem"])
            static_file3 = parent_dir + f"{domain_name}/enum/static.csv"
            create_csv(static_file3, ["multi", "host_num", "edge_num", "plan_counter",
                                      "path_num", "sag_node", "sag_edge", "enum_time", "max_mem"])
            for multi in topo_multis:
                logger.info(f"当前拓扑为:input/expand_topo/{topo_name}/{topo_type}/Host0_width_{multi}.json")
                try:
                    with eventlet.Timeout(3600, True):
                        logger.info("当前使用的是改进后的pyhop规划器")
                        pyhop_paths(planner, sence, topo_type, gpyhop, planner_obj, multi,
                                    result_path, static_file, domainfile_path, goaltasks,
                                    domain_name, parent_dir)
                except eventlet.timeout.Timeout:
                    logger.info(f"程序运行超时在倍数{multi}")
                    os.system("kill -9 $(ps -ef | grep 'utools/planner/PyhopG.py'| awk '{print $2}')")
                    break
            # NOTE: the original source also contained two commented-out
            # sweeps — pyhop_paths_enum(... static_file3 ...) ("enum" mode)
            # and pyhop_paths_filter(... static_file2 ...) ("filter" mode) —
            # each with the same timeout/kill handling as the loop above.


def pddl_paths(planner, sence, topo_type, gpddl, planner_obj, multi, result_path,
               static_file, domainfile_path, problemfile_path, parent_dir):
    """Run one PDDL-planner pass for expansion multiple `multi` and append stats."""
    topo_name = "Topo" + sence.split("Topo")[1]
    topo_path = f"input/expand_topo/{topo_name}/{topo_type}/Host0_width_{multi}.json"
    # Generate the PDDL problem file from the expanded topology.
    gpddl.loadTopo(topo_path)
    host_num, edge_num = gpddl.get_host_edge()
    clear_dir(parent_dir + f"{multi}")
    if "Complex" in sence:
        gpddl.genProblemFile_Complex(problemfile_path)
        if "symk" not in planner:
            planner_obj.Init_Complex(topo_path,
                                     f"input/info/Complex{topo_name}/action_map.json",
                                     f"input/info/Complex{topo_name}/non_Critical.json")
            paths, plan_counter, enum_time, max_time, max_mem = planner_obj.path_generator_Complex(
                domainfile_path, problemfile_path, result_path)
        else:
            # symk enumerates all solution paths in a single call.
            paths, plan_counter, enum_time, max_time, max_mem = planner_obj.path_generator(
                domainfile_path, problemfile_path, result_path)
        out_filepath = parent_dir + f"{multi}/paths.txt"
        planner_obj.output_paths(paths, out_filepath)
        sag_node, sag_edge = sag_generate(out_filepath, host_num, edge_num)
        append_csv(static_file, [multi, host_num, edge_num, plan_counter, len(paths),
                                 sag_node, sag_edge, enum_time, max_mem])
        file_handler.flush()  # make the log visible even during long sweeps
    elif "Simple" in sence:
        gpddl.genProblemFile_Simple(problemfile_path)
        if "symk" not in planner:
            planner_obj.Init_Simple(topo_path,
                                    f"input/info/Simple{topo_name}/action_map.json",
                                    f"input/info/Simple{topo_name}/non_Critical.json")
            paths, plan_counter, enum_time, max_time, max_mem = planner_obj.path_generator_Simple(
                domainfile_path, problemfile_path, result_path)
        else:
            paths, plan_counter, enum_time, max_time, max_mem = planner_obj.path_generator(
                domainfile_path, problemfile_path, result_path)
        out_filepath = parent_dir + f"{multi}/paths.txt"
        planner_obj.output_paths(paths, out_filepath)
        sag_node, sag_edge = sag_generate(out_filepath, host_num, edge_num)
        append_csv(static_file, [multi, host_num, edge_num, plan_counter, len(paths),
                                 sag_node, sag_edge, enum_time, max_mem])
        file_handler.flush()


def jshop_paths(planner, sence, topo_type, gjshop, planner_obj, multi, result_path,
                static_file, domainfile_path, problemfile_path, parent_dir):
    """Run the improved JSHOP planner for multiple `multi`; stats go to all/."""
    topo_name = "Topo" + sence.split("Topo")[1]
    topo_path = f"input/expand_topo/{topo_name}/{topo_type}/Host0_width_{multi}.json"
    gjshop.loadTopo(topo_path)
    host_num, edge_num = gjshop.get_host_edge()
    clear_dir(parent_dir + f"all/{multi}")
    if "Complex" in sence:
        gjshop.genProblemFile_Complex(problemfile_path)
        paths, enum_time, max_mem = planner_obj.path_generator(
            domainfile_path, problemfile_path, result_path)
        out_filepath = parent_dir + f"all/{multi}/paths.txt"
        planner_obj.output_paths(paths, out_filepath)
        sag_node, sag_edge = sag_generate(out_filepath, host_num, edge_num)
        append_csv(static_file, [multi, host_num, edge_num, len(paths),
                                 sag_node, sag_edge, enum_time, max_mem])
        file_handler.flush()
    elif "Simple" in sence:
        gjshop.genProblemFile_Simple(problemfile_path)
        paths, enum_time, max_mem = planner_obj.path_generator(
            domainfile_path, problemfile_path, result_path)
        out_filepath = parent_dir + f"all/{multi}/paths.txt"
        planner_obj.output_paths(paths, out_filepath)
        sag_node, sag_edge = sag_generate(out_filepath, host_num, edge_num)
        append_csv(static_file, [multi, host_num, edge_num, len(paths),
                                 sag_node, sag_edge, enum_time, max_mem])
        file_handler.flush()


def jshop_paths_filter(planner, sence, topo_type, gjshop, planner_obj, multi, result_path,
                       static_file, domainfile_path, problemfile_path, parent_dir):
    """Run the original JSHOP planner with length-based edge filtering.

    The topology is partitioned by path length (get_filter_edges); each
    partition is solved separately and the de-duplicated union of paths is
    reported.  Enumeration time is accumulated across partitions (plus the
    partitioning time itself); memory is the per-partition maximum.
    """
    topo_name = "Topo" + sence.split("Topo")[1]
    topo_path = f"input/expand_topo/{topo_name}/{topo_type}/Host0_width_{multi}.json"
    gjshop.loadTopo(topo_path)
    host_num, edge_num = gjshop.get_host_edge()
    clear_dir(parent_dir + f"filter/{multi}")
    if "Complex" in sence:
        start = time()
        len_paths = gjshop.get_filter_edges()
        end = time()
        paths = []
        all_enum_time, all_max_mem = 0, 0
        for length, filter_edges in len_paths.items():
            # True enables edge filtering in the generated problem file.
            gjshop.genProblemFile_Complex(problemfile_path, filter_edges, True)
            path, enum_time, max_mem = planner_obj.path_generator(
                domainfile_path, problemfile_path, result_path, jshop2="origion")
            all_enum_time += enum_time
            all_max_mem = max(all_max_mem, max_mem)
            for p in path:
                if p not in paths:
                    paths.append(p)
        all_enum_time = round(all_enum_time + end - start, 3)
        out_filepath = parent_dir + f"filter/{multi}/paths.txt"
        planner_obj.output_paths(paths, out_filepath)
        sag_node, sag_edge = sag_generate(out_filepath, host_num, edge_num)
        append_csv(static_file, [multi, host_num, edge_num, len(paths),
                                 sag_node, sag_edge, all_enum_time, all_max_mem])
        file_handler.flush()
    elif "Simple" in sence:
        start = time()
        len_paths = gjshop.get_filter_edges()
        end = time()
        paths = []
        all_enum_time, all_max_mem = 0, 0
        for length, filter_edges in len_paths.items():
            gjshop.genProblemFile_Simple(problemfile_path, filter_edges, True)
            path, enum_time, max_mem = planner_obj.path_generator(
                domainfile_path, problemfile_path, result_path, jshop2="origion")
            all_enum_time += enum_time
            all_max_mem = max(all_max_mem, max_mem)
            for p in path:
                if p not in paths:
                    paths.append(p)
        all_enum_time = round(all_enum_time + end - start, 3)
        out_filepath = parent_dir + f"filter/{multi}/paths.txt"
        planner_obj.output_paths(paths, out_filepath)
        sag_node, sag_edge = sag_generate(out_filepath, host_num, edge_num)
        append_csv(static_file, [multi, host_num, edge_num, len(paths),
                                 sag_node, sag_edge, all_enum_time, all_max_mem])
        file_handler.flush()


def pyhop_paths(planner, sence, topo_type, gpyhop, planner_obj, multi, result_path,
                static_file, domainfile_path, goaltasks, domain_name, parent_dir):
    """Run the improved Pyhop planner; stats go to <domain_name>/all/."""
    topo_name = "Topo" + sence.split("Topo")[1]
    topo_path = f"input/expand_topo/{topo_name}/{topo_type}/Host0_width_{multi}.json"
    gpyhop.loadTopo(topo_path)
    host_num, edge_num = gpyhop.get_host_edge()
    del gpyhop  # only needed for host/edge counts; Pyhop reads the topo itself
    clear_dir(parent_dir + f"{domain_name}/all/{multi}")
    out_filepath = parent_dir + f"{domain_name}/all/{multi}/paths.txt"
    if "Complex" in sence:
        paths, enum_time, max_mem = planner_obj.path_generator(
            domainfile_path, topo_path, goaltasks, result_path)
        planner_obj.output_paths(paths, out_filepath)
        sag_node, sag_edge = sag_generate(out_filepath, host_num, edge_num)
        append_csv(static_file, [multi, host_num, edge_num, len(paths),
                                 sag_node, sag_edge, enum_time, max_mem])
        file_handler.flush()
    elif "Simple" in sence:
        paths, enum_time, max_mem = planner_obj.path_generator(
            domainfile_path, topo_path, goaltasks, result_path)
        planner_obj.output_paths(paths, out_filepath)
        sag_node, sag_edge = sag_generate(out_filepath, host_num, edge_num)
        append_csv(static_file, [multi, host_num, edge_num, len(paths),
                                 sag_node, sag_edge, enum_time, max_mem])
        file_handler.flush()


def pyhop_paths_filter(planner, sence, topo_type, gpyhop, planner_obj, multi, result_path,
                       static_file, domainfile_path, goaltasks, domain_name, parent_dir):
    """Run Pyhop in "filter" mode; stats go to <domain_name>/filter/."""
    topo_name = "Topo" + sence.split("Topo")[1]
    topo_path = f"input/expand_topo/{topo_name}/{topo_type}/Host0_width_{multi}.json"
    gpyhop.loadTopo(topo_path)
    host_num, edge_num = gpyhop.get_host_edge()
    del gpyhop
    clear_dir(parent_dir + f"{domain_name}/filter/{multi}")
    out_filepath = parent_dir + f"{domain_name}/filter/{multi}/paths.txt"
    if "Complex" in sence:
        paths, enum_time, max_mem = planner_obj.path_generator(
            domainfile_path, topo_path, goaltasks, result_path, "filter")
        planner_obj.output_paths(paths, out_filepath)
        sag_node, sag_edge = sag_generate(out_filepath, host_num, edge_num)
        append_csv(static_file, [multi, host_num, edge_num, len(paths),
                                 sag_node, sag_edge, enum_time, max_mem])
        file_handler.flush()
    elif "Simple" in sence:
        paths, enum_time, max_mem = planner_obj.path_generator(
            domainfile_path, topo_path, goaltasks, result_path, "filter")
        planner_obj.output_paths(paths, out_filepath)
        sag_node, sag_edge = sag_generate(out_filepath, host_num, edge_num)
        append_csv(static_file, [multi, host_num, edge_num, len(paths),
                                 sag_node, sag_edge, enum_time, max_mem])
        file_handler.flush()


def pyhop_paths_enum(planner, sence, topo_type, gpyhop, planner_obj, multi, result_path,
                     static_file, domainfile_path, goaltasks, domain_name, parent_dir):
    """Run Pyhop in "enum" mode (also reports plan_counter); stats to <domain_name>/enum/."""
    topo_name = "Topo" + sence.split("Topo")[1]
    topo_path = f"input/expand_topo/{topo_name}/{topo_type}/Host0_width_{multi}.json"
    gpyhop.loadTopo(topo_path)
    host_num, edge_num = gpyhop.get_host_edge()
    del gpyhop
    clear_dir(parent_dir + f"{domain_name}/enum/{multi}")
    out_filepath = parent_dir + f"{domain_name}/enum/{multi}/paths.txt"
    if "Complex" in sence:
        paths, plan_counter, enum_time, max_mem = planner_obj.path_generator(
            domainfile_path, topo_path, goaltasks, result_path, "enum")
        planner_obj.output_paths(paths, out_filepath)
        sag_node, sag_edge = sag_generate(out_filepath, host_num, edge_num)
        append_csv(static_file, [multi, host_num, edge_num, plan_counter, len(paths),
                                 sag_node, sag_edge, enum_time, max_mem])
        file_handler.flush()
    elif "Simple" in sence:
        paths, plan_counter, enum_time, max_mem = planner_obj.path_generator(
            domainfile_path, topo_path, goaltasks, result_path, "enum")
        planner_obj.output_paths(paths, out_filepath)
        sag_node, sag_edge = sag_generate(out_filepath, host_num, edge_num)
        append_csv(static_file, [multi, host_num, edge_num, plan_counter, len(paths),
                                 sag_node, sag_edge, enum_time, max_mem])
        file_handler.flush()


if __name__ == "__main__":
    # Planner / scene / topology selection.  Alternative configurations from
    # the original source:
    #   planners = ["pddl/sgplan", "pddl/popf2", "pddl/optic", "pddl/ff", "pddl/fd"]
    #   planners = ["htn/pyhop", "pddl/sgplan", "pddl/popf2", "pddl/optic",
    #               "pddl/ff", "pddl/fd", "pddl/symk", "htn/jshop"]
    #   sences = ["SimpleTopo3"]
    #   topo_types = ["ring", "star", "mesh"]
    planners = ["htn/pyhop"]
    sences = ["ComplexTopo5"]
    topo_types = ["mesh"]
    # NOTE(review): range(0, 1, 20) yields only [0] — presumably
    # range(0, 20, 1) or similar was intended; confirm.
    topo_multis = range(0, 1, 20)
    goaltasks = [('AttackGoal', 'Attacker', 'Attacker', 'Splc')]
    for planner in planners:
        # Wipe any previous results for this planner.
        clear_dir(f"result/{planner}")
        for sence in sences:
            for topo_type in topo_types:
                logger.info(f"当前规划器为:{planner}, 场景为:{sence}, 拓扑类型为:{topo_type}")
                static_planner_sence_topo_type(planner, sence, topo_type, topo_multis,
                                               logger, goaltasks=goaltasks)
# (Original page trailer: "这个是static_all_phycial的代码,也一并结合分析" —
#  i.e. "this is the static_all_phycial code, analyse it together".)
10-08
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值