record start/stop time in batch file

本文介绍了一种简单的方法来记录和显示执行特定任务所需的耗时。通过设置变量记录开始时间,在任务完成后显示开始与结束时间,可以直观地了解任务耗时情况。

When executing a time-consuming task, I am always interested in knowing how long it takes. Here is a simple way to record it in a Windows batch file.

 

set temp_time=%TIME%

pause

@echo start at %temp_time%

@echo stop at %TIME%

import numpy as np import gurobipy as gp from gurobipy import GRB from scipy.spatial import distance from scipy import io import itertools import random import copy import time import matplotlib.pyplot as plt from collections import deque # ====================== # 仿真环境构建 # ====================== print("初始化仿真环境...") J = 8 # 服务区域内站点数量 m = 3 # 车队规模 c = 6 # 车载容量 N = 16 # 每趟列车的乘客发生量强度 batch_size = 10 # 总batch数量 service_time = 1 # 停站服务时间(分钟) headway = 10 # 车头时距间隔(分钟) M = 100 # 未被服务时的出行时间 t = 0 # 当前仿真时间 btw = np.zeros((m, 1)) # 车辆可用的时间 btw_vehicle = [] for i in range(m): vehicle = { 'record': [ [0, 0, 0, 0] # 初始化,第一行为时间、路径等信息 ] } btw_vehicle.append(vehicle) beta1, beta2, beta3 = 100, 10, 1 # 权重参数 metro_station = np.array([[0, 0]]) # 地铁站坐标 vehicle_speed = 30 # 公里/小时 servicearea_L, servicearea_W = 5, 3 # 服务区域尺寸 mat_data = io.loadmat('F:/LM程序/LM_stop8.mat') LM_stop = mat_data['LM_stop'] all_stop = np.vstack([metro_station, LM_stop]) # 合并所有站点 dist_matrix = distance.squareform(distance.pdist(all_stop)) # 计算欧氏距离 traveltime_matrix = dist_matrix / vehicle_speed * 60 # ====================== # 辅助函数 # ====================== def candidate_route_gen(max_stop, max_traveltime, traveltime_matrix, service_time, J): """生成满足约束的候选路径""" print(f"生成候选路径 (最多站点: {max_stop}, 最大行程时间: {max_traveltime}分钟)...") all_routes = [] for i in range(1, max_stop + 1): combinations = itertools.combinations(range(1, J + 1), i) for combo in combinations: permutations = itertools.permutations(combo) for perm in permutations: route = [0] + list(perm) + [0] all_routes.append(route) candidate_route = [] for route in all_routes: travel_time = 0 for j in range(len(route) - 1): from_node = route[j] to_node = route[j + 1] travel_time += traveltime_matrix[from_node][to_node] total_time = travel_time + (len(route) - 2) * service_time if total_time <= max_traveltime: candidate_route.append({ 'route': route, 'travel_time': total_time }) print(f"生成 {len(candidate_route)} 条候选路径") return candidate_route def 
parameter_gen(candidate_routes, J, M, service_time, traveltime_matrix): """生成模型所需参数矩阵""" print("生成模型参数...") K = len(candidate_routes) tk = [route['travel_time'] for route in candidate_routes] phi_jk = [] for j in range(1, J + 1): row = [] for route in candidate_routes: # 检查站点j是否在当前路径中 route_nodes = route['route'] row.append(1 if j in route_nodes else 0) phi_jk.append(row) t_jk = [[M] * K for _ in range(J)] for j_idx in range(J): j = j_idx + 1 for k, route in enumerate(candidate_routes): current_route = route['route'] if j not in current_route: continue idx = current_route.index(j) arrival_time = 0.0 for seg in range(idx): from_node = current_route[seg] to_node = current_route[seg + 1] arrival_time += traveltime_matrix[from_node, to_node] arrival_time += service_time * (idx - 1) t_jk[j_idx][k] = arrival_time return phi_jk, tk, t_jk def route_rank2(Wk, tk, Zjk, phi_jk, btw, t, headway): """对路径进行优先级排序""" btw = np.maximum(btw, t) valid_indices = np.where(Wk >= 1)[0] if len(valid_indices) == 0: return np.empty((0, 4), dtype=int) route_numbers = (valid_indices + 1).astype(int) S = np.zeros((len(route_numbers), 4), dtype=int) S[:, 0] = route_numbers S[:, 1] = Wk[valid_indices] S[:, 2] = [tk[i] for i in valid_indices] # 使用列表推导式获取正确的行程时间 all_permutations = list(itertools.permutations(route_numbers)) min_ft = float('inf') best_sequence = None for seq in all_permutations: current_btw = btw.copy() total_wait = 0 for route in seq: vid = np.argmin(current_btw) start_time = current_btw[vid].item() # 获取当前路径的行程时间 route_idx = route - 1 route_travel_time = tk[route_idx] current_btw[vid] += route_travel_time total_wait += np.sum(Zjk[:, route_idx]) * (start_time - t) idle_time = np.sum(np.maximum(t + headway - current_btw, 0)) ft = total_wait + idle_time if ft < min_ft: min_ft = ft best_sequence = seq if best_sequence is not None: priority_dict = {route: idx + 1 for idx, route in enumerate(best_sequence)} S[:, 3] = np.vectorize(priority_dict.get)(S[:, 0]) return S[S[:, 3].argsort()] 
else: return S # ====================== # 修正后的车辆调度函数 # ====================== def vehicle_dispatch(btw, t_jk, S, U, Zjk, t, headway, total_trip, total_traveltime, total_waitingtime, totoal_ridingtime, btw_vehicle, chengke): """执行车辆调度""" U = U.copy() btw = btw.copy() for i in range(len(btw)): if btw[i] < t: vr = btw_vehicle[i] if len(vr['record']) == 0: vr['record'].append([t, t, 0, 0]) else: last_end = vr['record'][-1][1] vr['record'].append([last_end, t, 0, 0]) btw[i] = t for current_time in range(t, t + headway + 1): available = [i for i, bt in enumerate(btw) if current_time > bt] sorted_available = sorted(available, key=lambda x: btw[x]) if sorted_available and np.sum(U) > 0: for bus_idx in sorted_available: if np.sum(U) <= 0: break if S.size == 0: break route_info = S[0] total_trip[0] += 1 route_idx = route_info[0] - 1 # 路径索引 route_travel_time = route_info[2] # 路径行程时间 total_traveltime[0] += route_travel_time served_pax = Zjk[:, route_idx] totoal_ridingtime[0] += np.sum(served_pax * t_jk[:, route_idx]) waiting_time = btw[bus_idx] - t total_waitingtime[0] += np.sum(served_pax) * waiting_time # 更新乘客信息 for j in range(len(served_pax)): if served_pax[j] > 0: stop = j + 1 # 站点编号 pax_mask = (chengke[:, 2] == stop) & (chengke[:, 9] == 0) pax_candidates = np.where(pax_mask)[0] if len(pax_candidates) > 0: num_pax = min(served_pax[j], len(pax_candidates)) selected = pax_candidates[:num_pax] chengke[selected, 9] = 1 # 标记为已服务 chengke[selected, 4] = btw[bus_idx] # 上车时间 chengke[selected, 5] = chengke[selected, 4] - chengke[selected, 3] # 等待时间 chengke[selected, 6] = t_jk[j, route_idx] # 乘车时间 chengke[selected, 7] = route_info[0] # 路径ID chengke[selected, 8] = bus_idx + 1 # 车辆ID # 更新车辆记录 vr = btw_vehicle[bus_idx] if not vr['record']: vr['record'].append([ btw[bus_idx], btw[bus_idx] + route_travel_time, route_info[0], route_travel_time ]) else: last_end = vr['record'][-1][1] vr['record'].append([ last_end, last_end + route_travel_time, route_info[0], route_travel_time ]) # 
更新车辆可用时间和需求 btw[bus_idx] += route_travel_time U = U - Zjk[:, route_idx] S = np.delete(S, 0, axis=0) # 移除已分配路径 if np.sum(U) <= 0: break # 处理未服务的乘客 if current_time == t + headway and np.sum(U) > 0: total_waitingtime[0] += np.sum(U) * headway return (btw, S, U, total_trip, total_traveltime, total_waitingtime, totoal_ridingtime, btw_vehicle, chengke) def lastmile_model(phi_jk, tk, t_jk, U, beta1, beta2, beta3, K, J, c): """构建并求解混合整数规划模型""" print("构建并求解MIP模型...") try: model = gp.Model("LastMile") model.Params.OutputFlag = 0 model.Params.TimeLimit = 30 # 设置30秒时间限制 wk = model.addVars(K, vtype=GRB.INTEGER, name="wk") g = model.addVar(vtype=GRB.INTEGER, name="g") zjk = model.addVars(J, K, vtype=GRB.INTEGER, name="zjk") obj = beta1 * g obj += beta2 * gp.quicksum(tk[k] * wk[k] for k in range(K)) obj += beta3 * gp.quicksum(t_jk[j][k] * zjk[j, k] for j in range(J) for k in range(K)) model.setObjective(obj, GRB.MINIMIZE) # 约束1: 所有需求必须被满足 for j in range(J): model.addConstr( gp.quicksum(zjk[j, k] * phi_jk[j][k] for k in range(K)) == U[j], name=f"constr1_j{j}" ) # 约束2: 车辆容量约束 for k in range(K): model.addConstr( gp.quicksum(zjk[j, k] * phi_jk[j][k] for j in range(J)) <= c * wk[k], name=f"constr2_k{k}" ) # 约束3: 总行程数 model.addConstr( gp.quicksum(wk[k] for k in range(K)) == g, name="constr3_total_trips" ) # 约束4: 非负约束 model.addConstr(g >= 1, name="constr4_g_min") for k in range(K): model.addConstr(wk[k] >= 0, name=f"constr4_wk{k}_min") for j in range(J): for k in range(K): model.addConstr(zjk[j, k] >= 0, name=f"constr4_zjk{j}{k}_min") model.optimize() if model.status == GRB.OPTIMAL: Zjk = np.zeros((J, K), dtype=int) Wk = np.zeros(K, dtype=int) for j in range(J): for k in range(K): Zjk[j][k] = round(zjk[j, k].X) for k in range(K): Wk[k] = round(wk[k].X) G = round(g.X) return Zjk, Wk, G else: # 如果未找到最优解,使用启发式方法生成可行解 print("未找到最优解,使用启发式方法生成可行解...") return heuristic_solution(phi_jk, U, c, K, J) except gp.GurobiError as e: print(f"Gurobi错误: {e}") return heuristic_solution(phi_jk, U, c, K, J) 
def heuristic_solution(phi_jk, U, c, K, J):
    """Greedy fallback: assign demand route by route when the MIP fails.

    FIX: the original applied the capacity ``c`` per *stop* instead of per
    *trip*, which could overload a vehicle and violate the MIP's capacity
    constraint; capacity is now tracked across all stops of a route.
    """
    print("使用启发式方法生成可行解...")
    Zjk = np.zeros((J, K), dtype=int)
    Wk = np.zeros(K, dtype=int)

    remaining_demand = U.copy()
    k = 0
    while np.sum(remaining_demand) > 0 and k < K:
        # Which stops on route k still have unmet demand?
        coverage = np.zeros(J, dtype=int)
        for j in range(J):
            if phi_jk[j][k] == 1 and remaining_demand[j] > 0:
                coverage[j] = 1
        if np.sum(coverage) > 0:
            Wk[k] = 1  # run one trip of route k
            capacity_left = c  # shared across all stops of this trip
            for j in range(J):
                if coverage[j] == 1 and capacity_left > 0:
                    assign = min(remaining_demand[j], capacity_left)
                    Zjk[j][k] = assign
                    remaining_demand[j] -= assign
                    capacity_left -= assign
        k += 1
    G = np.sum(Wk)
    return Zjk, Wk, G


# ======================
# Data loading and preprocessing
# ======================
print("加载乘客分布数据...")
# NOTE(review): hard-coded absolute paths — adjust for the local environment.
passenger_distributionUN = io.loadmat('F:/LM程序/passenger_distribution_16UN.mat')['passenger_distributionUN']
passenger_distributionSH = io.loadmat('F:/LM程序/passenger_distribution_16SH.mat')['passenger_distributionSH']
passenger_distributionEH = io.loadmat('F:/LM程序/passenger_distribution_16EH.mat')['passenger_distributionEH']
ui = passenger_distributionEH  # selected demand distribution

# Passenger table rows: [batch, seq-in-batch, alighting stop, arrival time,
# boarding time, wait, ride time, route id, vehicle id, served flag].
chengke = []
for i in range(1, batch_size + 1):
    passenger_count_in_batch = 1
    for j in range(1, J + 1):
        passenger_num = ui[i - 1, j - 1].item()
        if passenger_num > 0:
            for _ in range(int(passenger_num)):
                arrival_time = t + (i - 1) * headway
                chengke.append([i, passenger_count_in_batch, j, arrival_time,
                                *[0] * 6])
                passenger_count_in_batch += 1

# ======================
# Candidate route generation
# ======================
candidate_route = candidate_route_gen(
    max_stop=3,
    max_traveltime=14,
    traveltime_matrix=traveltime_matrix,
    service_time=service_time,
    J=J
)
K = len(candidate_route)
phi_jk, tk, t_jk = parameter_gen(candidate_route, J, M, service_time, traveltime_matrix)

# ======================
# Bookkeeping initialization
# ======================
# Single-element lists so vehicle_dispatch can update them in place.
total_trip = [0]
total_traveltime = [0]
total_waitingtime = [0]
totoal_ridingtime = [0]  # original (misspelled) name kept for compatibility
chengke = np.array(chengke)
t_jk = np.array(t_jk)
btw = np.array(btw)
tk = np.array(tk)
btw_record = np.zeros((len(btw), batch_size + 1))
s = [{'route': None} for _ in range(batch_size + 100)]        # per-interval route plans
pax_asg = [{'record': None} for _ in range(batch_size + 100)] # per-interval assignments

# ======================
# Main simulation loop
# ======================
print("开始主仿真循环...")
for i in range(batch_size):
    print(f"\n处理批次 {i + 1}/{batch_size}...")
    # Accumulate new demand onto whatever is still unserved.
    if i == 0:
        U = ui[0, :].copy()
    else:
        U += ui[i, :]
    print(f"当前需求: {U}")

    # Solve the assignment model for the current demand.
    Zjk, Wk, G = lastmile_model(phi_jk, tk, t_jk, U, beta1, beta2, beta3, K, J, c)
    print(f"模型求解完成: 总行程数={G}, 路径分配={Wk}")

    # Rank the selected routes for dispatch.
    S = route_rank2(Wk, tk, Zjk, phi_jk, btw, t, headway)
    print(f"路径排序完成: 分配{len(S)}条路径")
    Temp_S = S.copy() if S.size > 0 else np.array([])

    # Dispatch vehicles over this headway window.
    (btw, S, U, total_trip, total_traveltime, total_waitingtime,
     totoal_ridingtime, btw_vehicle, chengke) = vehicle_dispatch(
        btw, t_jk, S, U, Zjk, t, headway,
        total_trip, total_traveltime, total_waitingtime,
        totoal_ridingtime, btw_vehicle, chengke
    )

    # Record the plan for this interval.
    if Temp_S.size > 0:
        s[i]["route"] = Temp_S
        pax_asg[i]['record'] = Zjk
    else:
        s[i]["route"] = np.array([])
        pax_asg[i]['record'] = np.zeros((J, K))

    # Advance the clock and snapshot vehicle availability.
    t += headway
    btw_record[:, i + 1] = btw.squeeze()
    print(f"批次完成, 剩余需求: {np.sum(U)}")

# Keep running extra windows until leftover demand is cleared (bounded).
print("\n处理剩余需求...")
plus_trip = batch_size
while np.sum(U) > 0 and plus_trip < batch_size + 10:  # safety cap
    plus_trip += 1
    print(f"额外批次 {plus_trip - batch_size}, 剩余需求: {np.sum(U)}")

    Zjk, Wk, G = lastmile_model(phi_jk, tk, t_jk, U, beta1, beta2, beta3, K, J, c)
    print(f"模型求解完成: 总行程数={G}, 路径分配={Wk}")

    S = route_rank2(Wk, tk, Zjk, phi_jk, btw, t, headway)
    print(f"路径排序完成: 分配{len(S)}条路径")
    Temp_S = S.copy() if S.size > 0 else np.array([])

    (btw, S, U, total_trip, total_traveltime, total_waitingtime,
     totoal_ridingtime, btw_vehicle, chengke) = vehicle_dispatch(
        btw, t_jk, S, U, Zjk, t, headway,
        total_trip, total_traveltime, total_waitingtime,
        totoal_ridingtime, btw_vehicle, chengke
    )

    if Temp_S.size > 0:
        s[plus_trip] = {"route": Temp_S}
        pax_asg[plus_trip] = {'record': Zjk}
    else:
        s[plus_trip] = {"route": np.array([])}
        pax_asg[plus_trip] = {'record': np.zeros((J, K))}

    t += headway

print(f"\n额外的运行周期:{plus_trip - batch_size}")
total_pax = np.sum(ui)
print(f'总的乘客数量为:{total_pax}')
print(f'总的行程数量为:{total_trip[0]}')
print(f'总的服务时间为:{total_traveltime[0]}')
print(f'乘客总的乘车时间为:{totoal_ridingtime[0]}')
print(f'乘客总的等待时间为:{total_waitingtime[0]}')
if total_pax > 0:
    avg_riding = totoal_ridingtime[0] / total_pax
    avg_waiting = total_waitingtime[0] / total_pax
    print(f'乘客总的平均乘车时间为:{avg_riding:.2f}')
    print(f'乘客总的平均等待时间为:{avg_waiting:.2f}')
else:
    print('乘客总数为零,无法计算平均值')


# ======================
# Tabu search optimizer
# ======================
class TabuSearchOptimizer:
    """Tabu search over the per-interval route plans produced above."""

    def __init__(self, initial_solution, candidate_routes, travel_time_matrix,
                 passenger_data, vehicle_capacity, headway, num_vehicles,
                 max_iter=50, max_no_improve=10, tabu_tenure=7):
        """Initialize the tabu search optimizer.

        Args:
            initial_solution: list of interval dicts with 'route' plans.
            candidate_routes: output of candidate_route_gen.
            travel_time_matrix: node-to-node travel times (minutes).
            passenger_data: passenger table as nested lists.
            vehicle_capacity / headway / num_vehicles: fleet parameters.
            max_iter / max_no_improve / tabu_tenure: search parameters.
        """
        self.initial_solution = initial_solution
        self.candidate_routes = candidate_routes
        self.travel_time_matrix = travel_time_matrix
        self.passenger_data = passenger_data
        self.vehicle_capacity = vehicle_capacity
        self.headway = headway
        self.num_vehicles = num_vehicles
        self.max_iter = max_iter
        self.max_no_improve = max_no_improve
        self.tabu_tenure = tabu_tenure

        # Search state.
        self.best_solution = self.initialize_solution(initial_solution)
        self.best_objective = self.evaluate_solution(self.best_solution)
        self.current_solution = copy.deepcopy(self.best_solution)
        self.current_objective = self.best_objective
        self.tabu_list = deque(maxlen=tabu_tenure)  # fixed-length tabu memory
        self.objective_history = [self.best_objective]
        self.improvement_history = []

    def initialize_solution(self, solution):
        """Normalize every interval's 'route' to a plain list (not ndarray)."""
        initialized = []
        for interval in solution:
            if 'route' in interval and isinstance(interval['route'], np.ndarray):
                if interval['route'].size > 0:
                    interval['route'] = interval['route'].tolist()
                else:
                    interval['route'] = []
            initialized.append(interval)
        return initialized
def evaluate_solution(self, solution): """ 评估解决方案的目标函数值(总等待时间+乘车时间) """ total_waiting = 0 total_riding = 0 vehicle_available = np.zeros(self.num_vehicles) unserved_passengers = [] # 预处理乘客数据为结构化数组 passenger_array = np.array(self.passenger_data, dtype=object) # 处理每个时间间隔 for i, interval in enumerate(solution): interval_start = i * self.headway # 添加当前间隔到达的乘客 batch_mask = (passenger_array[:, 0] == i + 1) if np.any(batch_mask): batch_passengers = passenger_array[batch_mask].copy() batch_passengers = np.column_stack((batch_passengers, np.full(batch_passengers.shape[0], interval_start))) unserved_passengers.extend(batch_passengers.tolist()) # 处理当前间隔的路径 if 'route' in interval and interval['route']: routes = interval['route'] # 按优先级排序 sorted_routes = sorted(routes, key=lambda x: x[3] if len(x) > 3 else 0) for route in sorted_routes: route_idx = route[0] - 1 route_info = self.candidate_routes[route_idx] # 选择最早可用的车辆 vehicle_idx = np.argmin(vehicle_available) start_time = max(vehicle_available[vehicle_idx], interval_start) # 服务乘客 capacity_used = 0 passengers_to_remove = [] route_stops = set(route_info['route'][1:-1]) # 筛选符合条件的乘客 eligible_passengers = [] for idx, p in enumerate(unserved_passengers): if p[2] in route_stops: eligible_passengers.append((idx, p)) # 按到达时间排序 eligible_passengers.sort(key=lambda x: x[1][3]) # 服务乘客直到车辆满载 for idx, p in eligible_passengers: if capacity_used >= self.vehicle_capacity: break # 计算等待时间和乘车时间 waiting_time = start_time - p[3] from_node = 0 # 起点(地铁站) to_node = p[2] # 下车站点 riding_time = self.travel_time_matrix[from_node][to_node] total_waiting += waiting_time total_riding += riding_time capacity_used += 1 passengers_to_remove.append(idx) # 移除已服务乘客 for idx in sorted(passengers_to_remove, reverse=True): unserved_passengers.pop(idx) # 更新车辆可用时间 vehicle_available[vehicle_idx] = start_time + route_info['travel_time'] # 对未服务乘客的惩罚 last_time = len(solution) * self.headway for p in unserved_passengers: total_waiting += (last_time - p[3]) * 10 # 惩罚因子 return 
total_waiting + total_riding def generate_neighbors(self, solution, num_neighbors=10): """ 生成邻域解 """ neighbors = [] for _ in range(num_neighbors): neighbor = copy.deepcopy(solution) interval_idx = random.randint(0, len(solution) - 1) operation = random.choice(['replace', 'swap', 'add', 'remove']) # 替换操作 if operation == 'replace' and 'route' in neighbor[interval_idx] and neighbor[interval_idx]['route']: route_idx = random.randint(0, len(neighbor[interval_idx]['route']) - 1) new_route_idx = random.randint(0, len(self.candidate_routes) - 1) new_route = [ new_route_idx + 1, 1, self.candidate_routes[new_route_idx]['travel_time'], random.random() # 随机优先级 ] neighbor[interval_idx]['route'][route_idx] = new_route move = ('replace', interval_idx, route_idx, new_route_idx) neighbors.append((neighbor, move)) # 交换操作 elif operation == 'swap' and len(solution) > 1: interval_idx1 = random.randint(0, len(solution) - 1) interval_idx2 = random.randint(0, len(solution) - 1) if interval_idx1 != interval_idx2: if ('route' in neighbor[interval_idx1] and neighbor[interval_idx1]['route'] and 'route' in neighbor[interval_idx2] and neighbor[interval_idx2]['route']): route_idx1 = random.randint(0, len(neighbor[interval_idx1]['route']) - 1) route_idx2 = random.randint(0, len(neighbor[interval_idx2]['route']) - 1) # 交换路径 (neighbor[interval_idx1]['route'][route_idx1], neighbor[interval_idx2]['route'][route_idx2]) = ( neighbor[interval_idx2]['route'][route_idx2], neighbor[interval_idx1]['route'][route_idx1] ) move = ('swap', interval_idx1, interval_idx2, route_idx1, route_idx2) neighbors.append((neighbor, move)) # 添加操作 elif operation == 'add': new_route_idx = random.randint(0, len(self.candidate_routes) - 1) new_route = [ new_route_idx + 1, 1, self.candidate_routes[new_route_idx]['travel_time'], random.random() # 随机优先级 ] if 'route' not in neighbor[interval_idx]: neighbor[interval_idx]['route'] = [new_route] elif neighbor[interval_idx]['route'] is None: neighbor[interval_idx]['route'] = 
[new_route] else: neighbor[interval_idx]['route'].append(new_route) move = ('add', interval_idx, new_route_idx) neighbors.append((neighbor, move)) # 删除操作 elif operation == 'remove' and 'route' in neighbor[interval_idx] and neighbor[interval_idx]['route']: route_idx = random.randint(0, len(neighbor[interval_idx]['route']) - 1) removed_route = neighbor[interval_idx]['route'].pop(route_idx) move = ('remove', interval_idx, removed_route[0]) neighbors.append((neighbor, move)) return neighbors def is_tabu(self, move): """检查移动是否在禁忌表中""" for tabu_move in self.tabu_list: if move == tabu_move: return True return False def optimize(self): """执行禁忌搜索优化""" no_improve_count = 0 start_time = time.time() print(f"开始禁忌搜索优化,初始目标值: {self.best_objective:.2f}") print(f"{'迭代':<5} | {'当前目标值':<12} | {'历史最优':<12} | {'改进量':<10} | {'耗时(s)':<8}") print("-" * 60) for iteration in range(self.max_iter): iter_start = time.time() neighbors = self.generate_neighbors(self.current_solution, num_neighbors=20) best_neighbor = None best_neighbor_obj = float('inf') best_move = None # 评估邻域解 for neighbor, move in neighbors: if self.is_tabu(move): continue neighbor_obj = self.evaluate_solution(neighbor) if neighbor_obj < best_neighbor_obj: best_neighbor = neighbor best_neighbor_obj = neighbor_obj best_move = move # 更新当前解 if best_neighbor is not None: self.current_solution = best_neighbor self.current_objective = best_neighbor_obj self.tabu_list.append(best_move) # 更新历史最优解 if best_neighbor_obj < self.best_objective: improvement = self.best_objective - best_neighbor_obj self.improvement_history.append(improvement) self.best_solution = copy.deepcopy(best_neighbor) self.best_objective = best_neighbor_obj no_improve_count = 0 # 打印改进信息 iter_time = time.time() - iter_start print(f"{iteration + 1:<5} | {best_neighbor_obj:<12.2f} | {self.best_objective:<12.2f} | " f"+{improvement:<10.2f} | {iter_time:<8.2f}") else: no_improve_count += 1 else: no_improve_count += 1 self.objective_history.append(self.current_objective) 
# 提前终止条件 if no_improve_count >= self.max_no_improve: print(f"\n提前终止:连续 {no_improve_count} 次迭代无改进") break total_time = time.time() - start_time print("\n优化完成!") print(f"总迭代次数: {iteration + 1}") print(f"总耗时: {total_time:.2f}秒") print(f"初始目标值: {self.objective_history[0]:.2f}") print(f"最终目标值: {self.best_objective:.2f}") improvement_percent = ((self.objective_history[0] - self.best_objective) / self.objective_history[0]) * 100 print(f"改进幅度: {self.objective_history[0] - self.best_objective:.2f} ({improvement_percent:.2f}%)") return self.best_solution, self.best_objective def plot_optimization_progress(self): """绘制优化过程图""" plt.figure(figsize=(12, 6)) # 目标函数值变化 plt.subplot(1, 2, 1) plt.plot(self.objective_history, 'b-', linewidth=2) plt.xlabel('迭代次数') plt.ylabel('目标函数值') plt.title('目标函数优化过程') plt.grid(True) # 改进历史 if self.improvement_history: plt.subplot(1, 2, 2) plt.plot(self.improvement_history, 'go-', linewidth=2) plt.xlabel('改进次数') plt.ylabel('改进量') plt.title('每次改进的优化量') plt.grid(True) plt.tight_layout() plt.savefig('optimization_progress.png', dpi=300) plt.show() # ====================== # 执行禁忌搜索优化 # ====================== print("\n准备禁忌搜索优化...") # 准备初始解数据 initial_solution = [] for i in range(min(batch_size + plus_trip, len(s)): # 确保不越界 interval_data = { 'route': s[i].get('route', None), 'pax_asg': pax_asg[i].get('record', None) if i < len(pax_asg) else None } initial_solution.append(interval_data) # 创建禁忌搜索优化器 ts_optimizer = TabuSearchOptimizer( initial_solution=initial_solution, candidate_routes=candidate_route, travel_time_matrix=traveltime_matrix, passenger_data=chengke.tolist(), vehicle_capacity=c, headway=headway, num_vehicles=m, max_iter=50, max_no_improve=10, tabu_tenure=7 ) # 执行优化 best_solution, best_objective = ts_optimizer.optimize() ts_optimizer.plot_optimization_progress() # 输出最优解 print("\n最优解结构:") for i, interval in enumerate(best_solution): print(f"间隔 {i + 1}:") if 'route' in interval and interval['route']: for j, route in enumerate(interval['route']): 
print(f" 路径 {j + 1}: ID={route[0]}, 服务时间={route[2]}, 优先级={route[3]}") else: print(" 无路径") print("\n优化完成!") Traceback (most recent call last): File "F:\PycharmProjects\PythonProject1\taboo3.py", line 755, in <module> best_solution, best_objective = ts_optimizer.optimize() ^^^^^^^^^^^^^^^^^^^^^^^ File "F:\PycharmProjects\PythonProject1\taboo3.py", line 640, in optimize neighbors = self.generate_neighbors(self.current_solution, num_neighbors=20) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "F:\PycharmProjects\PythonProject1\taboo3.py", line 615, in generate_neighbors removed_route = neighbor[interval_idx]['route'].pop(route_idx) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ AttributeError: 'numpy.ndarray' object has no attribute 'pop'. Did you mean: 'ptp'? 进程已结束,退出代码为 1
06-11
<!-- -*- nxml-child-indent: 4; tab-width: 4; indent-tabs-mode: nil -*- --> <config> <!-- For more detailed documentation on typical configuration options please see: https://sdk.collaboraonline.com/docs/installation/Configuration.html --> <!-- Note: 'default' attributes are used to document a setting's default value as well as to use as fallback. --> <!-- Note: When adding a new entry, a default must be set in WSD in case the entry is missing upon deployment. --> <accessibility desc="Accessibility settings"> <enable type="bool" desc="Controls whether accessibility support should be enabled or not." default="false">false</enable> </accessibility> <allowed_languages desc="List of supported languages of Writing Aids (spell checker, grammar checker, thesaurus, hyphenation) on this instance. Allowing too many has negative effect on startup performance." default="de_DE en_GB en_US es_ES fr_FR it nl pt_BR pt_PT ru">de_DE en_GB en_US es_ES fr_FR it nl pt_BR pt_PT ru</allowed_languages> <!-- These are the settings of external (remote) spellchecker and grammar checker services. Currently LanguageTool and Duden Korrekturserver APIs are supported, you can set either of them. By default they are disabled. To turn the support on, please set "enabled" property to true. It works with self hosted or cloud services, free and premium as well. The "base_url" may be https://api.languagetoolplus.com/v2 if the cloud version of LanguageTool is used. Please note that your data in the document e.g. the text part of it will be sent to the cloud API. Please read the respective privacy policies, e.g. https://languagetool.org/legal/privacy. --> <languagetool desc="Remote API settings for spell and grammar checking"> <enabled desc="Enable Remote Spell and Grammar Checker" type="bool" default="false">false</enabled> <base_url desc="HTTP endpoint for the API server, without /check or /languages postfix at the end." 
type="string" default=""></base_url> <user_name desc="LanguageTool or Duden account username for premium usage." type="string" default=""></user_name> <api_key desc="API key provided by LanguageTool or Duden account for premium usage." type="string" default=""></api_key> <ssl_verification desc="Enable or disable SSL verification. You may have to disable it in test environments with self-signed certificates." type="string" default="true">true</ssl_verification> <rest_protocol desc="REST API protocol. For LanguageTool leave it blank, for Duden Korrekturserver use the string 'duden'." type="string" default=""></rest_protocol> </languagetool> <deepl desc="DeepL API settings for translation service"> <enabled desc="If true, shows translate option as a menu entry in the compact view and as an icon in the tabbed view." type="bool" default="false">false</enabled> <api_url desc="URL for the API" type="string" default=""></api_url> <auth_key desc="Auth Key generated by your account" type="string" default=""></auth_key> </deepl> <sys_template_path desc="Path to a template tree with shared libraries etc to be used as source for chroot jails for child processes." type="path" relative="true" default="systemplate"></sys_template_path> <child_root_path desc="Path to the directory under which the chroot jails for the child processes will be created. Should be on the same file system as systemplate and lotemplate. Must be an empty directory." type="path" relative="true" default="jails"></child_root_path> <mount_jail_tree desc="Controls whether the systemplate and lotemplate contents are mounted or not, which is much faster than the default of linking/copying each file." type="bool" default="true">true</mount_jail_tree> <server_name desc="External hostname:port of the server running coolwsd. If empty, it's derived from the request (please set it if this doesn't work). May be specified when behind a reverse-proxy or when the hostname is not reachable directly." 
type="string" default=""></server_name> <file_server_root_path desc="Path to the directory that should be considered root for the file server. This should be the directory containing cool." type="path" relative="true" default="browser/../"></file_server_root_path> <hexify_embedded_urls desc="Enable to protect encoded URLs from getting decoded by intermediate hops. Particularly useful on Azure deployments" type="bool" default="false">false</hexify_embedded_urls> <experimental_features desc="Enable/Disable experimental features" type="bool" default="true">true</experimental_features> <memproportion desc="The maximum percentage of available memory consumed by all of the Collabora Online Development Edition processes, after which we start cleaning up idle documents. If cgroup memory limits are set, this is the maximum percentage of that limit to consume." type="double" default="80.0"></memproportion> <num_prespawn_children desc="Number of child processes to keep started in advance and waiting for new clients." type="uint" default="4">4</num_prespawn_children> <fetch_update_check desc="Every number of hours will fetch latest version data. Defaults to 10 hours." type="uint" default="10">10</fetch_update_check> <allow_update_popup desc="Allows notification about an update in the editor" type="bool" default="true">true</allow_update_popup> <per_document desc="Document-specific settings, including LO Core settings."> <max_concurrency desc="The maximum number of threads to use while processing a document." type="uint" default="4">4</max_concurrency> <batch_priority desc="A (lower) priority for use by batch eg. 
convert-to processes to avoid starving interactive ones" type="uint" default="5">5</batch_priority> <bgsave_priority desc="A (lower) priority for use by background save processes to free time for interactive ones" type="uint" default="5">5</bgsave_priority> <bgsave_timeout_secs desc="The default maximum number of seconds to wait for the background save processes to finish before giving up and reverting to synchronous saving" type="uint" default="120">120</bgsave_timeout_secs> <redlining_as_comments desc="If true show red-lines as comments" type="bool" default="false">false</redlining_as_comments> <pdf_resolution_dpi desc="The resolution, in DPI, used to render PDF documents as image. Memory consumption grows proportionally. Must be a positive value less than 385. Defaults to 96." type="uint" default="96">96</pdf_resolution_dpi> <idle_timeout_secs desc="The maximum number of seconds before unloading an idle document. Defaults to 1 hour." type="uint" default="3600">3600</idle_timeout_secs> <idlesave_duration_secs desc="The number of idle seconds after which document, if modified, should be saved. Disabled when 0. Defaults to 30 seconds." type="uint" default="30">30</idlesave_duration_secs> <autosave_duration_secs desc="The number of seconds after which document, if modified, should be saved. Disabled when 0. Defaults to 5 minutes." type="uint" default="300">300</autosave_duration_secs> <background_autosave desc="Allow auto-saves to occur in a forked background process where possible." type="bool" default="true">true</background_autosave> <background_manualsave desc="Allow manual save to occur in a forked background process where possible" type="bool" default="true">true</background_manualsave> <always_save_on_exit desc="On exiting the last editor, always perform a save and upload if the document had been modified. This is to allow the storage to store the document, if it had skipped doing so, previously, as an optimization." 
type="bool" default="false">false</always_save_on_exit> <limit_virt_mem_mb desc="The maximum virtual memory allowed to each document process. 0 for unlimited." type="uint">0</limit_virt_mem_mb> <limit_stack_mem_kb desc="The maximum stack size allowed to each document process. 0 for unlimited." type="uint">8000</limit_stack_mem_kb> <limit_file_size_mb desc="The maximum file size allowed to each document process to write. 0 for unlimited." type="uint">0</limit_file_size_mb> <limit_num_open_files desc="The maximum number of files allowed to each document process to open. 0 for unlimited." type="uint">0</limit_num_open_files> <limit_load_secs desc="Maximum number of seconds to wait for a document load to succeed. 0 for unlimited." type="uint" default="100">100</limit_load_secs> <limit_store_failures desc="Maximum number of consecutive save-and-upload to storage failures when unloading the document. 0 for unlimited (not recommended)." type="uint" default="5">5</limit_store_failures> <limit_convert_secs desc="Maximum number of seconds to wait for a document conversion to succeed. 0 for unlimited." type="uint" default="100">100</limit_convert_secs> <min_time_between_saves_ms desc="Minimum number of milliseconds between saving the document on disk." type="uint" default="500">500</min_time_between_saves_ms> <min_time_between_uploads_ms desc="Minimum number of milliseconds between uploading the document to storage." type="uint" default="5000">5000</min_time_between_uploads_ms> <cleanup desc="Checks for resource consuming (bad) documents and kills associated kit process. 
A document is considered resource consuming (bad) if is in idle state for idle_time_secs period and memory usage passed limit_dirty_mem_mb or CPU usage passed limit_cpu_per" enable="true"> <cleanup_interval_ms desc="Interval between two checks" type="uint" default="10000">10000</cleanup_interval_ms> <bad_behavior_period_secs desc="Minimum time period for a document to be in bad state before associated kit process is killed. If in this period the condition for bad document is not met once then this period is reset" type="uint" default="60">60</bad_behavior_period_secs> <idle_time_secs desc="Minimum idle time for a document to be candidate for bad state" type="uint" default="300">300</idle_time_secs> <limit_dirty_mem_mb desc="Minimum memory usage for a document to be candidate for bad state" type="uint" default="3072">3072</limit_dirty_mem_mb> <limit_cpu_per desc="Minimum CPU usage for a document to be candidate for bad state" type="uint" default="85">85</limit_cpu_per> <lost_kit_grace_period_secs desc="The minimum grace period for a lost kit process (not referenced by coolwsd) to resolve its lost status before it is terminated. To disable the cleanup of lost kits use value 0" default="120">120</lost_kit_grace_period_secs> </cleanup> </per_document> <per_view desc="View-specific settings."> <out_of_focus_timeout_secs desc="The maximum number of seconds before dimming and stopping updates when the browser tab is no longer in focus. Defaults to 300 seconds." type="uint" default="300">300</out_of_focus_timeout_secs> <idle_timeout_secs desc="The maximum number of seconds before dimming and stopping updates when the user is no longer active (even if the browser is in focus). Defaults to 15 minutes." type="uint" default="900">900</idle_timeout_secs> <custom_os_info desc="Custom string shown as OS version in About dialog, get from system if empty." 
type="string" default=""></custom_os_info> <min_saved_message_timeout_secs type="uint" desc="The minimum number of seconds before the last modified message is being displayed." default="6">6</min_saved_message_timeout_secs> </per_view> <ver_suffix desc="Appended to etags to allow easy refresh of changed files during development" type="string" default=""></ver_suffix> <logging> <color type="bool">true</color> <!-- Note to developers: When you do "make run", the logging.level will be set on the coolwsd command line, so if you want to change it for your testing, do it in Makefile.am, not here. --> <level type="string" desc="Can be 0-8 (with the lowest numbers being the least verbose), or none (turns off logging), fatal, critical, error, warning, notice, information, debug, trace" default="warning">warning</level> <level_startup type="string" desc="As for level - but for the initial startup phase which is most problematic, logging reverts to level configured above when startup is complete" default="trace">trace</level_startup> <disabled_areas type="string" desc="High verbosity logging ie. 
info to trace are disable-able, comma separated: Generic, Pixel, Socket, WebSocket, Http, WebServer, Storage, WOPI, Admin, Javascript" default="Socket,WebSocket,Admin,Pixel">Socket,WebSocket,Admin,Pixel</disabled_areas> <most_verbose_level_settable_from_client type="string" desc="A loggingleveloverride message from the client can not set a more verbose log level than this" default="notice">notice</most_verbose_level_settable_from_client> <least_verbose_level_settable_from_client type="string" desc="A loggingleveloverride message from a client can not set a less verbose log level than this" default="fatal">fatal</least_verbose_level_settable_from_client> <protocol type="bool" desc="Enable minimal client-site JS protocol logging from the start">false</protocol> <!-- lokit_sal_log example: Log WebDAV-related messages, that is interesting for debugging Insert - Image operation: "+TIMESTAMP+INFO.ucb.ucp.webdav+WARN.ucb.ucp.webdav" See also: https://docs.libreoffice.org/sal/html/sal_log.html --> <lokit_sal_log type="string" desc="Fine tune log messages from LOKit. Default is to suppress log messages from LOKit." default="-INFO-WARN">-INFO-WARN</lokit_sal_log> <file enable="false"> <!-- If you use other path than /var/log and you run coolwsd from systemd, make sure that you enable that path in coolwsd.service (ReadWritePaths). Also the log file path must be writable by the 'cool' user. --> <property name="path" desc="Log file path.">/var/log/coolwsd.log</property> <property name="rotation" desc="Log file rotation strategy. See Poco FileChannel.">never</property> <property name="archive" desc="Append either timestamp or number to the archived log filename.">timestamp</property> <property name="compress" desc="Enable/disable log file compression.">true</property> <property name="purgeAge" desc="The maximum age of log files to preserve. See Poco FileChannel.">10 days</property> <property name="purgeCount" desc="The maximum number of log archives to preserve. 
Use 'none' to disable purging. See Poco FileChannel.">10</property> <property name="rotateOnOpen" desc="Enable/disable log file rotation on opening.">true</property> <property name="flush" desc="Enable/disable flushing after logging each line. May harm performance. Note that without flushing after each line, the log lines from the different processes will not appear in chronological order.">false</property> </file> <anonymize> <anonymize_user_data type="bool" desc="Enable to anonymize/obfuscate of user-data in logs. If default is true, it was forced at compile-time and cannot be disabled." default="false">false</anonymize_user_data> <anonymization_salt type="uint" desc="The salt used to anonymize/obfuscate user-data in logs. Use a secret 64-bit random number." default="82589933">82589933</anonymization_salt> </anonymize> <docstats type="bool" desc="Enable to see document handling information in logs." default="false">false</docstats> <userstats desc="Enable user stats. i.e: logs the details of a file and user" type="bool" default="false">false</userstats> <disable_server_audit type="bool" desc="Disabled server audit dialog and notification. Admin will no longer see warnings in the application user interface. This doesn't affect log file." default="false">false</disable_server_audit> </logging> <canvas_slideshow_enabled type="bool" desc="If true, WebGl presentation rendered on the client side is enabled, otherwise interactive SVG is used." default="true">true</canvas_slideshow_enabled> <logging_ui_cmd> <merge type="bool" desc="If true, repeated commands after each other will be merged into 1 line. If false, every command will be 1 new line." default="true">true</merge> <merge_display_end_time type="bool" desc="If true, the duration of the merged command will also be logged." 
default="false">true</merge_display_end_time> <file enable="false"> <!-- If you use other path than /var/log and you run coolwsd from systemd, make sure that you enable that path in coolwsd.service (ReadWritePaths). Also the log file path must be writable by the 'cool' user. --> <property name="path" desc="Log file path.">/var/log/coolwsd-ui-cmd.log</property> <property name="purgeCount" desc="The maximum number of log archives to preserve. Use 'none' to disable purging. See Poco FileChannel.">10</property> <property name="rotateOnOpen" desc="Enable/disable log file rotation on opening.">true</property> <property name="flush" desc="Enable/disable flushing after logging each line. May harm performance. Note that without flushing after each line, the log lines from the different processes will not appear in chronological order.">false</property> </file> </logging_ui_cmd> <!-- Note to developers: When you do "make run", the trace_event[@enable] will be set on the coolwsd command line, so if you want to change it for your testing, do it in Makefile.am, not here. --> <trace_event desc="The possibility to turn on generation of a Chrome Trace Event file" enable="false"> <path desc="Output path for the Trace Event file, to which they will be written if turned on at run-time" type="string" default="/var/log/coolwsd.trace.json">/var/log/coolwsd.trace.json</path> </trace_event> <browser_logging desc="Logging in the browser console" default="false">false</browser_logging> <trace desc="Dump commands and notifications for replay. When 'snapshot' is true, the source file is copied to the path first." enable="false"> <path desc="Output path to hold trace file and docs. Use '%' for timestamp to avoid overwriting. 
For example: /some/path/to/cooltrace-%.gz" compress="true" snapshot="false"></path> <filter> <message desc="Regex pattern of messages to exclude"></message> </filter> <outgoing> <record desc="Whether or not to record outgoing messages" default="false">false</record> </outgoing> </trace> <net desc="Network settings"> <!-- On systems where localhost resolves to IPv6 [::1] address first, when net.proto is all and net.listen is loopback, coolwsd unexpectedly listens on [::1] only. You need to change net.proto to IPv4, if you want to use 127.0.0.1. --> <proto type="string" default="all" desc="Protocol to use IPv4, IPv6 or all for both">all</proto> <listen type="string" default="any" desc="Listen address that coolwsd binds to. Can be 'any' or 'loopback'.">any</listen> <!-- this allows you to shift all of our URLs into a sub-path from https://my.com/browser/a123... to https://my.com/my/sub/path/browser/a123... --> <service_root type="path" default="" desc="Prefix the base URL for all the pages, websockets, etc. with this path. This includes the discovery URL."></service_root> <post_allow desc="Allow/deny client IP address for POST(REST)." 
allow="true"> <host desc="The IPv4 private 192.168 block as plain IPv4 dotted decimal addresses.">192\.168\.[0-9]{1,3}\.[0-9]{1,3}</host> <host desc="Ditto, but as IPv4-mapped IPv6 addresses">::ffff:192\.168\.[0-9]{1,3}\.[0-9]{1,3}</host> <host desc="The IPv4 loopback (localhost) address.">127\.0\.0\.1</host> <host desc="Ditto, but as IPv4-mapped IPv6 address">::ffff:127\.0\.0\.1</host> <host desc="The IPv6 loopback (localhost) address.">::1</host> <host desc="The IPv4 private 172.16.0.0/12 subnet part 1.">172\.1[6789]\.[0-9]{1,3}\.[0-9]{1,3}</host> <host desc="Ditto, but as IPv4-mapped IPv6 addresses">::ffff:172\.1[6789]\.[0-9]{1,3}\.[0-9]{1,3}</host> <host desc="The IPv4 private 172.16.0.0/12 subnet part 2.">172\.2[0-9]\.[0-9]{1,3}\.[0-9]{1,3}</host> <host desc="Ditto, but as IPv4-mapped IPv6 addresses">::ffff:172\.2[0-9]\.[0-9]{1,3}\.[0-9]{1,3}</host> <host desc="The IPv4 private 172.16.0.0/12 subnet part 3.">172\.3[01]\.[0-9]{1,3}\.[0-9]{1,3}</host> <host desc="Ditto, but as IPv4-mapped IPv6 addresses">::ffff:172\.3[01]\.[0-9]{1,3}\.[0-9]{1,3}</host> <host desc="The IPv4 private 10.0.0.0/8 subnet (Podman).">10\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}</host> <host desc="Ditto, but as IPv4-mapped IPv6 addresses">::ffff:10\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}</host> </post_allow> <lok_allow desc="Allowed hosts as an external data source inside edited files. All allowed post_allow.host and storage.wopi entries are also considered to be allowed as a data source. 
Used for example in: PostMessage Action_InsertGraphic, =WEBSERVICE() function, external reference in the cell."> <host desc="The IPv4 private 192.168 block as plain IPv4 dotted decimal addresses.">192\.168\.[0-9]{1,3}\.[0-9]{1,3}</host> <host desc="Ditto, but as IPv4-mapped IPv6 addresses">::ffff:192\.168\.[0-9]{1,3}\.[0-9]{1,3}</host> <host desc="The IPv4 loopback (localhost) address.">127\.0\.0\.1</host> <host desc="Ditto, but as IPv4-mapped IPv6 address">::ffff:127\.0\.0\.1</host> <host desc="The IPv6 loopback (localhost) address.">::1</host> <host desc="The IPv4 private 172.16.0.0/12 subnet part 1.">172\.1[6789]\.[0-9]{1,3}\.[0-9]{1,3}</host> <host desc="Ditto, but as IPv4-mapped IPv6 addresses">::ffff:172\.1[6789]\.[0-9]{1,3}\.[0-9]{1,3}</host> <host desc="The IPv4 private 172.16.0.0/12 subnet part 2.">172\.2[0-9]\.[0-9]{1,3}\.[0-9]{1,3}</host> <host desc="Ditto, but as IPv4-mapped IPv6 addresses">::ffff:172\.2[0-9]\.[0-9]{1,3}\.[0-9]{1,3}</host> <host desc="The IPv4 private 172.16.0.0/12 subnet part 3.">172\.3[01]\.[0-9]{1,3}\.[0-9]{1,3}</host> <host desc="Ditto, but as IPv4-mapped IPv6 addresses">::ffff:172\.3[01]\.[0-9]{1,3}\.[0-9]{1,3}</host> <host desc="The IPv4 private 10.0.0.0/8 subnet (Podman).">10\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}</host> <host desc="Ditto, but as IPv4-mapped IPv6 addresses">::ffff:10\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}</host> <host desc="Localhost access by name">localhost</host> </lok_allow> <content_security_policy desc="Customize the CSP header by specifying one or more policy-directive, separated by semicolons. See w3.org/TR/CSP2"> </content_security_policy> <frame_ancestors> http://192.168.2.107:8881 http://10.1.200.64:* http://192.168.11.33:* </frame_ancestors> <connection_timeout_secs desc="Specifies the connection, send, recv timeout in seconds for connections initiated by coolwsd (such as WOPI connections)." 
type="int" default="30">30</connection_timeout_secs> <!-- this setting radically changes how online works, it should not be used in a production environment --> <proxy_prefix type="bool" default="false" desc="Enable a ProxyPrefix to be passed-in through which to redirect requests">false</proxy_prefix> </net> <ssl desc="SSL settings"> <!-- switches from https:// + wss:// to http:// + ws:// --> <enable type="bool" desc="Controls whether SSL encryption between coolwsd and the network is enabled (do not disable for production deployment). If default is false, must first be compiled with SSL support to enable." default="true">true</enable> <!-- SSL off-load can be done in a proxy, if so disable SSL, and enable termination below in production --> <termination desc="Connection via proxy where coolwsd acts as working via https, but actually uses http." type="bool" default="false">false</termination> <cert_file_path desc="Path to the cert file" type="path" relative="false">/etc/coolwsd/cert.pem</cert_file_path> <key_file_path desc="Path to the key file" type="path" relative="false">/etc/coolwsd/key.pem</key_file_path> <ca_file_path desc="Path to the ca file" type="path" relative="false">/etc/coolwsd/ca-chain.cert.pem</ca_file_path> <ssl_verification desc="Enable or disable SSL verification of hosts remote to coolwsd. If true SSL verification will be strict, otherwise certs of hosts will not be verified. You may have to disable it in test environments with self-signed certificates." 
type="string" default="false">false</ssl_verification> <cipher_list desc="List of OpenSSL ciphers to accept" type="string" default="ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH"></cipher_list> <hpkp desc="Enable HTTP Public key pinning" enable="false" report_only="false"> <max_age desc="HPKP's max-age directive - time in seconds browser should remember the pins" enable="true" type="uint" default="1000">1000</max_age> <report_uri desc="HPKP's report-uri directive - pin validation failure are reported at this URL" enable="false" type="string"></report_uri> <pins desc="Base64 encoded SPKI fingerprints of keys to be pinned"> <pin></pin> </pins> </hpkp> <sts desc="Strict-Transport-Security settings, per rfc6797. Subdomains are always included."> <enabled desc="Whether or not Strict-Transport-Security is enabled. Enable only when ready for production. Cannot be disabled without resetting the browsers." type="bool" default="false">false</enabled> <max_age desc="Strict-Transport-Security max-age directive, in seconds. 0 is allowed; please see rfc6797 for details. Defaults to 1 year." type="int" default="31536000">31536000</max_age> </sts> </ssl> <security desc="Altering these defaults potentially opens you to significant risk"> <seccomp desc="Should failure to enable seccomp system call filtering be a fatal error." type="bool" default="true">true</seccomp> <!-- deprecated: If capabilities is 'false', coolwsd will assume mount_namespaces of 'true' to achieve this goal, only avoiding chroot for process isolation if linux namespaces are unavailable --> <capabilities desc="Should we require capabilities to isolate processes into chroot jails" type="bool" default="true">true</capabilities> <jwt_expiry_secs desc="Time in seconds before the Admin Console's JWT token expires" type="int" default="1800">1800</jwt_expiry_secs> <enable_macros_execution desc="Specifies whether the macro execution is enabled in general. 
This will enable Basic and Python scripts to execute both installed and from documents. If it is set to false, the macro_security_level is ignored. If it is set to true, the mentioned entry specified the level of macro security." type="bool" default="false">false</enable_macros_execution> <macro_security_level desc="Level of Macro security. 1 (Medium) Confirmation required before executing macros from untrusted sources. 0 (Low, not recommended) All macros will be executed without confirmation." type="int" default="1">1</macro_security_level> <enable_websocket_urp desc="Should we enable URP (UNO remote protocol) communication over the websocket. This allows full control of the Kit child server to anyone with access to the websocket including executing macros without confirmation or running arbitrary shell commands in the jail." type="bool" default="false">false</enable_websocket_urp> <enable_metrics_unauthenticated desc="When enabled, the /cool/getMetrics endpoint will not require authentication." type="bool" default="false">false</enable_metrics_unauthenticated> <server_signature desc="Whether to send server signature in HTTP response headers" type="bool" default="false">false</server_signature> </security> <certificates> <database_path type="string" desc="Path to the NSS certificates that are available to all users" default=""></database_path> </certificates> <watermark> <opacity desc="Opacity of on-screen watermark from 0.0 to 1.0" type="double" default="0.2">0.2</opacity> <text desc="Watermark text to be displayed on the document if entered" type="string"></text> </watermark> <user_interface> <mode type="string" desc="Controls the user interface style. 
The 'default' means: Take the value from ui_defaults, or decide for one of compact or tabbed (default|compact|tabbed)" default="default">default</mode> <use_integration_theme desc="Use theme from the integrator" type="bool" default="true">true</use_integration_theme> <statusbar_save_indicator desc="Show saving status indicator in the statusbar" type="bool" default="true">true</statusbar_save_indicator> </user_interface> <storage desc="Backend storage"> <filesystem allow="false" /> <wopi desc="Allow/deny wopi storage." allow="true"> <max_file_size desc="Maximum document size in bytes to load. 0 for unlimited." type="uint">0</max_file_size> <locking desc="Locking settings"> <refresh desc="How frequently we should re-acquire a lock with the storage server, in seconds (default 15 mins) or 0 for no refresh" type="int" default="900">900</refresh> </locking> <alias_groups desc="default mode is 'first' it allows only the first host when groups are not defined. set mode to 'groups' and define group to allow multiple host and its aliases" mode="groups"> <group>192.168.2.107:8880,localhost:3000,10.1.200.64</group> </alias_groups> <is_legacy_server desc="Set to true for legacy server that need deprecated headers." type="bool" default="false">false</is_legacy_server> </wopi> <ssl desc="SSL settings"> <as_scheme type="bool" default="true" desc="When set we exclusively use the WOPI URI's scheme to enable SSL for storage">true</as_scheme> <enable type="bool" desc="If as_scheme is false or not set, this can be set to force SSL encryption between storage and coolwsd. When empty this defaults to following the ssl.enable setting"></enable> <cert_file_path desc="Path to the cert file. When empty this defaults to following the ssl.cert_file_path setting" type="path" relative="false"></cert_file_path> <key_file_path desc="Path to the key file. 
When empty this defaults to following the ssl.key_file_path setting" type="path" relative="false"></key_file_path> <ca_file_path desc="Path to the ca file. When empty this defaults to following the ssl.ca_file_path setting" type="path" relative="false"></ca_file_path> <cipher_list desc="List of OpenSSL ciphers to accept. If empty the defaults are used. These can be overridden only if absolutely needed."></cipher_list> </ssl> </storage> <admin_console desc="Web admin console settings."> <enable desc="Enable the admin console functionality" type="bool" default="true">true</enable> <enable_pam desc="Enable admin user authentication with PAM" type="bool" default="false">false</enable_pam> <username desc="The username of the admin console. Ignored if PAM is enabled."></username> <password desc="The password of the admin console. Deprecated on most platforms. Instead, use PAM or coolconfig to set up a secure password."></password> <logging desc="Log admin activities irrespective of logging.level"> <admin_login desc="log when an admin logged into the console" type="bool" default="true">true</admin_login> <metrics_fetch desc="log when metrics endpoint is accessed and metrics endpoint authentication is enabled" type="bool" default="true">true</metrics_fetch> <monitor_connect desc="log when external monitor gets connected" type="bool" default="true">true</monitor_connect> <admin_action desc="log when admin does some action for example killing a process" type="bool" default="true">true</admin_action> </logging> </admin_console> <monitors desc="Addresses of servers we connect to on start for monitoring"> <!-- <monitor desc="Address of the monitor and interval after which it should try reconnecting after disconnect" retryInterval="20">wss://foobar:234/ws</monitor> --> </monitors> <quarantine_files desc="Files are stored here to be examined later in cases of crashes or similar situation." default="false" enable="false"> <limit_dir_size_mb desc="Maximum directory size, in MBs. 
On exceeding the specified limit, older files will be deleted." default="250" type="uint">250</limit_dir_size_mb> <max_versions_to_maintain desc="How many versions of the same file to keep." default="5" type="uint">5</max_versions_to_maintain> <path desc="Absolute path of the directory under which quarantined files will be stored. Do not use a relative path." type="path" relative="false"></path> <expiry_min desc="Time in mins after quarantined files will be deleted." type="int" default="3000">3000</expiry_min> </quarantine_files> <cache_files desc="Files are cached here to speed up config support."> <path desc="Absolute path of the directory under which cached files will be stored. Do not use a relative path." type="path" relative="false"></path> <expiry_min desc="Time in mins after disuse at which cache files will be deleted." type="int" default="3000">1000</expiry_min> </cache_files> <extra_export_formats desc="Enable various extra export formats for additional compatibility. Note that disabling options here *only* disables them visually: these are all 'safe' to export, it might just be undesirable to show them, so you can't disable exporting these server-side"> <impress_swf desc="Enable exporting Adobe flash .swf files from presentations" type="bool" default="false">false</impress_swf> <impress_bmp desc="Enable exporting .bmp bitmap files from presentation slides" type="bool" default="false">false</impress_bmp> <impress_gif desc="Enable exporting .gif image files from presentation slides" type="bool" default="false">false</impress_gif> <impress_png desc="Enable exporting .png image files from presentation slides" type="bool" default="false">false</impress_png> <impress_svg desc="Enable exporting interactive .svg image files from presentations" type="bool" default="false">false</impress_svg> <impress_tiff desc="Enable exporting .tiff image files from presentation slides" type="bool" default="false">false</impress_tiff> </extra_export_formats> <serverside_config> 
<idle_timeout_secs desc="The maximum number of seconds before unloading an idle sub forkit. Defaults to 1 hour." type="uint" default="3600">3600</idle_timeout_secs> </serverside_config> <remote_config> <remote_url desc="remote server to which you will send request to get remote config in response" type="string" default=""></remote_url> </remote_config> <stop_on_config_change desc="Stop coolwsd whenever config files change." type="bool" default="false">false</stop_on_config_change> <remote_font_config> <url desc="URL of optional JSON file that lists fonts to be included in Online" type="string" default=""></url> </remote_font_config> <fonts_missing> <handling desc="How to handle fonts missing in a document: 'report', 'log', 'both', or 'ignore'" type="string" default="log">log</handling> </fonts_missing> <indirection_endpoint> <url desc="URL endpoint to server which servers routeToken in json format" type="string" default=""></url> <migration_timeout_secs desc="The maximum number of seconds waiting for shutdown migration message from indirection server before unloading an document. Defaults to 180 second." type="uint" default="180">180</migration_timeout_secs> <geolocation_setup> <enable desc="Enable geolocation_setup when using indirection server with geolocation configuration" type="bool" default="false">false</enable> <timezone desc="IANA timezone of server. For example: Europe/Berlin" type="string"></timezone> <allowed_websocket_origins desc="Origin header to get accepted during websocket upgrade"> <!-- <origin></origin> --> </allowed_websocket_origins> </geolocation_setup> <server_name desc="server name to show in cluster overview admin panel" type="string" default=""></server_name> </indirection_endpoint> <home_mode> <enable desc="Home users can enable this setting, which in turn disables welcome screen and user feedback popups, but also limits concurrent open connections to 20 and concurrent open documents to 10. 
The default means that number of concurrent open connections and concurrent open documents are unlimited, but welcome screen and user feedback cannot be switched off." type="bool" default="false">false</enable> </home_mode> <zotero desc="Zotero plugin configuration. For more details about Zotero visit https://www.zotero.org/"> <enable desc="Enable Zotero plugin." type="bool" default="true">true</enable> </zotero> <help_url desc="The Help root URL, or empty for no help (hides the Help buttons)" type="string" default="https://help.collaboraoffice.com/help.html?">https://help.collaboraoffice.com/help.html?</help_url> <overwrite_mode> <enable desc="Enable overwrite mode (user can use insert key)" type="bool" default="false">false</enable> </overwrite_mode> <wasm desc="WASM-specific settings"> <enable desc="Enable WASM support" type="bool" default="false">false</enable> <force desc="When enabled, all requests are redirected to WASM." type="bool" default="false">false</force> </wasm> <document_signing desc="Document signing settings"> <enable desc="Enable document signing" type="bool" default="true">true</enable> </document_signing> </config> 这是我的coolwsd.xml 这样可以吗
11-07
内容概要:本文介绍了一个基于多传感器融合的定位系统设计方案,采用GPS、里程计和电子罗盘作为定位传感器,利用扩展卡尔曼滤波(EKF)算法对多源传感器数据进行融合处理,最终输出目标的滤波后位置信息,并提供了完整的Matlab代码实现。该方法有效提升了定位精度与稳定性,尤其适用于存在单一传感器误差或信号丢失的复杂环境,如自动驾驶、移动机器人导航等领域。文中详细阐述了各传感器的数据建模方式、状态转移与观测方程构建,以及EKF算法的具体实现步骤,具有较强的工程实践价值。; 适合人群:具备一定Matlab编程基础,熟悉传感器原理和滤波算法的高校研究生、科研人员及从事自动驾驶、机器人导航等相关领域的工程技术人员。; 使用场景及目标:①学习和掌握多传感器融合的基本理论与实现方法;②应用于移动机器人、无人车、无人机等系统的高精度定位与导航开发;③作为EKF算法在实际工程中应用的教学案例或项目参考; 阅读建议:建议读者结合Matlab代码逐行理解算法实现过程,重点关注状态预测与观测更新模块的设计逻辑,可尝试引入真实传感器数据或仿真噪声环境以验证算法鲁棒性,并进一步拓展至UKF、PF等更高级滤波算法的研究与对比。
内容概要:文章围绕智能汽车新一代传感器的发展趋势,重点阐述了BEV(鸟瞰图视角)端到端感知融合架构如何成为智能驾驶感知系统的新范式。传统后融合与前融合方案因信息丢失或算力需求过高难以满足高阶智驾需求,而基于Transformer的BEV融合方案通过统一坐标系下的多源传感器特征融合,在保证感知精度的同时兼顾算力可行性,显著提升复杂场景下的鲁棒性与系统可靠性。此外,文章指出BEV模型落地面临大算力依赖与高数据成本的挑战,提出“数据采集-模型训练-算法迭代-数据反哺”的高效数据闭环体系,通过自动化标注与长尾数据反馈实现算法持续进化,降低对人工标注的依赖,提升数据利用效率。典型企业案例进一步验证了该路径的技术可行性与经济价值。; 适合人群:从事汽车电子、智能驾驶感知算法研发的工程师,以及关注自动驾驶技术趋势的产品经理和技术管理者;具备一定自动驾驶基础知识,希望深入了解BEV架构与数据闭环机制的专业人士。; 使用场景及目标:①理解BEV+Transformer为何成为当前感知融合的主流技术路线;②掌握数据闭环在BEV模型迭代中的关键作用及其工程实现逻辑;③为智能驾驶系统架构设计、传感器选型与算法优化提供决策参考; 阅读建议:本文侧重技术趋势分析与系统级思考,建议结合实际项目背景阅读,重点关注BEV融合逻辑与数据闭环构建方法,并可延伸研究相关企业在舱泊一体等场景的应用实践。
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值