dist_auto_connect的作用

Erlang节点连接配置
本文介绍了Erlang中节点连接的自动建立机制,并探讨了如何通过设置内核参数dist_auto_connect为false来禁用这一功能,从而实现显式连接节点。

转载:http://mryufeng.iteye.com/blog/343202


Normally, connections are established automatically when another node is referenced(也就是说给节点发信息 ping rpc等等). This functionality can be disabled by setting the Kernel configuration parameter dist_auto_connect to false, see kernel(6). In this case, connections must be established explicitly by calling net_kernel:connect_node/1.

这个参数保证了只有你想 explicitly(显式)连接的节点才会出现在你的 nodes() 列表里。因为不需要的节点连接也要维护、要发 tick 心跳包等,白白浪费资源!

节点间如何established automatically请参考另外一篇 http://mryufeng.iteye.com/admin/blogs/120666

// RRT# (RRT-sharp) sampling-based path planner.
//
// Grows a rapidly-exploring random tree from start_node_ toward goal_node_
// inside the time budget search_time_ (or until max_tree_node_nums_ nodes),
// maintaining a kd-tree over tree vertices for nearest/range queries.
// Each accepted sample is wired to the cost-optimal parent within
// search_radius_, and a priority-queue-driven rewire pass propagates cost
// improvements through the neighborhood (the RRT# consistency step).
// When a better path to the goal is found it is recorded in path_list_ /
// solution_cost_time_pair_list_ and the informed (or GUILD) sampling region
// is shrunk accordingly.
//
// Parameters:
//   s - start position; used only to size the informed-set (c_square).
//   g - goal position;  used only to size the informed-set (c_square).
//   NOTE(review): the tree itself is seeded from the members start_node_ /
//   goal_node_, not from s/g — presumably callers set those beforehand;
//   confirm against the caller.
// Returns: true iff at least one collision-free path to goal_node_ was found.
// Side effects: fills path_list_, solution_cost_time_pair_list_, final_path_,
//   first_path_use_time_, final_path_use_time_, and publishes visualizations.
bool rrt_sharp(const Eigen::Vector3d &s, const Eigen::Vector3d &g)
{
  ros::Time rrt_start_time = ros::Time::now();
  bool goal_found = false;
  // (||g - s|| / 2)^2, used below to derive the informed-ellipsoid minor axes.
  double c_square = (g - s).squaredNorm() / 4.0;

  /* kd tree init */
  kdtree *kd_tree = kd_create(3);
  // Add start and goal nodes to kd tree
  // NOTE(review): despite the comment above, only the start node is inserted
  // here — the goal is connected explicitly in step 2 below.
  kd_insert3(kd_tree, start_node_->x[0], start_node_->x[1], start_node_->x[2], start_node_);

  /* main loop: sample until the time budget or the node budget is exhausted */
  int idx = 0;
  for (idx = 0; (ros::Time::now() - rrt_start_time).toSec() < search_time_ && valid_tree_node_nums_ < max_tree_node_nums_; ++idx)
  {
    /* biased random sampling */
    Eigen::Vector3d x_rand;
    sampler_.samplingOnce(x_rand);
    // samplingOnce(x_rand);
    if (!map_ptr_->isStateValid(x_rand))
    {
      continue;  // sample in collision, discard
    }
    struct kdres *p_nearest = kd_nearest3(kd_tree, x_rand[0], x_rand[1], x_rand[2]);
    if (p_nearest == nullptr)
    {
      ROS_ERROR("nearest query error");
      continue;
    }
    RRTNode3DPtr nearest_node = (RRTNode3DPtr)kd_res_item_data(p_nearest);
    kd_res_free(p_nearest);
    // Pull the sample back toward its nearest tree node, at most steer_length_ away.
    Eigen::Vector3d x_new = steer(nearest_node->x, x_rand, steer_length_);
    if (!map_ptr_->isSegmentValid(nearest_node->x, x_new))
    {
      continue;  // edge from nearest node is blocked
    }

    /* 1. find parent */
    /* kd_tree bounds search for parent */
    Neighbour neighbour_nodes;
    neighbour_nodes.nearing_nodes.reserve(50);
    neighbour_nodes.center = x_new;
    struct kdres *nbr_set;
    nbr_set = kd_nearest_range3(kd_tree, x_new[0], x_new[1], x_new[2], search_radius_);
    if (nbr_set == nullptr)
    {
      ROS_ERROR("bkwd kd range query error");
      break;
    }
    while (!kd_res_end(nbr_set))
    {
      RRTNode3DPtr curr_node = (RRTNode3DPtr)kd_res_item_data(nbr_set);
      neighbour_nodes.nearing_nodes.emplace_back(curr_node, false, false);
      // store range query result so that we dont need to query again for rewire;
      kd_res_next(nbr_set); // go to next in kd tree range query result
    }
    kd_res_free(nbr_set); // reset kd tree range query

    /* choose parent from kd tree range query result*/
    double dist2nearest = calDist(nearest_node->x, x_new);
    double min_dist_from_start(nearest_node->cost_from_start + dist2nearest);
    double cost_from_p(dist2nearest);
    RRTNode3DPtr min_node(nearest_node); // set the nearest_node as the default parent
    // TODO sort by potential cost-from-start
    for (auto &curr_node : neighbour_nodes.nearing_nodes)
    {
      if (curr_node.node_ptr == nearest_node) // the nearest_node already calculated and checked collision free
      {
        continue;
      }
      // check potential first, then check edge collision
      double curr_dist = calDist(curr_node.node_ptr->x, x_new);
      double potential_dist_from_start = curr_node.node_ptr->cost_from_start + curr_dist;
      if (min_dist_from_start > potential_dist_from_start)
      {
        bool connected = map_ptr_->isSegmentValid(curr_node.node_ptr->x, x_new);
        curr_node.is_checked = true;  // remember the collision result for the rewire pass
        if (connected)
        {
          curr_node.is_valid = true;
          cost_from_p = curr_dist;
          min_dist_from_start = potential_dist_from_start;
          min_node = curr_node.node_ptr;
        }
      }
    }

    /* parent found within radius, then add a node to rrt and kd_tree */
    // sample rejection: discard samples that cannot possibly improve the
    // current best solution (cost-to-come + straight-line cost-to-go bound).
    double dist_to_goal = calDist(x_new, goal_node_->x);
    if (min_dist_from_start + dist_to_goal >= goal_node_->cost_from_start)
    {
      // ROS_WARN("parent found but sample rejected");
      continue;
    }

    /* 1.1 add the randomly sampled node to rrt_tree */
    RRTNode3DPtr new_node(nullptr);
    new_node = addTreeNode(min_node, x_new, min_dist_from_start, cost_from_p, dist_to_goal);

    /* 1.2 add the randomly sampled node to kd_tree */
    kd_insert3(kd_tree, x_new[0], x_new[1], x_new[2], new_node);
    // end of find parent

    /* 2. try to connect to goal if possible */
    if (dist_to_goal <= search_radius_)
    {
      bool is_connected2goal = map_ptr_->isSegmentValid(x_new, goal_node_->x);
      if (is_connected2goal && goal_node_->cost_from_start > dist_to_goal + new_node->cost_from_start) // a better path found
      {
        if (!goal_found)
        {
          first_path_use_time_ = (ros::Time::now() - rrt_start_time).toSec();
        }
        goal_found = true;
        changeNodeParent(goal_node_, new_node, dist_to_goal);
        vector<Eigen::Vector3d> curr_best_path;
        fillPath(goal_node_, curr_best_path);
        path_list_.emplace_back(curr_best_path);
        solution_cost_time_pair_list_.emplace_back(goal_node_->cost_from_start, (ros::Time::now() - rrt_start_time).toSec());
        // vis_ptr_->visualize_path(curr_best_path, "rrt_sharp_final_path");
        // vis_ptr_->visualize_pointcloud(curr_best_path, "rrt_sharp_final_wpts");
        if (use_informed_sampling_)
        {
          // Shrink the informed ellipsoid: major semi-axis = best cost / 2,
          // minor semi-axes from sqrt(a^2 - c^2) with c the focal half-distance.
          scale_[0] = goal_node_->cost_from_start / 2.0;
          scale_[1] = sqrt(scale_[0] * scale_[0] - c_square);
          scale_[2] = scale_[1];
          sampler_.setInformedSacling(scale_);  // NOTE(review): "Sacling" typo is in the project API name; keep as-is
          std::vector<visualization::ELLIPSOID> ellps;
          ellps.emplace_back(trans_, scale_, rot_);
          vis_ptr_->visualize_ellipsoids(ellps, "informed_set", visualization::yellow, 0.2);
        }
        else if (use_GUILD_sampling_)
        {
          // GUILD sampling: two local informed sets split at a beacon node.
          RRTNode3DPtr beacon_node = beaconSelect();
          calInformedSet(beacon_node->cost_from_start, start_node_->x, beacon_node->x, scale1_, trans1_, rot1_);
          calInformedSet(goal_node_->cost_from_start - beacon_node->cost_from_start, beacon_node->x, goal_node_->x, scale2_, trans2_, rot2_);
          sampler_.setGUILDInformed(scale1_, trans1_, rot1_, scale2_, trans2_, rot2_);
          std::vector<visualization::ELLIPSOID> ellps;
          ellps.emplace_back(trans1_, scale1_, rot1_);
          ellps.emplace_back(trans2_, scale2_, rot2_);
          vis_ptr_->visualize_ellipsoids(ellps, "local_set", visualization::green, 0.2);
        }
      }
    }

    /* 3.rewire */
    std::priority_queue<RRTNode3DPtr, RRTNode3DPtrVector, RRTNodeComparator> rewire_queue;
    for (auto &curr_node : neighbour_nodes.nearing_nodes)
    {
      double dist_to_potential_child = calDist(new_node->x, curr_node.node_ptr->x);
      bool not_consistent = new_node->cost_from_start + dist_to_potential_child < curr_node.node_ptr->cost_from_start ? 1 : 0;
      bool promising = new_node->cost_from_start + dist_to_potential_child + curr_node.node_ptr->heuristic_to_goal < goal_node_->cost_from_start ? 1 : 0;
      if (not_consistent && promising)
      {
        bool connected(false);
        if (curr_node.is_checked)
          connected = curr_node.is_valid;  // reuse collision result from the parent-selection pass
        else
          connected = map_ptr_->isSegmentValid(new_node->x, curr_node.node_ptr->x);
        // If we can get to a node via the sampled_node faster than via it's existing parent then change the parent
        if (connected)
        {
          double best_cost_before_rewire = goal_node_->cost_from_start;
          changeNodeParent(curr_node.node_ptr, new_node, dist_to_potential_child);
          rewire_queue.emplace(curr_node.node_ptr);
          if (best_cost_before_rewire > goal_node_->cost_from_start)
          {
            // The rewire improved the goal cost: record the new best path and
            // update the sampling region (same bookkeeping as in step 2).
            vector<Eigen::Vector3d> curr_best_path;
            fillPath(goal_node_, curr_best_path);
            path_list_.emplace_back(curr_best_path);
            solution_cost_time_pair_list_.emplace_back(goal_node_->cost_from_start, (ros::Time::now() - rrt_start_time).toSec());
            // vis_ptr_->visualize_path(curr_best_path, "rrt_sharp_final_path");
            // vis_ptr_->visualize_pointcloud(curr_best_path, "rrt_sharp_final_wpts");
            if (use_informed_sampling_)
            {
              scale_[0] = goal_node_->cost_from_start / 2.0;
              scale_[1] = sqrt(scale_[0] * scale_[0] - c_square);
              scale_[2] = scale_[1];
              sampler_.setInformedSacling(scale_);
              std::vector<visualization::ELLIPSOID> ellps;
              ellps.emplace_back(trans_, scale_, rot_);
              vis_ptr_->visualize_ellipsoids(ellps, "informed_set", visualization::yellow, 0.2);
            }
            else if (use_GUILD_sampling_)
            {
              RRTNode3DPtr beacon_node = beaconSelect();
              calInformedSet(beacon_node->cost_from_start, start_node_->x, beacon_node->x, scale1_, trans1_, rot1_);
              calInformedSet(goal_node_->cost_from_start - beacon_node->cost_from_start, beacon_node->x, goal_node_->x, scale2_, trans2_, rot2_);
              sampler_.setGUILDInformed(scale1_, trans1_, rot1_, scale2_, trans2_, rot2_);
              std::vector<visualization::ELLIPSOID> ellps;
              ellps.emplace_back(trans1_, scale1_, rot1_);
              ellps.emplace_back(trans2_, scale2_, rot2_);
              vis_ptr_->visualize_ellipsoids(ellps, "local_set", visualization::green, 0.2);
            }
          }
        }
      }
      /* go to the next entry */
    }

    // Propagate cost improvements outward (RRT# consistency pass).
    while (!rewire_queue.empty())
    {
      RRTNode3DPtr curr_rewire_node = rewire_queue.top();
      // NOTE(review): mutating std::priority_queue's underlying storage via
      // const_cast + make_heap relies on the container being contiguous and on
      // implementation details of priority_queue; it "re-sorts" keys that were
      // changed by changeNodeParent(), but it is fragile — consider a custom
      // mutable heap instead. Left as-is to preserve behavior.
      std::make_heap(const_cast<RRTNode3DPtr *>(&rewire_queue.top()), const_cast<RRTNode3DPtr *>(&rewire_queue.top()) + rewire_queue.size(), RRTNodeComparator()); // re-order the queue every time before pop a node, since the key may change during changeNodeParent()
      rewire_queue.pop();
      struct kdres *nbr_set;
      nbr_set = kd_nearest_range3(kd_tree, curr_rewire_node->x[0], curr_rewire_node->x[1], curr_rewire_node->x[2], search_radius_);
      if (nbr_set == nullptr)
      {
        ROS_ERROR("bkwd kd range query error");
        break;
      }
      while (!kd_res_end(nbr_set))
      {
        RRTNode3DPtr curr_node = (RRTNode3DPtr)kd_res_item_data(nbr_set);
        double dist_to_potential_child = calDist(curr_rewire_node->x, curr_node->x);
        bool not_consistent = curr_rewire_node->cost_from_start + dist_to_potential_child < curr_node->cost_from_start ? 1 : 0;
        bool promising = curr_rewire_node->cost_from_start + dist_to_potential_child + curr_node->heuristic_to_goal < goal_node_->cost_from_start ? 1 : 0;
        if (not_consistent && promising) // If we can get to a node via the curr_rewire_node faster than via it's existing parent then change the parent
        {
          bool connected = map_ptr_->isSegmentValid(curr_rewire_node->x, curr_node->x);
          // If we can get to a node via the sampled_node faster than via it's existing parent then change the parent
          if (connected)
          {
            double best_cost_before_rewire = goal_node_->cost_from_start;
            changeNodeParent(curr_node, curr_rewire_node, dist_to_potential_child);
            rewire_queue.emplace(curr_node);
            if (best_cost_before_rewire > goal_node_->cost_from_start)
            {
              vector<Eigen::Vector3d> curr_best_path;
              fillPath(goal_node_, curr_best_path);
              path_list_.emplace_back(curr_best_path);
              solution_cost_time_pair_list_.emplace_back(goal_node_->cost_from_start, (ros::Time::now() - rrt_start_time).toSec());
              // NOTE(review): unlike the two earlier best-path blocks, this one
              // publishes the path visualization unconditionally and does not
              // update the informed/GUILD sampling set — possibly intentional,
              // possibly an omission; confirm with the author.
              vis_ptr_->visualize_path(curr_best_path, "rrt_sharp_final_path");
              vis_ptr_->visualize_pointcloud(curr_best_path, "rrt_sharp_final_wpts");
            }
          }
        }
        kd_res_next(nbr_set); // go to next in kd tree range query result
      }
      kd_res_free(nbr_set); // reset kd tree range query
    }
    /* end of rewire */
  }
  /* end of sample once */

  // Visualize the whole tree (vertices as balls, edges as line pairs).
  vector<Eigen::Vector3d> vertice;
  vector<std::pair<Eigen::Vector3d, Eigen::Vector3d>> edges;
  sampleWholeTree(start_node_, vertice, edges);
  std::vector<visualization::BALL> balls;
  balls.reserve(vertice.size());
  visualization::BALL node_p;
  node_p.radius = 0.2;
  for (size_t i = 0; i < vertice.size(); ++i)
  {
    node_p.center = vertice[i];
    balls.push_back(node_p);
  }
  vis_ptr_->visualize_balls(balls, "tree_vertice", visualization::Color::blue, 1.0);
  vis_ptr_->visualize_pairline(edges, "tree_edges", visualization::Color::red, 0.1);

  if (goal_found)
  {
    final_path_use_time_ = (ros::Time::now() - rrt_start_time).toSec();
    fillPath(goal_node_, final_path_);
    ROS_INFO_STREAM("[RRT#]: first_path_use_time: " << first_path_use_time_ << ", length: " << solution_cost_time_pair_list_.front().first);
  }
  else if (valid_tree_node_nums_ == max_tree_node_nums_)
  {
    ROS_ERROR_STREAM("[RRT#]: NOT CONNECTED TO GOAL after " << max_tree_node_nums_ << " nodes added to rrt-tree");
  }
  else
  {
    ROS_ERROR_STREAM("[RRT#]: NOT CONNECTED TO GOAL after " << (ros::Time::now() - rrt_start_time).toSec() << " seconds");
  }
  // NOTE(review): kd_tree is never kd_free()'d on any exit path — leaks the
  // kd-tree structure every call; confirm whether a wrapper frees it.
  return goal_found;
}
// 注释上述代码 (original note: "annotate the above code")
# 09-19
#
# Task spec (original, Chinese): 修改脚本功能:
#  1. 原逻辑中需要从表中取相关信息,修改为磁盘价格从本地json文件获取
#  2. 实例价格修改为从云厂商接口获取后直接计算,取折后价格,并且云接口所需参数需要从实例列表返回后传入,
#     比如腾讯所需的镜像id等参数,其他云也类似
#  3. 只有最终结果asktao_dist_price和dist_list区组对照表和gs_info,其余不再入库,如果必须使用,写入本地json
#     HuaWei SAS 0.35 / HuaWei STAT 0.3 / HuaWei SSD 1
#     Tencent CLOUD_BASIC 0.3 / CLOUD_PREMIUM 0.35 / CLOUD_SSD 1.1
#     AliCloud cloud 0.3 / cloud_efficiency 0.35 / cloud_ssd 1 / cloud_essd 1
#     Tencent CLOUD_BSSD 0.5 / CLOUD_TSSD 0.65 / CLOUD_HSSD 0.65
#     Volcengi ESSD_PL0 0.65
#  3. 最终费用仍为实例价格+磁盘价格,不计算打折部分,价格直接是折后的
# (Translation: disk prices should come from a local JSON file; instance prices
#  from the cloud vendors' price APIs, already-discounted; only the final
#  asktao_dist_price / dist_list / gs_info tables keep being written to MySQL.)
#
# main.py
# --------
#_*_coding:utf-8_*_
# Daily per-district game-server cost report: pulls instance/disk inventory
# from cloud vendor SDKs (cloud.py), enriches with ucenter metadata, computes
# per-district prices and writes them into the game_server_price MySQL DB.
# NOTE(review): this file mixes Python 2 constructs (print statements,
# reload(sys)) with Python 3 branches — it only runs under Python 2 as written.
from cloud import alicloud
from cloud import tencent
from cloud import huawei
from cloud import volcengine
import json
import sys
import MySQLdb as pymysql
import datetime
import time
from decimal import Decimal
if sys.version_info.major == 3:
    import urllib.request as urllib2
    from functools import reduce
else:
    import urllib2
    reload(sys)
    sys.setdefaultencoding('utf-8')

# District ids excluded from all reports (test/retired districts — presumably;
# TODO confirm the meaning of each id with the owner).
remove_dist = [-200, 11, 12, 35, 1, -100, 258, 259, 368, 20004, 440, 444, 10000]

# NOTE(review): database credentials are hardcoded — move to a secrets store
# or environment variables.
db_config = { "charset": "utf8", "db": "game_server_price", "host": "10.14.101.113", "passwd": "NGI", "port": 30001, "user": "grafana", }
center_db_config = { "charset": "utf8", "db": "dw_game_wd", "host": "10.14.50.2", "passwd": "GW09iO", "port": 3306, "user": "ops_read", }


def mysql_conn(sql, values_list=[], **config):
    # Run one statement (executemany when values_list is given), commit, and
    # return the fetched rows. Exits the process on any DB error.
    # NOTE(review): mutable default arg values_list=[]; cursor.fetchall() is
    # called after conn.close(), and conn/cursor leak when connect() itself
    # fails — works with MySQLdb in practice but is fragile; confirm.
    try:
        conn = pymysql.connect(**config)
        cursor = conn.cursor()
        if values_list:
            cursor.executemany(sql, values_list)
        else:
            cursor.execute(sql)
        conn.commit()
    except Exception as e:
        print(e)
        #print(sql)
        sys.exit(e)
    conn.close()
    return cursor.fetchall()


def get_online_number():
    # Average of each day's max online count for the current month, per district.
    # Days with max <= 100 are dropped so that a district opened mid-month is not
    # averaged down by pre-launch zeros; newly deployed but unopened districts
    # simply do not appear (callers handle the missing key).
    sql = "select" \
        " dist,avg(cnt_max) from dw_game_wd.fact_online_day " \
        "where" \
        " DATE_FORMAT( log_date, '%Y%m' ) = DATE_FORMAT( CURRENT_DATE, '%Y%m' ) and cnt_max > 100 group by dist;"
    result = mysql_conn(sql, **center_db_config)
    online_info = {}
    for dist in result:
        online_info[dist[0]] = dist[1]
    return online_info


def get_servers():
    # Collect instance and disk inventories from each enabled cloud and replace
    # today's rows in instances_info / disk_info. Returns the instance list.
    all_servers_list = []
    all_disks_list = []
    #cloud_list = [huawei, alicloud, tencent, volcengine]
    cloud_list = [volcengine]
    for cloud in cloud_list:
        instances = cloud()
        result_cloud = instances.run_cloud()
        # NOTE(review): volcengine.run_cloud() returns the key "instances",
        # but this reads the (misspelled) "instences" used by alicloud/tencent
        # — KeyError for volcengine; the key names must be unified.
        server_list = result_cloud["instences"]
        disk_list = result_cloud["disks"]
        if not server_list:
            continue
        all_servers_list = all_servers_list + server_list
        if not disk_list:
            continue
        all_disks_list = all_disks_list + disk_list
    mysql_conn("delete from game_server_price.disk_info where date=CURRENT_DATE;", **db_config)
    mysql_conn("delete from game_server_price.instances_info where date=CURRENT_DATE;", **db_config)
    values_list = []
    for instance in all_servers_list:
        name = instance['name']
        ip = instance['ip']
        instance_type = instance['instance_type']
        company = instance['company']
        id = instance['id']
        values_list.append([name, ip, instance_type, company, id])
    sql = 'insert into game_server_price.instances_info value (%s,%s,%s,%s,%s,CURRENT_DATE)'
    mysql_conn(sql, values_list, **db_config)
    sql = 'insert into game_server_price.disk_info value (%s,%s,%s,%s,%s,0,CURRENT_DATE)'
    mysql_conn(sql, all_disks_list, **db_config)
    return all_servers_list


def get_ucenter_info(mainid=2):
    # Pull per-district asset info and game-line counts from the ucenter HTTP
    # API, then refresh gs_info / instances_center / dist_list tables.
    ucurl = "http://creator-sre.gyyx.cn/app_ass_info?main_id=%s" % (mainid)
    result_url = urllib2.urlopen(ucurl, timeout=30)
    result_url_data = result_url.read()
    alldata = json.loads(result_url_data)['data']
    #alldata = json.loads(result_url_data)
    dist_info_list = []
    tmp_info = {}
    for data in alldata:
        if data['app_name'] in ['ldb_s', 'ldb', 'stat'] or int(data['dist_id']) in remove_dist:
            continue
        ip = data['ass_ip']
        dist_id = data['dist_id']
        name = data['ass_gysn']
        if not name:
            print(data)  # surface records with a missing asset name for debugging
        app_name = data['app_name']
        if app_name == "LB":continue
        # 'setting' looks like e.g. "...+100G+50G"; everything after the first
        # "+" is a data-disk size in GB — TODO confirm the format.
        disk_size_setting = data['setting'].replace('G', '').replace(' ', '').split("+")[1:]
        if disk_size_setting == []:
            disk_size = 0
        else:
            disk_size = reduce(lambda x, y: x + y, map(int, disk_size_setting))
        dist_info_list.append([dist_id, name, app_name, ip, disk_size])
        tmp_info[data['ip']] = name
    line_info_list = []
    ucurl_line = 'http://creator-sre.gyyx.cn/gamelinelist?mainid=2'
    result_url = urllib2.urlopen(ucurl_line, timeout=30)
    result_url_data = result_url.read()
    alldata = json.loads(result_url_data)['data']
    #alldata = json.loads(result_url_data)
    for dist in alldata:
        if int(dist['dist_id']) in remove_dist or int(dist['dist_id']) > 10000:
            continue
        dist_id = int(dist['dist_id'])
        name = tmp_info[dist['ip']]
        line_num = len(dist['line_nums'].split(','))
        line_info_list.append([dist_id, name, line_num])
    mysql_conn("delete from game_server_price.gs_info where date=CURRENT_DATE;", **db_config)
    gs_info_sql = 'insert into game_server_price.gs_info value (%s,%s,%s,current_date);'
    mysql_conn(gs_info_sql, line_info_list, **db_config)
    mysql_conn("delete from game_server_price.instances_center where date=CURRENT_DATE;", **db_config)
    mysql_conn("truncate table game_server_price.dist_list;", **db_config)
    sql = 'insert into game_server_price.instances_center value (%s,%s,%s,%s,%s,CURRENT_DATE)'
    mysql_conn(sql, dist_info_list, **db_config)
    sub_data_sql = 'select dist_id,sub_name from center_app.sub_category where main_id = 2 and flag in (1,0) and dist_id BETWEEN 2 and 25000 and del_info=1;'
    result = mysql_conn(sub_data_sql, **center_db_config)
    sub_data = list(result)
    insert_sub_sql = 'insert into game_server_price.dist_list value (%s,%s)'
    mysql_conn(insert_sub_sql, sub_data, **db_config)
    return dist_info_list


def count_instance_price():
    # Per-instance total price for yesterday: disk price (size * per-GB base
    # price) + instance base price, multiplied by the vendor trade discount.
    # Returns {instance_name: last_price_as_str}; the str may be "None" when
    # ecs_price_base has no row for the instance type (callers check for that).
    SQL_PRICES = ''' select c.name ,sum(b.price_base*a.size) as disk_price ,d.instances_price ,(sum(b.price_base*a.size)+ d.instances_price) as total ,c.instance_type ,e.trade_discount ,((sum(b.price_base*a.size)+ d.instances_price)*e.trade_discount) as last_price from game_server_price.disk_info a ,game_server_price.disk_price_info b ,game_server_price.trade_discount_base e ,game_server_price.instances_info c left join game_server_price.ecs_price_base d on c.instance_type=d.instance_type where a.disk_type = b.disk_type and a.company = b.company and a.date=date_sub(CURDATE(),interval 1 day) and c.date=date_sub(CURDATE(),interval 1 day) and c.id=a.server_id and a.company = e.company group by a.server_id ; '''
    price_dict = {}
    result = mysql_conn(SQL_PRICES, **db_config)
    for i in result:
        name = str(i[0]).strip()
        price = str(i[6])  # column 6 = last_price (discounted total)
        price_dict[name] = price
    return price_dict


def count_all_dist_price():
    # Aggregate instance prices per district, persist per-instance history, and
    # return {dist_id: [total_price]}. Exits(1) if any instance has no price.
    sql = 'select a.name,a.app_name,b.company,b.instance_type,a.disk_size,a.dist_id ' \
        'from instances_center a,instances_info b ' \
        'where a.name=b.name and b.date=date_sub(CURDATE(),interval 1 day) ' \
        'and a.date=date_sub(CURDATE(),interval 1 day);'
    result = mysql_conn(sql, **db_config)
    all_price_dict = count_instance_price()
    all_instance_price = {}
    instance_price_list = []
    error_list = []
    for instance in result:
        name = str(instance[0])
        price = all_price_dict[name]
        if price == "None":
            print("ERROR: %s price count failed!" % name)
            error_list.append(name)
        else:
            price = Decimal(price)
            if instance[1] == "adb_s":
                # adb_s instances are shared between two districts — halve the cost.
                # TODO confirm this is the intended rationale.
                price = price / 2
            instance_price_list.append([instance[5], instance[0], price])
            try:
                all_instance_price[instance[5]][instance[0]] = price
            except:
                all_instance_price[instance[5]] = {}
                all_instance_price[instance[5]][instance[0]] = price
    if error_list:
        print "Error list:", str(error_list)
        sys.exit(1)
    mysql_conn("delete from game_server_price.instances_price_history where date=date_sub(CURRENT_DATE,interval 1 day);", **db_config)
    sql = 'insert into game_server_price.instances_price_history value (%s,%s,%s,date_sub(CURRENT_DATE,interval 1 day));'
    print instance_price_list
    mysql_conn(sql, instance_price_list, **db_config)
    all_dist_price = {}
    for dist in all_instance_price:
        all_dist_price[dist] = []
        price = 0
        for instance in all_instance_price[dist]:
            price = price + all_instance_price[dist][instance]
        all_dist_price[dist].append(price)
    return all_dist_price


def get_dist_idc():
    # Map each district to its cloud vendor via the district's 'ccs' instance.
    SQL_IDC = ''' select a.dist_id ,b.company from instances_center a ,instances_info b where a.app_name ='ccs' and a.name=b.name and a.date=b.date and a.date=date_sub(CURDATE(),interval 1 day); '''
    idc_dict = {}
    result = mysql_conn(SQL_IDC, **db_config)
    for i in result:
        dist_id = i[0]
        idc = str(i[1])
        idc_dict[dist_id] = idc
    return idc_dict


def main():
    # Daily pipeline: refresh inventory + metadata, compute per-district totals
    # and per-player averages, and rewrite yesterday's asktao_dist_price rows.
    get_servers()
    get_ucenter_info()
    count_all_dist_price()
    date = (datetime.datetime.now() + datetime.timedelta(days=-1)).strftime("%Y-%m-%d")
    # NOTE(review): count_all_dist_price() is called twice (result of the first
    # call discarded) — doubles DB writes; probably unintentional.
    all_dist_price = count_all_dist_price()
    idc_dict = get_dist_idc()
    online_info = get_online_number()
    all_dist_price_per = []
    for dist_id in all_dist_price:
        price = Decimal(all_dist_price[dist_id][0])
        if dist_id not in idc_dict:
            continue
        idc = idc_dict[dist_id]
        try:
            online_num = Decimal(online_info[dist_id])
        except:
            # District deployed but not yet opened: treat player count as 0.
            online_num = Decimal(0)
        if int(online_num) == 0:
            # Avoid division by zero: report the raw price as the average.
            avg_price = price
        else:
            avg_price = Decimal(price / online_num).quantize(Decimal("0.000"))
        all_dist_price_per.append([dist_id, online_num, price, avg_price,idc, date])
    date = (datetime.datetime.now() + datetime.timedelta(days=-1)).strftime("%Y-%m-%d")
    delete_sql = 'delete from game_server_price.asktao_dist_price where date = "%s";' % date
    mysql_conn(delete_sql, **db_config)
    sql = 'insert into game_server_price.asktao_dist_price value (%s,%s,%s,%s,%s,%s);'
    mysql_conn(sql, all_dist_price_per, **db_config)


if __name__ == '__main__':
    main()

# ----------------
# cloud.py — per-vendor inventory collectors. Each class exposes
# run_cloud() -> {"instences"/"instances": [...], "disks": [...]}.
# NOTE(review): all AK/SK credentials below are hardcoded (some partially
# blanked) — rotate any real keys and load them from a secret store.
from tencentcloud.common import credential
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.cvm.v20170312 import cvm_client, models
from tencentcloud.cbs.v20170312 import cbs_client
from tencentcloud.cbs.v20170312 import models as cbsmodels
from aliyunsdkcore.client import AcsClient
from aliyunsdkecs.request.v20140526.DescribeInstancesRequest import DescribeInstancesRequest
from aliyunsdkecs.request.v20140526.DescribeDisksRequest import DescribeDisksRequest
from aliyunsdkcore.acs_exception.exceptions import ClientException
from aliyunsdkcore.acs_exception.exceptions import ServerException
from openstack import exceptions
from openstack import connection
import requests
import signer
import json


class cloud(object):
    # Base class: shared disk-type normalization table plus the collector
    # interface the per-vendor subclasses implement.
    def __init__(self):
        self.DISK_TYPE = { "SAS": "premium", "STAT": "basic", "SSD": "ssd", "CLOUD_BASIC": "basic", "CLOUD_PREMIUM": "premium", "CLOUD_SSD": "ssd", "ESSD_PL0": "ssd" }

    def get_instances_from_api(self):
        pass

    def get_disk_from_api(self):
        pass

    def get_instances(self):
        pass

    def get_disks(self):
        pass


class alicloud(cloud):
    def get_instances_from_api(self, offset):
        # One DescribeInstances page (100 per page) per region; offset is the
        # 1-based page number. Returns (flag, server_list) with flag==0 on success.
        zone_list = ['cn-beijing','cn-shanghai']
        server_list = []
        for zone_i in zone_list:
            try:
                client = AcsClient('LTAI4G696nAcjQUPayU7FVyA', 'Rl6O82hLn0cfssOZ8PdiTQJYmoqAtj', zone_i)
                request = DescribeInstancesRequest()
                request.set_accept_format('json')
                request.set_PageNumber(offset)
                request.set_PageSize(100)
                response = client.do_action_with_exception(request)
                info = json.loads(response)['Instances']['Instance']
                for instance in info:
                    name = instance['InstanceName']
                    ip = instance['VpcAttributes']['PrivateIpAddress']['IpAddress'][0]
                    instance_type = instance['InstanceType']
                    id = instance['InstanceId']
                    server_list.append({"name": name, "ip": ip, "instance_type": instance_type, "company": "AliCloud", "id": id})
            except Exception as err:
                print(err)
        return 0, server_list

    def get_instances(self):
        # Page through up to 20 pages; stops at the first empty page.
        # NOTE(review): on failure returns a (1, err) tuple while the success
        # path returns a bare list — callers must handle both shapes.
        try:
            server_list = []
            for offset in range(20):
                offset = offset + 1
                flag, server_list_tmp = self.get_instances_from_api(offset)
                if flag:
                    raise Exception("get tencent instances error")
                if server_list_tmp == []:
                    break
                server_list = server_list + server_list_tmp
            return server_list
        except Exception as err:
            print(err)
            return 1, err

    def get_disk_from_api(self, offset):
        # One DescribeDisks page per region; same paging convention as above.
        zone_list = ['cn-beijing','cn-shanghai']
        disks_list = []
        for zone_i in zone_list:
            try:
                client = AcsClient('LTAI4G696nAcjQUPayU7FVyA', '', zone_i)
                request = DescribeDisksRequest()
                request.set_accept_format('json')
                request.set_PageNumber(offset)
                request.set_PageSize(100)
                response = client.do_action_with_exception(request)
                info = json.loads(response)['Disks']['Disk']
                for disk in info:
                    server_id = disk['InstanceId']
                    disk_id = disk['DiskId']
                    disk_type = disk['Category']
                    dist_size = disk['Size']
                    disk_info = ['AliCloud', disk_id, server_id, disk_type, dist_size]
                    disks_list.append(disk_info)
            except Exception as err:
                print(err)
        return 0, disks_list

    def get_disks(self):
        try:
            disks_list = []
            for offset in range(20):
                offset = offset + 1
                flag, disks_list_tmp = self.get_disk_from_api(offset)
                if flag:
                    raise Exception("get tencent disks error")
                if disks_list_tmp == []:
                    break
                disks_list = disks_list + disks_list_tmp
            return disks_list
        except Exception as err:
            print(err, 11)
            return 1, err

    def run_cloud(self):
        instences = self.get_instances()
        disks = self.get_disks()
        return {"instences": instences, "disks": disks}


class tencent(cloud):
    def get_instances_from_api(self, offset):
        # One DescribeInstances page per region; offset here is the absolute
        # record offset (multiples of 100), unlike alicloud's page number.
        zone_list = ['ap-beijing','ap-shanghai']
        server_list = []
        for zone_i in zone_list:
            try:
                cred = credential.Credential("AKIDiV3GSXs7brvQhkqYlkV7XkmDV7g0A6ub", "")
                httpProfile = HttpProfile()
                httpProfile.endpoint = "cvm.tencentcloudapi.com"
                clientProfile = ClientProfile()
                clientProfile.httpProfile = httpProfile
                client = cvm_client.CvmClient(cred, zone_i, clientProfile)
                req = models.DescribeInstancesRequest()
                params = {"Offset": offset, "Limit": 100}
                params = json.dumps(params)
                req.from_json_string(params)
                instance_result = client.DescribeInstances(req).to_json_string()
                instance_result = json.loads(instance_result)
                for instance in instance_result["InstanceSet"]:
                    name = instance["InstanceName"]
                    ip = instance["PrivateIpAddresses"][0]
                    instance_type = instance["InstanceType"]
                    id = instance["InstanceId"]
                    status = instance["RestrictState"]
                    print(instance)
                    if status != "NORMAL":continue  # skip isolated/restricted instances
                    server_list.append({"name": name, "ip": ip, "instance_type": instance_type, "company": "Tencent", "id": id })
            except Exception as err:
                print(err)
        return 0, server_list

    def get_instances(self):
        # NOTE(review): returns None on exception (no explicit return) while
        # success returns a list — callers must tolerate both.
        try:
            server_list = []
            for offset in range(20):
                offset = offset * 100
                flag, server_list_tmp = self.get_instances_from_api(offset)
                if flag:
                    raise Exception("get tencent instances error")
                if server_list_tmp == []:
                    break
                server_list = server_list + server_list_tmp
            return server_list
        except Exception as err:
            print(err)

    def get_disk_from_api(self, offset):
        disks_list = []
        zone_list = ['ap-beijing','ap-shanghai']
        for zone_i in zone_list:
            try:
                cred = credential.Credential("AKIDiV3GSXs7brvQhkqYlkV7XkmDV7g0A6ub", "")
                httpProfile = HttpProfile()
                httpProfile.endpoint = "cbs.tencentcloudapi.com"
                clientProfile = ClientProfile()
                clientProfile.httpProfile = httpProfile
                client = cbs_client.CbsClient(cred, zone_i, clientProfile)
                req = cbsmodels.DescribeDisksRequest()
                params = {"Offset": offset, "Limit": 100}
                params = json.dumps(params)
                req.from_json_string(params)
                disks_result = client.DescribeDisks(req).to_json_string()
                disks_result = json.loads(disks_result)
                for disk in disks_result['DiskSet']:
                    disk_id = disk['DiskId']
                    try:
                        server_id = disk['InstanceIdList'][0]
                    except:
                        server_id = 'not-mount'  # disk not attached to any instance
                    disk_type = disk['DiskType']
                    dist_size = disk['DiskSize']
                    disk_info = ['Tencent', disk_id, server_id, disk_type, dist_size]
                    disks_list.append(disk_info)
            except TencentCloudSDKException as err:
                print(err)
        return 0, disks_list

    def get_disks(self):
        try:
            disks_list = []
            for offset in range(20):
                offset = offset * 100
                flag, disks_list_tmp = self.get_disk_from_api(offset)
                if flag:
                    raise Exception("get tencent instances error")
                if disks_list_tmp == []:
                    break
                disks_list = disks_list + disks_list_tmp
            return disks_list
        except Exception as err:
            print(err)

    def run_cloud(self):
        instences = self.get_instances()
        disks = self.get_disks()
        return {"instences": instences, "disks": disks}


class volcengine(cloud):
    def get_instances_from_api(self):
        # Full inventory in one call chain: pages through DescribeInstances
        # (100/page) until total_count is exhausted. Returns (flag, list).
        try:
            server_list = []
            import volcenginesdkcore
            import volcenginesdkecs
            from volcenginesdkcore.rest import ApiException
            configuration = volcenginesdkcore.Configuration()
            configuration.ak = "AKLTNzgyNTFhODhhMzIwNDM4YTlmNjA2ZTY4OTg4MGFhZTY"
            configuration.sk = ""
            configuration.region = "cn-beijing"
            api_instance = volcenginesdkecs.ECSApi(volcenginesdkcore.ApiClient(configuration))
            page_number = 1
            page_size = 100
            total_count = None
            while True:
                describe_instances_request = volcenginesdkecs.DescribeInstancesRequest( page_number=page_number, page_size=page_size )
                try:
                    response = api_instance.describe_instances(describe_instances_request)
                    if total_count is None:
                        total_count = response.total_count
                    for instance in response.instances:
                        name = instance.instance_name
                        ip = instance.network_interfaces[0].primary_ip_address if instance.network_interfaces else ""
                        instance_type = instance.instance_type_id
                        id = instance.instance_id
                        server_list.append( {"name": name, "ip": ip, "instance_type": instance_type, "company": "Volcengine", "id": id})
                    if len(response.instances) < page_size or page_number * page_size >= total_count:
                        break
                    page_number += 1
                except ApiException as e:
                    print("Exception when calling ECS API: {}".format(e))
                    break
            return 0, server_list
        except Exception as err:
            print("Error in get_instances_from_api: {}".format(err))
            return 1, err

    def get_disks_from_api(self):
        # Same paging pattern over the EBS DescribeVolumes API.
        try:
            disk_list = []
            import volcenginesdkcore
            import volcenginesdkstorageebs
            from volcenginesdkcore.rest import ApiException
            configuration = volcenginesdkcore.Configuration()
            configuration.ak = "AKLTNzgyNTFhODhhMzIwNDM4YTlmNjA2ZTY4OTg4MGFhZTY"
            configuration.sk = ""
            configuration.region = "cn-beijing"
            api_instance = volcenginesdkstorageebs.STORAGEEBSApi(volcenginesdkcore.ApiClient(configuration))
            page_number = 1
            page_size = 100
            total_count = None
            while True:
                describe_volumes_request = volcenginesdkstorageebs.DescribeVolumesRequest( page_number=page_number, page_size=page_size )
                try:
                    response = api_instance.describe_volumes(describe_volumes_request)
                    if total_count is None:
                        total_count = response.total_count
                    for volume in response.volumes:
                        disk_id = volume.volume_id
                        server_id = volume.instance_id if hasattr(volume, 'instance_id') and volume.instance_id else "not-mount"
                        disk_type = volume.volume_type
                        disk_size = volume.size
                        disk_info = ['Volcengine', disk_id, server_id, disk_type, disk_size]
                        disk_list.append(disk_info)
                    if len(response.volumes) < page_size or page_number * page_size >= total_count:
                        break
                    page_number += 1
                except ApiException as e:
                    print("Exception when calling STORAGEEBS API: {}".format(e))
                    break
            print(f"Total disks fetched: {len(disk_list)}")  # NOTE(review): f-string — Python 3 only, unlike main.py
            return 0, disk_list
        except Exception as err:
            print(f"Error in get_disks_from_api: {err}")
            return 1, err

    def get_instances(self):
        try:
            flag, server_list = self.get_instances_from_api()
            if flag:
                raise Exception("get volcengine instances error")
            return server_list
        except Exception as err:
            print(err)
            return []

    def get_disks(self):
        try:
            flag, disk_list = self.get_disks_from_api()
            if flag:
                raise Exception("get volcengine disks error")
            return disk_list
        except Exception as err:
            print(err)
            return []

    def run_cloud(self):
        # NOTE(review): key is "instances" here but main.get_servers() reads
        # "instences" — one of the two must change.
        instances = self.get_instances()
        disks = self.get_disks()
        return {"instances": instances, "disks": disks}

# ----------------
# 腾讯查询价格api --------- (Tencent price-inquiry API sample script)
# -*- coding: utf-8 -*-
import os
import json
import types
from tencentcloud.common import credential
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.cvm.v20170312 import cvm_client, models

try:
    # 密钥信息从环境变量读取,需要提前在环境变量中设置 TENCENTCLOUD_SECRET_ID 和 TENCENTCLOUD_SECRET_KEY
    # 使用环境变量方式可以避免密钥硬编码在代码中,提高安全性
    # 生产环境建议使用更安全的密钥管理方案,如密钥管理系统(KMS)、容器密钥注入等
    # 请参见:https://cloud.tencent.com/document/product/1278/85305
    # 密钥可前往官网控制台 https://console.cloud.tencent.com/cam/capi 进行获取
    cred = credential.Credential(os.getenv("TENCENTCLOUD_SECRET_ID"), os.getenv("TENCENTCLOUD_SECRET_KEY"))
    # 使用临时密钥示例
    # cred = credential.Credential("SecretId", "SecretKey", "Token")
    # 实例化一个http选项,可选的,没有特殊需求可以跳过
    httpProfile = HttpProfile()
    httpProfile.endpoint = "cvm.tencentcloudapi.com"
    # 实例化一个client选项,可选的,没有特殊需求可以跳过
    clientProfile = ClientProfile()
    clientProfile.httpProfile = httpProfile
    # 实例化要请求产品的client对象,clientProfile是可选的
    client = cvm_client.CvmClient(cred, "ap-beijing", clientProfile)
    # 实例化一个请求对象,每个接口都会对应一个request对象
    req = models.InquiryPriceRunInstancesRequest()
    params = { "Placement": { "Zone": "ap-beijing-6" }, "ImageId": "img-p5nkhc2v", "InstanceChargeType": "PREPAID", "InstanceChargePrepaid": { "Period": 1 }, "InstanceType": "SA2.2XLARGE16" }
    req.from_json_string(json.dumps(params))
    # 返回的resp是一个InquiryPriceRunInstancesResponse的实例,与请求对象对应
    resp = client.InquiryPriceRunInstances(req)
    # 输出json格式的字符串回包
    print(resp.to_json_string())
except TencentCloudSDKException as err:
    print(err)

# ----腾讯api返回 (sample Tencent API response):
# { "Response": { "Price": { "BandwidthPrice": {}, "InstancePrice": {
#   "Discount": 60, "DiscountPrice": 286.98, "OriginalPrice": 478.3 } },
#   "RequestId": "bd16e011-f83d-4bbc-bb2d-c31361508e11" } }
# -------------------------------------------------------------------------------------------------------------------------
# 阿里查询价格api --------- (Alibaba DescribePrice API sample, Python 3 + typing)
# -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
import os
import sys
from typing import List
from alibabacloud_ecs20140526.client import Client as Ecs20140526Client
from alibabacloud_credentials.client import Client as CredentialClient
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_ecs20140526 import models as ecs_20140526_models
from alibabacloud_tea_util import models as util_models
from alibabacloud_tea_util.client import Client as UtilClient


class Sample:
    def __init__(self):
        pass

    @staticmethod
    def create_client() -> Ecs20140526Client:
        """
        使用凭据初始化账号Client
        @return: Client
        @throws Exception
        """
        # 工程代码建议使用更安全的无AK方式,凭据配置方式请参见:https://help.aliyun.com/document_detail/378659.html。
        credential = CredentialClient()
        config = open_api_models.Config( credential=credential )
        # Endpoint 请参考 https://api.aliyun.com/product/Ecs
        config.endpoint = f'ecs.cn-beijing.aliyuncs.com'
        return Ecs20140526Client(config)

    @staticmethod
    def main( args: List[str], ) -> None:
        client = Sample.create_client()
        describe_price_request = ecs_20140526_models.DescribePriceRequest( region_id='cn-beijing', instance_type='ecs.c5.xlarge', price_unit='Month', period=1 )
        runtime = util_models.RuntimeOptions()
        try:
            # 复制代码运行请自行打印 API 的返回值
            client.describe_price_with_options(describe_price_request, runtime)
        except Exception as error:
            # 此处仅做打印展示,请谨慎对待异常处理,在工程项目中切勿直接忽略异常。
            # 错误 message
            print(error.message)
            # 诊断地址
            print(error.data.get("Recommend"))
            UtilClient.assert_as_string(error.message)

    @staticmethod
    async def main_async( args: List[str], ) -> None:
        client = Sample.create_client()
        describe_price_request = ecs_20140526_models.DescribePriceRequest( region_id='cn-beijing', instance_type='ecs.c5.xlarge', price_unit='Month', period=1 )
        runtime = util_models.RuntimeOptions()
        try:
            # 复制代码运行请自行打印 API 的返回值
            await client.describe_price_with_options_async(describe_price_request, runtime)
        except Exception as error:
            # 此处仅做打印展示,请谨慎对待异常处理,在工程项目中切勿直接忽略异常。
            # 错误 message
            print(error.message)
            # 诊断地址
            print(error.data.get("Recommend"))
            UtilClient.assert_as_string(error.message)


if __name__ == '__main__':
    Sample.main(sys.argv[1:])

# --------阿里api返回 (sample Alibaba API response):
# { "RequestId": "D1F58F14-AAF7-585D-B3A5-1523CC5D94A1", "PriceInfo": { "Price": {
#   "OriginalPrice": 365, "ReservedInstanceHourPrice": 0, "DiscountPrice": 146,
#   "Currency": "CNY", "DetailInfos": { "DetailInfo": [
#     { "OriginalPrice": 0, "DiscountPrice": 0, "Resource": "image", "TradePrice": 0 },
#     { "OriginalPrice": 358, "DiscountPrice": 143.2, "Resource": "instanceType", "TradePrice": 214.8 },
#     { "OriginalPrice": 7, "DiscountPrice": 2.8, "Resource": "systemDisk", "TradePrice": 4.2 },
#     { "OriginalPrice": 0, "DiscountPrice": 0, "Resource": "bandwidth", "TradePrice": 0 } ] },
#   "TradePrice": 219 }, "Rules": { "Rule": [
#     { "Description": "合同优惠_整单_6.0折", "RuleId": 2000019367043 } ] } } }
# ------------------------------------------------------------------------------------------------------------------
# 火山云询价api (Volcengine renewal price-inquiry sample)
# Example Code generated by Beijing Volcanoengine Technology.
from __future__ import print_function
import volcenginesdkcore
import volcenginesdkbilling
from volcenginesdkcore.rest import ApiException

if __name__ == '__main__':
    # 注意示例代码安全,代码泄漏会导致AK/SK泄漏,有极大的安全风险。
    configuration = volcenginesdkcore.Configuration()
    configuration.ak = "Your AK"
    configuration.sk = "Your SK"
    configuration.region = "cn-beijing"
    # set default configuration
    volcenginesdkcore.Configuration.set_default(configuration)
    # use global default configuration
    api_instance = volcenginesdkbilling.BILLINGApi()
    query_price_for_renew_request = volcenginesdkbilling.QueryPriceForRenewRequest( instance_id_list=["i-ye3aogjxfkwh2yrey86c"], product="ECS", use_duration=1, )
    try:
        # 复制代码运行示例,请自行打印API返回值。
        api_instance.query_price_for_renew(query_price_for_renew_request)
    except ApiException as e:
        # 复制代码运行示例,请自行打印API错误信息。
        # print("Exception when calling api: %s\n" % e)
        pass

# --------火山云询价api调用返回结果 (sample response, truncated in the original notes):
# { "ResponseMetadata": { "RequestId": "20250924090622B58F540645656224EA78", "Action": "QueryPriceForRenew", "Version":
"2022-01-01", "Service": "billing", "Region": "cn-beijing" }, "Result": { "Currency": "CNY", "TotalOriginalAmount": "561.17", "TotalDiscountAmount": "252.53", "InstanceAmountList": [ { "InstanceID": "i-ye3aogjxfkwh2yrey86c", "Product": "ECS", "ConfigurationCode": "ecs.c3a.2xlarge.month", "OriginalAmount": "561.17", "DiscountAmount": "252.53" } ], "CalSerialNo": "CSN_d39k83j65irjk3r3543g", "BaseResp": { "StatusMessage": "", "StatusCode": 0, "HTTPStatusCode": 0, "ErrorCode": "" } } }
09-25
内容概要:本文详细介绍了“秒杀商城”微服务架构的设计与实战全过程,涵盖系统从需求分析、服务拆分、技术选型到核心功能开发、分布式事务处理、容器化部署及监控链路追踪的完整流程。重点解决了高并发场景下的超卖问题,采用Redis预减库存、消息队列削峰、数据库乐观锁等手段保障数据一致性,并通过Nacos实现服务注册发现与配置管理,利用Seata处理跨服务分布式事务,结合RabbitMQ实现异步下单,提升系统吞吐能力。同时,项目支持Docker Compose快速部署和Kubernetes生产级编排,集成Sleuth+Zipkin链路追踪与Prometheus+Grafana监控体系,构建可观测性强的微服务系统。; 适合人群:具备Java基础和Spring Boot开发经验,熟悉微服务基本概念的中高级研发人员,尤其是希望深入理解高并发系统设计、分布式事务、服务治理等核心技术的开发者;适合工作2-5年、有志于转型微服务或提升架构能力的工程师; 使用场景及目标:①学习如何基于Spring Cloud Alibaba构建完整的微服务项目;②掌握秒杀场景下高并发、超卖控制、异步化、削峰填谷等关键技术方案;③实践分布式事务(Seata)、服务熔断降级、链路追踪、统一配置中心等企业级中间件的应用;④完成从本地开发到容器化部署的全流程落地; 阅读建议:建议按照文档提供的七个阶段循序渐进地动手实践,重点关注秒杀流程设计、服务间通信机制、分布式事务实现和系统性能优化部分,结合代码调试与监控工具深入理解各组件协作原理,真正掌握高并发微服务系统的构建能力。
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43,前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值