模拟target_action的内部实现

在给 button 添加事件的时候,大家都知道使用 button addTarget:<#(nullable id)#> action:<#(nonnull SEL)#> forControlEvents:<#(UIControlEvents)#> 这种方法,但是它的内部是怎样实现的呢?今天就让我们一探究竟。


def step(self, action, target=False, disturbance=False, base=None, target_set=(20, 1), disturbance_set=(200, 50, 0.3)): """ 完成一个采用周期的动作转移,并输出奖励 s(k+1) = step( s(k), a(k) ) params: action is the opt inc return: state(K+1), reward, terminated, truncated, info 采用欧拉法仿真,仿真时间为 samplet 设置一个退出条件, 仿真结束后,系统进入新的稳定状态 """ # 系统前进 samplet 秒 # 叠加上控制偏置量 # self.ubias += action[0] self.ubias = 0 self.virtual_target = action[0] for tick in np.arange(self.simT, self.sampleT+self.simT, self.simT): t = tick + self.time_now if t >= target_set[0] and target: # 加入目标值 self.y_target = target_set[1] self.virtual_target = self.y_target*(han_trans(t - target_set[0], 80)) e_k = self.virtual_target - self.y_k x1_k1 = -3.6 * e_k * self.simT + self.x1_k # for x1(k+1) x2_k1 = (850 * e_k - self.x2_k) * self.simT + self.x2_k # for x2(k+1) x3_k1 = (-1/15.4 * self.x3_k + 1/15.4 * self.u_k * (-0.0139)) * self.simT + self.x3_k # for x3(k+1) x4_k1 = (-1/15.4 * self.x4_k + 1/15.4 * self.x3_k) * self.simT + self.x4_k # for x4(k+1) # 计算迟延环节输出 y_k1 = self.x4_buffer[-1] # add disturbance 0.3 in the time range(450,500) if (t >= disturbance_set[0]) and (t <= (disturbance_set[0] + disturbance_set[1])) and False: y_k1 += disturbance_set[2] # 更新 控制量 e_k1 = self.virtual_target - y_k1 um_k1 = x1_k1 + x2_k1 - 960 * e_k1 # PID k 时刻输出的控制指令 960 u_k1 = rate_limiter(um_k1 + self.ubias, self.u_k, T=self.simT, up=20, down=-20) # 状态转移 self.x1_k, self.x2_k, self.x3_k, self.x4_k = x1_k1, x2_k1, x3_k1, x4_k1 self.u_k = u_k1 self.y_k = y_k1 self.e_k = e_k1 self.x4_buffer[1:] = self.x4_buffer[0:-1] self.x4_buffer[0] = x4_k1 # 保存仿真数据 self.y_save[self.index] = y_k1 # 从 0.1s 开始 self.u_save[self.index] = u_k1 self.um_save[self.index] = um_k1 self.ubias_save[self.index] = self.ubias # 一个辅助控制器的采用周期才改变一次 self.y_target_save[self.index] = self.y_target # y_target 可能在过程中改变 self.virtual_target_save[self.index] = self.virtual_target # y_target 可能在过程中改变 self.index += 1 解释代码(详细)
03-29
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""ROS node: receives a target class from a QR-code topic, navigates rooms A-C
with move_base, OCRs calibration boards from the camera, and publishes which
room contains the target class."""
import rospy
import actionlib
import cv2
import pytesseract
from std_msgs.msg import String
from geometry_msgs.msg import PoseStamped
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
# FIX: original imported lowercase `simulation, simulation_result`, but the
# code instantiates `SimulationResult()`, which would raise NameError. Import
# the message class name actually used (ROS generates one class per .msg file).
from my_control.msg import SimulationResult  # custom message


class SimulationNode:
    def __init__(self):
        rospy.init_node('simulation_node')
        # Search state.
        self.target_class = None      # class requested via QR code, or None
        self.current_room = None      # room currently being inspected
        self.room_results = {}        # room id -> detected class
        # Room goal poses (x, y, z, w) — configure for the actual simulation map.
        self.room_coordinates = {
            'A': (1.0, 2.0, 0.0, 1.0),
            'B': (3.0, 4.0, 0.0, 1.0),
            'C': (5.0, 6.0, 0.0, 1.0)
        }
        # ROS communication.
        self.qr_sub = rospy.Subscriber('/qr_result', String, self.qr_callback)
        self.image_sub = rospy.Subscriber('/camera/image_raw', Image, self.image_callback)
        self.result_pub = rospy.Publisher('/simulation_result', SimulationResult, queue_size=10)
        # Navigation action client.
        self.move_base_client = actionlib.SimpleActionClient('move_base', MoveBaseAction)
        rospy.loginfo("等待move_base服务器...")
        self.move_base_client.wait_for_server()
        rospy.loginfo("已连接move_base服务器")
        # Image conversion helper.
        self.bridge = CvBridge()
        rospy.loginfo("仿真节点已启动,等待二维码识别结果...")

    def qr_callback(self, msg):
        """Validate the QR-decoded class and kick off the room search."""
        rospy.loginfo(f"收到二维码结果: {msg.data}")
        valid_classes = ['fruit', 'vegetable', 'dessert']
        if msg.data.lower() in valid_classes:
            self.target_class = msg.data.lower()
            rospy.loginfo(f"开始搜索: {self.target_class}")
            self.search_rooms()
        else:
            rospy.logwarn(f"无效的类别: {msg.data}")

    def search_rooms(self):
        """Visit rooms A, B, C in order, letting image_callback record detections."""
        rooms = ['A', 'B', 'C']
        for room in rooms:
            rospy.loginfo(f"导航到房间 {room}")
            self.current_room = room
            self.navigate_to_room(room)
            # Dwell so image_callback can run; a navigation-status callback
            # would be more robust in a real deployment.
            rospy.sleep(2)
            rospy.loginfo(f"在房间 {room} 检测标定板...")
            rospy.sleep(3)  # allow time for detection
        # All rooms visited — publish the aggregate result.
        self.publish_final_result()

    def navigate_to_room(self, room_id):
        """Send a move_base goal for the given room and block until it finishes."""
        if room_id not in self.room_coordinates:
            rospy.logerr(f"无效的房间ID: {room_id}")
            return
        x, y, z, w = self.room_coordinates[room_id]
        goal = MoveBaseGoal()
        goal.target_pose.header.frame_id = "map"
        goal.target_pose.header.stamp = rospy.Time.now()
        goal.target_pose.pose.position.x = x
        goal.target_pose.pose.position.y = y
        # NOTE(review): orientation.z is never set (z from the tuple is unused),
        # matching the original behavior — confirm the intended yaw.
        goal.target_pose.pose.orientation.w = w
        self.move_base_client.send_goal(goal)
        self.move_base_client.wait_for_result()

    def image_callback(self, msg):
        """OCR the camera frame; record a hit when it matches the target class."""
        if not self.target_class or not self.current_room:
            return  # not currently searching
        try:
            cv_image = self.bridge.imgmsg_to_cv2(msg, "bgr8")
            detected_class = self.detect_calibration_board(cv_image)
            if detected_class == self.target_class:
                rospy.loginfo(f"在房间 {self.current_room} 检测到目标: {detected_class}")
                self.room_results[self.current_room] = detected_class
        except Exception as e:
            rospy.logerr(f"图像处理错误: {str(e)}")

    def detect_calibration_board(self, image):
        """Return the class word OCR'd from the board, or 'unknown'."""
        # Binarize to help tesseract.
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        _, thresh = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY)
        text = pytesseract.image_to_string(thresh).strip().lower()
        valid_classes = ['fruit', 'vegetable', 'dessert']
        for cls in valid_classes:
            if cls in text:
                return cls
        return "unknown"

    def publish_final_result(self):
        """Publish which room held the target class, then reset search state."""
        result_msg = SimulationResult()
        target_room = "N/A"
        for room, cls in self.room_results.items():
            if cls == self.target_class:
                target_room = room
                break
        result_msg.room = target_room
        result_msg.myclass = self.target_class
        self.result_pub.publish(result_msg)
        rospy.loginfo(f"发布结果: 房间 {target_room}, 类别 {self.target_class}")
        # Reset for the next QR request.
        self.target_class = None
        self.current_room = None
        self.room_results = {}


if __name__ == '__main__':
    try:
        node = SimulationNode()
        rospy.spin()
    except rospy.ROSInterruptException:
        pass
最新发布
07-31
def touchImage(ad, src_path, duration=0.1, similar=0.95, timeout=3, loc=5, offsetX=0, offsetY=0, debug=False):
    """Find a template image on the current screenshot and tap / long-press it.

    Args:
        ad: device handle providing ``adb.shell`` and ``log`` (project type).
        src_path: resource path of the template image to match.
        duration: press time in seconds; <= 0.1 taps, otherwise long-presses.
        similar: minimum template match rate required to touch.
        timeout: number of match attempts (one per second) before giving up.
        loc, debug: accepted for interface compatibility; unused here.
        offsetX, offsetY: pixel offsets applied to the matched position.

    Returns:
        True when a touch was performed, False on invalid screenshot or timeout.

    Raises:
        DoesNotExistError: when the template resource cannot be resolved.
    """
    attempt = 0  # retry counter
    while attempt < timeout:  # keep trying until the attempt budget is spent
        # Step 1: resolve the current-screenshot path.
        # NOTE(review): the actual capture call is commented out — the file at
        # current_screenshot_path is presumably produced elsewhere; confirm.
        # screenshot_path = ad.take_picture("test", ".png", isOrigin=True)
        screenshot_dir = os.path.join(os.getcwd(), "resource/superapp")
        os.makedirs(screenshot_dir, exist_ok=True)
        current_screenshot_path = os.path.join(screenshot_dir, "test.png")
        # Step 2: resolve the target image's absolute path and check it exists.
        target_path = getResourcePath(src_path)
        if target_path is None:
            raise DoesNotExistError(f"TOUCH_IMAGE: {src_path} 不存在!")
        # Step 3: validate the screenshot file.
        if not os.path.exists(current_screenshot_path) or not os.path.isfile(current_screenshot_path):
            print(f"错误:当前截图 {current_screenshot_path} 无效!")
            return False  # fail fast on an invalid screenshot
        attempt += 1
        # Step 4: template matching (returns match rate and best position).
        # FIX: the original passed `screenshot_path`, which is only defined on
        # the commented-out capture line above → NameError at runtime.
        match_rate, target_pos = matchImageLocation(current_screenshot_path, target_path)
        ad.log.info(f"最大匹配位置: {target_pos}")
        # Step 5: touch when the match rate clears the threshold.
        if match_rate >= similar:
            x, y = target_pos[0] + offsetX, target_pos[1] + offsetY  # apply offsets
            if duration <= 0.1:
                # Tap at (x, y).
                ad.adb.shell(f"input tap {x} {y}")
            else:
                # Long-press at (x, y): zero-distance swipe, duration in ms.
                ad.adb.shell(f"input swipe {x} {y} {x} {y} {int(1000 * duration)}")
            return True  # touch performed
        time.sleep(1)  # no match yet — wait and retry
    # Attempt budget exhausted without a good-enough match.
    print("图像匹配率过低,操作失败")
    return False
07-19
# ---------------------------------------------------------------------------
# Part 1: multi-goal mobile fetch demo (navigate + pick via helper scripts).
# ---------------------------------------------------------------------------
#!/usr/bin/env python
from actionlib.action_client import GoalManager
import rospy
from geometry_msgs.msg import Twist
import actionlib
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
import re
import os
import time


def send_goal(goal_number, goal):
    """Send a numbered move_base goal and log whether it finished within 60 s."""
    client = actionlib.SimpleActionClient('move_base', MoveBaseAction)
    client.wait_for_server()
    goal.target_pose.header.frame_id = "map"
    goal.target_pose.header.stamp = rospy.Time.now()
    client.send_goal(goal)
    str_log = "Send NO. %s Goal !!!" % str(goal_number)
    rospy.loginfo(str_log)
    wait = client.wait_for_result(rospy.Duration.from_sec(60.0))
    if not wait:
        str_log = "The NO. %s Goal Planning Failed for some reasons" % str(goal_number)
        rospy.loginfo(str_log)
    else:
        str_log = "The NO. %s Goal achieved success !!!" % str(goal_number)
        rospy.loginfo(str_log)


def read_goal(filename):
    """Parse a goal file: line 2 holds [x, y], line 3 holds [.., .., z, w].

    NOTE(review): the file handle is opened but never closed — consider a
    `with open(...)` block. Also, `query.group()` will raise AttributeError
    if a line has no [...] section.
    """
    goal = MoveBaseGoal()
    file_to_read = open(filename)
    index = 0
    for line in file_to_read.readlines():
        line = line.strip()
        index += 1
        if index == 2:
            # Extract the bracketed list on the position line.
            pattern = re.compile(r"(?<=\[).*?(?=\])")
            query = pattern.search(line)
            listFromLine = query.group().split(',')
            goal.target_pose.pose.position.x = float(listFromLine[0])
            goal.target_pose.pose.position.y = float(listFromLine[1])
        if index == 3:
            # Extract the bracketed list on the orientation line (elements 2, 3).
            pattern = re.compile(r"(?<=\[).*?(?=\])")
            query = pattern.search(line)
            listFromLine = query.group().split(',')
            goal.target_pose.pose.orientation.z = float(listFromLine[2])
            goal.target_pose.pose.orientation.w = float(listFromLine[3])
    print(goal.target_pose.pose)
    return goal


# Thin wrappers that shell out to external helper scripts.
def pick_aruco():
    os.system("python /home/robuster/beetle_ai/scripts/3pick.py")


def place():
    os.system("python /home/robuster/beetle_ai/scripts/place_2.py")


def reach_test1():
    os.system("python /home/robuster/beetle_ai/scripts/reach_place1.py")


def test_2():
    os.system("python /home/robuster/beetle_ai/scripts/2test_2.py")


def test_1():
    os.system("python /home/robuster/beetle_ai/scripts/2test_1.py")


def test():
    os.system("python /home/robuster/beetle_ai/scripts/2test.py")


def test_3():
    os.system("python /home/robuster/beetle_ai/scripts/2test3.py")


def forward():
    """Drive forward at 0.1 m/s for 15 cmd_vel publishes."""
    print("forward...")
    move_cmd = Twist()
    pub = rospy.Publisher('cmd_vel', Twist, queue_size=1)
    count = 15
    rate = rospy.Rate(count)  # 20hz
    while count > 0:
        move_cmd.linear.x = 0.1
        pub.publish(move_cmd)
        rate.sleep()
        print("forward...")
        count -= 1


def backward():
    """Drive backward at 0.1 m/s for 5 cmd_vel publishes."""
    print("backward...")
    move_cmd = Twist()
    pub = rospy.Publisher('cmd_vel', Twist, queue_size=1)
    count = 5
    rate = rospy.Rate(count)  # 20hz
    while count > 0:
        move_cmd.linear.x = -0.1
        pub.publish(move_cmd)
        rate.sleep()
        print("forward...")  # NOTE(review): prints "forward..." in backward() — likely a copy-paste slip
        count -= 1


def moblie_fetch_demo():
    """Run the fetch sequence, then navigate to goal 2.

    NOTE(review): goal1 is read but never sent to send_goal, and the first
    goal_number assignment is unused — confirm the intended sequence.
    """
    goal1 = read_goal("goal_1.txt")
    goal2 = read_goal("goal_2.txt")
    # pick cube from goal1
    goal_number = 1
    reach_test1()
    pick_aruco()
    test()
    pick_aruco()
    test_1()
    pick_aruco()
    test_2()
    pick_aruco()
    test_3()
    # pick cube from goal2
    goal_number = 2
    send_goal(goal_number, goal2)
    return "Mission Finished."


if __name__ == '__main__':
    rospy.init_node('send_goals_python', anonymous=True)
    result = moblie_fetch_demo()
    rospy.loginfo(result)


# ---------------------------------------------------------------------------
# Part 2: simplified navigation variant.
# NOTE(review): redefines send_goal (different signature: no goal_number) and
# read_goal from Part 1; if both parts live in one module, Part 2's
# definitions shadow Part 1's.
# ---------------------------------------------------------------------------
def send_goal(goal):
    """Send a move_base goal and return True iff it reports SUCCEEDED."""
    client = actionlib.SimpleActionClient('move_base', MoveBaseAction)
    client.wait_for_server()
    goal.target_pose.header.frame_id = "map"
    goal.target_pose.header.stamp = rospy.Time.now()
    client.send_goal(goal)
    client.wait_for_result()
    return client.get_state() == actionlib.GoalStatus.SUCCEEDED


def read_goal(filename):
    """Parse a goal file (same format as Part 1: [x, y] then [.., .., z, w]).

    NOTE(review): same unclosed-file-handle concern as Part 1's read_goal.
    """
    goal = MoveBaseGoal()
    file_to_read = open(filename)
    index = 0
    for line in file_to_read.readlines():
        line = line.strip()
        index += 1
        if index == 2:
            pattern = re.compile(r"(?<=\[).*?(?=\])")
            query = pattern.search(line)
            listFromLine = query.group().split(',')
            goal.target_pose.pose.position.x = float(listFromLine[0])
            goal.target_pose.pose.position.y = float(listFromLine[1])
        if index == 3:
            pattern = re.compile(r"(?<=\[).*?(?=\])")
            query = pattern.search(line)
            listFromLine = query.group().split(',')
            goal.target_pose.pose.orientation.z = float(listFromLine[2])
            goal.target_pose.pose.orientation.w = float(listFromLine[3])
    print(goal.target_pose.pose)
    return goal


def simple_mobile_navigation():
    """Read two goals and drive to goal1; returns "stop" when a goal is falsy.

    NOTE(review): goal2 is read but never sent; the function returns None on
    the success/warn paths (so the caller logs None) — confirm intent.
    """
    goal1 = read_goal("goal_1.txt")
    goal2 = read_goal("goal_2.txt")
    if not goal1 or not goal2:
        return "stop"
    if send_goal(goal1):
        rospy.loginfo("goal1")
    else:
        rospy.logwarn("stop")


if __name__ == '__main__':
    rospy.init_node('simple_navigation_node')
    result = simple_mobile_navigation()
    rospy.loginfo(result)
06-06
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值