Regarding the problem above, could it be an issue with how I am running the code? If so, roughly which part is at fault?

import argparse
import csv
import os
import platform
import sys
import time
from pathlib import Path
import torch
import serial
import cv2
import rospy
from std_srvs.srv import Trigger, TriggerResponse
from ultralytics.utils.plotting import Annotator, colors, save_one_box
from models.common import DetectMultiBackend
from utils.dataloaders import LoadStreams
from utils.general import (
LOGGER,
Profile,
check_img_size,
increment_path,
non_max_suppression,
scale_boxes,
xyxy2xywh,
)
from utils.torch_utils import select_device, smart_inference_mode
class GunControllerNode:
def __init__(self, opt):
self.opt = opt
self.save_img = not opt.nosave
self.source = str(opt.source)
self.project = opt.project
self.name = opt.name
self.exist_ok = opt.exist_ok
        # State variables that track the firing cycle
self.gun_state = "idle" # idle, firing, cooling
self.fire_start_time = 0
        self.dt = [Profile(), Profile(), Profile()]  # timing profilers for pre-process, inference and NMS
        # Open the serial connection to the servo/gun controller board
try:
self.ser = serial.Serial(opt.port, opt.baudrate, timeout=1)
LOGGER.info(f"Successfully opened serial port {opt.port} at {opt.baudrate} baudrate")
except Exception as e:
LOGGER.error(f"Failed to open serial port {opt.port}: {e}")
self.ser = None
        # Initial gimbal angles
        self.servo1_angle = 90  # horizontal servo (0-180)
        self.servo2_angle = 90  # vertical servo (70-110)
        # Aiming-adjustment state
self.is_adjusting = False
self.adjustment_complete_time = 0
self.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)
(self.save_dir / "labels" if opt.save_txt else self.save_dir).mkdir(parents=True, exist_ok=True)
self.device = select_device(opt.device)
self.model = DetectMultiBackend(opt.weights, device=self.device, dnn=opt.dnn, data=opt.data, fp16=opt.half)
self.stride, self.names, self.pt = self.model.stride, self.model.names, self.model.pt
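        # round the requested inference size to a multiple of the model stride, then open the video/webcam stream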
self.imgsz = check_img_size(opt.imgsz, s=self.stride)
self.dataset = LoadStreams(self.source, img_size=self.imgsz, stride=self.stride, auto=self.pt)
self.vid_path, self.vid_writer = [None] * len(self.dataset), [None] * len(self.dataset)
self.model.warmup(imgsz=(1 if self.pt or self.model.triton else len(self.dataset), 3, *self.imgsz))
        rospy.init_node('gun_controller', anonymous=True)  # init_node must run before any Service is created
        self.gun_service = rospy.Service('/gun', Trigger, self.gun_callback)
self.view_img = opt.view_img
self.windows = []
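    # The two send_* methods write a 6-byte frame over the serial link. Assumed frame
    # layout of the controller firmware: 0xAA 0xBB header, one ID/command byte,
    # one data byte (servo angle or fire flag), then CR LF as terminator.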
def send_servo_command(self, servo_id, angle):
if self.ser is not None:
try:
cmd = bytes([0xAA, 0xBB, servo_id, angle, 0x0D, 0x0A])
self.ser.write(cmd)
return True
except Exception as e:
LOGGER.error(f"Failed to send servo command: {e}")
return False
return False
def send_gun_command(self, is_fire):
if self.ser is not None:
try:
if is_fire:
cmd = bytes([0xAA, 0xBB, 0x03, 0x01, 0x0D, 0x0A])
else:
cmd = bytes([0xAA, 0xBB, 0x03, 0x00, 0x0D, 0x0A])
self.ser.write(cmd)
return True
except Exception as e:
LOGGER.error(f"Failed to send gun command: {e}")
return False
return False
    @smart_inference_mode()
    def gun_callback(self, req):
response = TriggerResponse()
try:
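            # Stream inference loop: with a webcam source LoadStreams yields frames indefinitely,
            # so this callback only returns when no target is detected or an error/stream end occurs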
for path, im, im0s, vid_cap, s in self.dataset:
with self.dt[0]:
im = torch.from_numpy(im).to(self.model.device)
im = im.half() if self.model.fp16 else im.float()
im /= 255
if len(im.shape) == 3:
im = im[None]
with self.dt[1]:
pred = self.model(im, augment=self.opt.augment, visualize=self.opt.visualize)
with self.dt[2]:
pred = non_max_suppression(pred, self.opt.conf_thres, self.opt.iou_thres, self.opt.classes, self.opt.agnostic_nms, max_det=self.opt.max_det)
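                # pred is a list with one (n, 6) tensor per image: [x1, y1, x2, y2, conf, cls]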
for i, det in enumerate(pred):
p, im0, frame = path[i], im0s[i].copy(), self.dataset.count
p = Path(p)
save_path = str(self.save_dir / p.name)
txt_path = str(self.save_dir / "labels" / p.stem) + ("" if self.dataset.mode == "image" else f"_{frame}")
s += f"{im.shape[2]}x{im.shape[3]} "
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]
annotator = Annotator(im0, line_width=self.opt.line_thickness, example=str(self.names))
if det is not None and len(det):
det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()
for c in det[:, 5].unique():
n = (det[:, 5] == c).sum()
s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, "
max_area = 0
selected_det = None
for *xyxy, conf, cls in reversed(det):
area = (xyxy[2] - xyxy[0]) * (xyxy[3] - xyxy[1])
if area > max_area:
max_area = area
selected_det = (xyxy, conf, cls)
if selected_det is not None:
xyxy, conf, cls = selected_det
x_center = float((xyxy[0] + xyxy[2]) / 2)
y_center = float((xyxy[1] + xyxy[3]) / 2)
cv2.circle(im0, (int(x_center), int(y_center)), 5, (0, 0, 255), -1)
H, W = im0.shape[:2]
center_x, center_y = W // 2, H // 2
threshold_x = W * 0.05
threshold_y = H * 0.05
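                            # aiming thresholds: 5% of the frame size; the target counts as centred once the error drops below half of that (2.5%)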
                            CAMERA_OFFSET_ANGLE = -5  # fixed horizontal offset (in degrees) caused by the camera mounting
                            pixels_per_degree = W / 60  # assume a horizontal field of view of roughly 60 degrees
                            fixed_offset_pixels = int(CAMERA_OFFSET_ANGLE * pixels_per_degree)  # offset converted to pixels
                            # Compensated image centre (replaces the original centre-line calculation)
                            adjusted_center_x = center_x + fixed_offset_pixels
                            cv2.line(im0, (adjusted_center_x, 0), (adjusted_center_x, H), (255, 0, 0), 1)  # draw the compensated centre line in blue
                            # Deviation is measured from the compensated centre (replaces the original dx calculation)
                            dx = x_center - adjusted_center_x
                            dy = y_center - center_y  # the vertical direction is unchanged
needs_adjustment = abs(dx) > threshold_x * 0.5 or abs(dy) > threshold_y * 0.5
if needs_adjustment:
self.is_adjusting = True
if self.gun_state == "firing":
self.gun_state = "idle"
self.send_gun_command(False)
LOGGER.info("Aborting fire due to target movement")
if abs(dx) > threshold_x:
if dx < 0:
self.servo1_angle += 2
else:
self.servo1_angle -= 2
self.servo1_angle = max(0, min(180, self.servo1_angle))
self.send_servo_command(0x01, self.servo1_angle)
elif abs(dx) > threshold_x * 0.5:
if dx < 0:
self.servo1_angle += 1
else:
self.servo1_angle -= 1
self.servo1_angle = max(0, min(180, self.servo1_angle))
self.send_servo_command(0x01, self.servo1_angle)
if abs(dy) > threshold_y:
if dy < 0:
self.servo2_angle += 2
else:
self.servo2_angle -= 2
self.servo2_angle = max(70, min(110, self.servo2_angle))
self.send_servo_command(0x02, self.servo2_angle)
elif abs(dy) > threshold_y * 0.5:
if dy < 0:
self.servo2_angle += 1
else:
self.servo2_angle -= 1
self.servo2_angle = max(70, min(110, self.servo2_angle))
self.send_servo_command(0x02, self.servo2_angle)
LOGGER.info(f"Adjusting: dx={dx:.1f}, dy={dy:.1f} | Servo angles: {self.servo1_angle}°, {self.servo2_angle}°")
else:
if self.is_adjusting:
self.adjustment_complete_time = time.time()
self.is_adjusting = False
LOGGER.info("Adjustment completed, waiting 2 seconds before firing")
if self.gun_state == "idle" and not self.is_adjusting and time.time() - self.adjustment_complete_time >= 2:
if self.send_gun_command(True):
self.fire_start_time = time.time()
self.gun_state = "firing"
LOGGER.info("Firing started")
if self.gun_state == "firing" and time.time() - self.fire_start_time >= 2:
if self.send_gun_command(False):
self.gun_state = "cooling"
LOGGER.info("Firing stopped, entering 3-second cooling period")
if self.gun_state == "cooling" and time.time() - self.fire_start_time >= 5:
self.gun_state = "idle"
LOGGER.info("Cooling completed, ready to fire again")
else:
if self.gun_state in ["firing", "cooling"]:
self.send_gun_command(False)
self.gun_state = "idle"
LOGGER.info("No target detected, gun turned off")
response.success = False
response.message = "No target detected, gun turned off"
return response
im0 = annotator.result()
if self.view_img:
if platform.system() == "Linux" and p not in self.windows:
self.windows.append(p)
cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)
cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
cv2.imshow(str(p), im0)
cv2.waitKey(1)
LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{self.dt[1].dt * 1e3:.1f}ms")
if self.ser is not None:
self.ser.close()
response.success = True
response.message = "Gun operation completed successfully"
except Exception as e:
LOGGER.error(f"Error during gun operation: {e}")
response.success = False
response.message = f"Error during gun operation: {e}"
return response
def run(self):
rospy.spin()
def parse_opt():
parser = argparse.ArgumentParser()
parser.add_argument("--weights", nargs="+", type=str, default=Path("target.pt"), help="model path or triton URL")
parser.add_argument("--source", type=str, default=Path("0"), help="file/dir/URL/glob/screen/0(webcam)")
parser.add_argument("--data", type=str, default=Path("data/coco128.yaml"), help="(optional) dataset.yaml path")
parser.add_argument("--imgsz", "--img", "--img-size", nargs="+", type=int, default=[640], help="inference size h,w")
parser.add_argument("--conf-thres", type=float, default=0.25, help="confidence threshold")
parser.add_argument("--iou-thres", type=float, default=0.25, help="NMS IoU threshold")
parser.add_argument("--max-det", type=int, default=1000, help="maximum detections per image")
parser.add_argument("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
parser.add_argument("--view-img", action="store_true", help="show results")
parser.add_argument("--save-txt", action="store_true", help="save results to *.txt")
parser.add_argument("--save-conf", action="store_true", help="save confidences in --save-txt labels")
parser.add_argument("--save-crop", action="store_true", help="save cropped prediction boxes")
parser.add_argument("--nosave", action="store_true", help="do not save images/videos")
parser.add_argument("--classes", nargs="+", type=int, help="filter by class: --classes 0, or --classes 0 2 3")
parser.add_argument("--agnostic-nms", action="store_true", help="class-agnostic NMS")
parser.add_argument("--augment", action="store_true", help="augmented inference")
parser.add_argument("--visualize", action="store_true", help="visualize features")
parser.add_argument("--update", action="store_true", help="update all models")
parser.add_argument("--project", default=Path("runs/detect"), help="save results to project/name")
parser.add_argument("--name", default="exp", help="save results to project/name")
parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment")
parser.add_argument("--line-thickness", default=3, type=int, help="bounding box thickness (pixels)")
parser.add_argument("--hide-labels", default=False, action="store_true", help="hide labels")
parser.add_argument("--hide-conf", default=False, action="store_true", help="hide confidences")
parser.add_argument("--half", action="store_true", help="use FP16 half-precision inference")
parser.add_argument("--dnn", action="store_true", help="use OpenCV DNN for ONNX inference")
parser.add_argument("--port", type=str, default="/dev/gun", help="serial port for servo control")
parser.add_argument("--baudrate", type=int, default=115200, help="baud rate for serial communication")
opt = parser.parse_args()
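    # expand a single --imgsz value such as 640 to [640, 640]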
opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1
return opt
def main(opt):
controller = GunControllerNode(opt)
controller.run()
if __name__ == "__main__":
opt = parse_opt()
main(opt)
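
For reference, a minimal way to exercise the /gun service from another terminal once the node above is running (just a sketch, assuming a roscore is up and the camera and serial device are connected):

import rospy
from std_srvs.srv import Trigger

rospy.init_node('gun_test_client')          # separate test node
rospy.wait_for_service('/gun')              # wait until the controller has advertised the service
fire = rospy.ServiceProxy('/gun', Trigger)
resp = fire()                               # triggers one detect/aim/fire cycle
print(resp.success, resp.message)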