TX2通过opencv调用mipi, usb, ipc三种相机(python)

最近由于项目需要使用tx2并调用相机,在Nvidia技术论坛上找到了相关说明与代码,整理以下以便后续使用。

项目中需要对三种相机(mipi相机(猎豹), usb相机和网络相机)的性能进行对比,在延迟方面,测试发现usb相机较好而mipi与ipc测试结果比较接近。但在测试中发现不同品牌的摄像头延迟差距很大,从摄像头种类上直接判断性能并不准确。

网上很多教程的第一步都是删除jetpack自动安装的opencv并重新安装(打开gstreamer),但我使用jetpack4.2安装opencv后gstreamer已经是打开的(R32),并不需要重新安装。可以通过程序(C++)

std::cout << cv::getBuildInformation() << std::endl;

输出opencv版本及判断是否启用gstreamer

网上给出了调用三种摄像头的通用python代码如下(链接https://gist.github.com/jkjung-avt/86b60a7723b97da19f7bfa3cb7d2690e)

import sys
import argparse
import subprocess

import cv2


# Name of the single OpenCV display window shared by open_window()/read_cam().
WINDOW_NAME = 'CameraDemo'


def parse_args():
    """Parse the command-line options for the camera demo.

    Returns:
        argparse.Namespace carrying the camera-selection flags
        (--rtsp/--usb, onboard by default) and the image geometry.
    """
    parser = argparse.ArgumentParser(
        description='Capture and display live camera video on Jetson TX2/TX1')
    parser.add_argument('--rtsp', dest='use_rtsp', action='store_true',
                        help='use IP CAM (remember to also set --uri)')
    parser.add_argument('--uri', dest='rtsp_uri', type=str, default=None,
                        help='RTSP URI, e.g. rtsp://192.168.1.64:554')
    parser.add_argument('--latency', dest='rtsp_latency', type=int,
                        default=200, help='latency in ms for RTSP [200]')
    parser.add_argument('--usb', dest='use_usb', action='store_true',
                        help='use USB webcam (remember to also set --vid)')
    parser.add_argument('--vid', dest='video_dev', type=int, default=1,
                        help='device # of USB webcam (/dev/video?) [1]')
    parser.add_argument('--width', dest='image_width', type=int,
                        default=1920, help='image width [1920]')
    parser.add_argument('--height', dest='image_height', type=int,
                        default=1080, help='image height [1080]')
    return parser.parse_args()


def open_cam_rtsp(uri, width, height, latency):
    """Open an RTSP (IP-camera) stream via a GStreamer pipeline.

    The H.264 stream is decoded on the Jetson hardware decoder
    (omxh264dec), scaled/converted by nvvidconv, and delivered to
    OpenCV as BGR frames through appsink.

    Args:
        uri: RTSP URI, e.g. 'rtsp://192.168.1.64:554'.
        width: output frame width requested from nvvidconv.
        height: output frame height requested from nvvidconv.
        latency: jitter-buffer latency in milliseconds for rtspsrc.

    Returns:
        cv2.VideoCapture bound to the GStreamer pipeline.
    """
    pipeline = (
        f'rtspsrc location={uri} latency={latency} ! '
        'rtph264depay ! h264parse ! omxh264dec ! '
        'nvvidconv ! '
        f'video/x-raw, width=(int){width}, height=(int){height}, '
        'format=(string)BGRx ! '
        'videoconvert ! appsink'
    )
    return cv2.VideoCapture(pipeline, cv2.CAP_GSTREAMER)


def open_cam_usb(dev, width, height):
    """Open a USB (V4L2) webcam via a GStreamer pipeline.

    A plain cv2.VideoCapture(dev) would also work; the explicit
    pipeline exists so width/height can be fixed in the caps.

    Args:
        dev: index N of the device node /dev/videoN.
        width: requested capture width.
        height: requested capture height.

    Returns:
        cv2.VideoCapture bound to the GStreamer pipeline.
    """
    pipeline = (
        f'v4l2src device=/dev/video{dev} ! '
        f'video/x-raw, width=(int){width}, height=(int){height} ! '
        'videoconvert ! appsink'
    )
    return cv2.VideoCapture(pipeline, cv2.CAP_GSTREAMER)


def open_cam_onboard(width, height):
    """Open the Jetson onboard (MIPI/CSI) camera via GStreamer.

    Probes the installed GStreamer elements to pick the right source:
    'nvcamerasrc' on older L4T releases, 'nvarguscamerasrc' on newer
    ones (R32 / JetPack 4.x).

    Args:
        width: output frame width handed to OpenCV.
        height: output frame height handed to OpenCV.

    Returns:
        cv2.VideoCapture delivering BGR frames from the CSI camera.

    Raises:
        RuntimeError: if neither onboard camera source element is found.
    """
    # Fix: decode the bytes returned by check_output instead of wrapping
    # them in str(), which only worked by accident via the b'...' repr.
    gst_elements = subprocess.check_output(
        ['gst-inspect-1.0']).decode('utf-8', errors='replace')
    if 'nvcamerasrc' in gst_elements:
        # On versions of L4T prior to 28.1, add 'flip-method=2' into gst_str
        gst_str = ('nvcamerasrc ! '
                   'video/x-raw(memory:NVMM), '
                   'width=(int)2592, height=(int)1458, '
                   'format=(string)I420, framerate=(fraction)30/1 ! '
                   'nvvidconv ! '
                   'video/x-raw, width=(int){}, height=(int){}, '
                   'format=(string)BGRx ! '
                   'videoconvert ! appsink').format(width, height)
    elif 'nvarguscamerasrc' in gst_elements:
        gst_str = ('nvarguscamerasrc ! '
                   'video/x-raw(memory:NVMM), '
                   'width=(int)1920, height=(int)1080, '
                   'format=(string)NV12, framerate=(fraction)30/1 ! '
                   'nvvidconv flip-method=2 ! '
                   'video/x-raw, width=(int){}, height=(int){}, '
                   'format=(string)BGRx ! '
                   'videoconvert ! appsink').format(width, height)
    else:
        raise RuntimeError('onboard camera source not found!')
    return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)


def open_window(width, height):
    """Create and place the resizable display window for the demo.

    Args:
        width: initial window width in pixels.
        height: initial window height in pixels.
    """
    # WINDOW_NORMAL makes the window user-resizable (and fullscreen-able).
    cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
    cv2.setWindowTitle(WINDOW_NAME, 'Camera Demo for Jetson TX2/TX1')
    cv2.resizeWindow(WINDOW_NAME, width, height)
    cv2.moveWindow(WINDOW_NAME, 0, 0)


def read_cam(cap):
    """Display frames from `cap` in WINDOW_NAME until the user quits.

    Keys: Esc quits, H toggles the help overlay, F toggles fullscreen.
    The loop also terminates when the user closes the window or the
    camera stops delivering frames.

    Args:
        cap: an opened cv2.VideoCapture.
    """
    show_help = True
    full_scrn = False
    help_text = '"Esc" to Quit, "H" for Help, "F" to Toggle Fullscreen'
    font = cv2.FONT_HERSHEY_PLAIN
    while True:
        if cv2.getWindowProperty(WINDOW_NAME, 0) < 0:
            # Check to see if the user has closed the window
            # If yes, terminate the program
            break
        ret, img = cap.read()  # grab the next image frame from camera
        if not ret or img is None:
            # Bug fix: the original discarded the success flag; a failed
            # read() yields img=None and putText/imshow would then crash.
            break
        if show_help:
            # Draw the text twice (dark outline, then light face) so the
            # overlay stays readable on any background.
            cv2.putText(img, help_text, (11, 20), font,
                        1.0, (32, 32, 32), 4, cv2.LINE_AA)
            cv2.putText(img, help_text, (10, 20), font,
                        1.0, (240, 240, 240), 1, cv2.LINE_AA)
        cv2.imshow(WINDOW_NAME, img)
        key = cv2.waitKey(10)
        if key == 27:  # ESC key: quit program
            break
        elif key in (ord('H'), ord('h')):  # toggle help message
            show_help = not show_help
        elif key in (ord('F'), ord('f')):  # toggle fullscreen
            full_scrn = not full_scrn
            mode = cv2.WINDOW_FULLSCREEN if full_scrn else cv2.WINDOW_NORMAL
            cv2.setWindowProperty(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN, mode)


def main():
    """Entry point: choose a camera source from CLI flags and run the demo."""
    args = parse_args()
    print('Called with args:')
    print(args)
    print('OpenCV version: {}'.format(cv2.__version__))

    if args.use_rtsp:
        cap = open_cam_rtsp(args.rtsp_uri, args.image_width,
                            args.image_height, args.rtsp_latency)
    elif args.use_usb:
        cap = open_cam_usb(args.video_dev, args.image_width,
                           args.image_height)
    else:
        # Neither --rtsp nor --usb given: use the Jetson onboard camera.
        cap = open_cam_onboard(args.image_width, args.image_height)

    if not cap.isOpened():
        sys.exit('Failed to open camera!')

    open_window(args.image_width, args.image_height)
    read_cam(cap)

    cap.release()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main()

运行方式

板载相机(mipi猎豹)

python3 tegra-cam.py

usb相机(分辨率1280*720, vid 1表示/dev/video1)

python3 tegra-cam.py --usb --vid 1 --width 1280 --height 720

ipc(替换url)

python3 tegra-cam.py --rtsp --uri rtsp://admin:XXXXXX@192.168.1.64:554

 

Python中,OpenCV (Open Source Computer Vision Library) 可以配合 GStreamer (一种跨平台多媒体框架) 来处理视频流。GStreamer允许你在多种协议、文件格式以及硬件设备间流畅地传递数据,包括网络摄像头、本地视频文件等。 下面是一个简单的示例,演示了如何使用GStreamer的appsink配合OpenCV显示视频流(注意:appsink需要设置`emit-signals=true`并连接`new-sample`信号才能逐帧取数据;v4l2src输出的原始BGR帧无需H.264解码器):

```python
import cv2
import numpy as np
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GLib

# 初始化GStreamer
Gst.init(None)

def on_new_sample(sink):
    sample = sink.emit("pull-sample")
    if not sample:
        return Gst.FlowReturn.ERROR
    # 获取样本的数据
    buffer = sample.get_buffer()
    structure = sample.get_caps().get_structure(0)
    width = structure.get_value("width")
    height = structure.get_value("height")
    img_data = buffer.extract_dup(0, buffer.get_size())
    # 转换数据为OpenCV可以使用的BGR格式
    img_array = np.frombuffer(img_data, dtype=np.uint8).reshape((height, width, 3))
    # 显示图像
    cv2.imshow("Video Stream", img_array)
    cv2.waitKey(1)
    return Gst.FlowReturn.OK

# 创建pipeline(appsink需命名并开启emit-signals)
pipeline_str = ("v4l2src device=/dev/video0 ! videoconvert ! "
                "video/x-raw,format=BGR ! appsink name=appsink emit-signals=true")
player = Gst.parse_launch(pipeline_str)
sink = player.get_by_name("appsink")
sink.connect("new-sample", on_new_sample)

# 启动播放
player.set_state(Gst.State.PLAYING)
loop = GLib.MainLoop()
try:
    loop.run()
except KeyboardInterrupt:
    pass

# 关闭和释放资源
player.set_state(Gst.State.NULL)
cv2.destroyAllWindows()
```

这个脚本首先初始化GStreamer,然后创建一个从/dev/video0读取原始帧、转换为BGR格式并发送到appsink的管线。`on_new_sample`回调会接收每帧新的数据,并将其显示在OpenCV窗口中。
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值