经过这几天的验证,终于走通了FFmpeg读取USB摄像头H264帧,然后用rtmp推流。使用的版本是4.0.2,网上的示例要么是命令形式的,要么是读取YUV格式的数据,然后再编码的,所以只能自己摸索了。
FFmpeg的源码在ubuntu16.04上的编译就不说了,这个网上的文章很多,这里我要说的是,好像FFmpeg对v4l2的封装,不能从摄像头多种输出格式数据中,选择
V4L2_PIX_FMT_H264
这种格式的数据输出,只能是默认的输出,这点还没有研究明白。
没办法只能自己写v4l2的操作,将数据保存到内存中,再用FFmpeg从内存中读取数据,最后用rtmp推流。
这里要非常感谢 雷神的两篇博客:
可以说没有这两篇博客,我还要摸索很久。可惜他已不在人世了,但他的贡献现在还在让我们受益。
代码
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <fcntl.h>
#include <malloc.h>
#include <math.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/poll.h>
#include <linux/types.h>
#include <linux/videodev2.h>
#include <libavutil/time.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavdevice/avdevice.h>
// Capture backend name and the device node to open.
#define DEV_TYPE "video4linux2"
#define DEV_NAME "/dev/video1"
// Maximum number of camera channels tracked/polled at once.
#define MAX_CHANNEL (4)
// 96 KiB I/O buffer size — presumably for FFmpeg's custom AVIO (memory
// reader) context; confirm at the avio_alloc_context() call site.
#define AV_IO_BUF_SIZE (96*1024)
// Zero out an object (pass the object itself, not a pointer to it).
#define CLEAR(x) memset(&(x), 0, sizeof(x))
// One V4L2 capture buffer. With streaming I/O the driver exposes frame
// memory that the application maps; start/length describe that region
// (assumes MMAP streaming — confirm against the VIDIOC_REQBUFS setup code).
struct buffer {
void *start;   // beginning of the mapped frame memory
size_t length; // size of the region in bytes
};
// Per-camera state: identification plus every V4L2 handle needed to run
// streaming capture from one USB camera.
struct usbcamera_node
{
int channel;                    // logical channel index
char id[32];                    // camera identifier string
int usb_port;                   // physical USB port number
// --- V4L2 capture state ---
char devname[32];               // device node path, e.g. "/dev/video1"
int fd;                         // file descriptor returned by open(devname)
struct v4l2_format fmt;         // pixel format/resolution passed to VIDIOC_S_FMT
struct v4l2_streamparm parm;    // streaming parameters (frame rate etc.)
struct v4l2_requestbuffers req; // buffer request for VIDIOC_REQBUFS
struct buffer *buffers;         // array of mapped capture buffers
int n_buffers;                  // number of entries in buffers[]
int poll_index[MAX_CHANNEL];    // presumably indices into usbcamra_poll_fd[] — confirm at use site
};
// Single global camera instance plus the poll() bookkeeping for it.
struct usbcamera_node usbcamra;
struct pollfd usbcamra_poll_fd[MAX_CHANNEL]; // fd set watched by poll()
nfds_t usbcamra_poll_fd_num = 0;             // number of valid entries in usbcamra_poll_fd[]
unsigned int frame_len = 0;                  // NOTE(review): presumably length of the current frame in bytes — confirm at use site
unsigned int frame_cnt = 0;                  // NOTE(review): presumably running count of captured frames — confirm at use site
int avError(int errNum); // forward declaration; definition not visible in this chunk (likely prints an FFmpeg error code)
/*
 * Thin wrapper around ioctl() that transparently retries when the call
 * is interrupted by a signal (EINTR), so callers never need to handle
 * that case themselves.
 *
 * Returns the result of the final ioctl() attempt: a non-negative value
 * on success, or -1 on failure with errno set (never EINTR).
 */
static int xioctl(int fh, int request, void *arg)
{
    int rc;

    for (;;)
    {
        rc = ioctl(fh, request, arg);
        if (rc != -1 || errno != EINTR)
            break;
    }

    return rc;
}
static int video_init(struct usbcamera_node *camera_node)
{
struct v4l2_capability cap;
struct v4l2_fmtdesc fmtdesc;
int ret = 0;
// open the video device with the API of open()
camera_node->fd = open(camera_node->devname, O_RDWR | O_NONBLOCK, 0);
if (-1 == camera_node->fd)
{
fprintf(stderr, "Cannot open '%s': %d, %s\n", camera_node->devname, errno, strerror(errno));
return -1;
}
// inquire video device capability with the API of ioctl
if (-1 == xioctl(camera_node->fd, VIDIOC_QUERYCAP, &cap))
{
fprintf(stderr, "%s is no V4L2 device\n", camera_node->devname);
return -1;
}
// Set video device settings
if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE))
{
fprintf(stderr, "%s is no video capture device\n", camera_node->devname);
return -1;
}
if (!(cap.capabilities & V4L2_CAP_STREAMING))
{
fprintf(stderr, "%s does not support streaming i/o\n", camera_node->devname);
return -1;
}
printf("\nVIDOOC_QUERYCAP\n");
printf("the camera driver is: %s\n", cap.driver);
printf("the camera card is: %s\n", cap.card);
printf("the camera bus info is: %s\n", cap.bus_info);
printf("the version is: %d\n", cap.version);
printf("the capabilities is: 0x%x\n", cap.capabilities);
printf("the device_caps is: 0x%x\n", cap.device_caps);
fmtdesc.index = 0; //form number
fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;//frame type
while(ioctl(camera_node->fd, VIDIOC_ENUM_FMT, &fmtdesc) != -1)
{
printf("VIDIOC_ENUM_FMT success! fmtdesc.index:%d, fmtdesc.type:%d, fmtdesc.flags:%d, "
"fmtdesc.description:%s, fmtdesc.pixelformat:%d\n",
fmtdesc.index, fmtdesc.type, fmtdesc.flags, fmtdesc.description, fmtdesc.pixelformat);
fmtdesc.index ++;
}
if (-1 == xioctl(camera_node->fd, VIDIOC_S_FMT, &camera_node->fmt))
{
fprintf(stderr, "%s set fmt failed\n", camera_node->devname);
return -1;
}
printf("VIDIOC_S_FMT success! width:%d, height:%d, pixelformat:%x, field:%d, bytesperline:%d, "
"sizeimage:%d, colorspace:%d, priv:%d, flags:%x, ycbcr_enc:%d, quantization:%d, xfer_func:%d\n",
camera_node->fmt.fmt.pix.width, camera_node->fmt.fmt.pix.height, camera_node->fmt.fmt.pix.pixelformat,
camera_node->fmt.fmt.pix.field, camera_node->fmt.fmt.pix.bytesperline, camera_node->fmt.fmt.pix.sizeimage,
camera_node->fmt.fmt.pix.colorspace, camera_node->fmt.fmt.pix.priv, camera_node->fmt