iOS live-stream pushing can sustain 30 frames per second, which is better than typical Android devices. Software-encoding the video makes the phone run hot, so hardware encoding has to be used; playback is rendered with OpenGL.
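On iOS the hardware encoder is exposed through VideoToolbox. The session setup is not shown in this post, so here is a minimal sketch of creating a hardware H.264 compression session; createEncoder and its callback parameter are illustrative names, not code from this project:

#import <VideoToolbox/VideoToolbox.h>

// Minimal sketch of creating a hardware H.264 session (illustrative, not this project's code)
static VTCompressionSessionRef createEncoder(int width, int height,
                                             VTCompressionOutputCallback callback,
                                             void *context) {
    VTCompressionSessionRef session = NULL;
    VTCompressionSessionCreate(kCFAllocatorDefault, width, height,
                               kCMVideoCodecType_H264, NULL, NULL, NULL,
                               callback, context, &session);
    // Real-time mode with a 30 fps target, matching the capture rate mentioned above
    VTSessionSetProperty(session, kVTCompressionPropertyKey_RealTime, kCFBooleanTrue);
    int fps = 30;
    CFNumberRef fpsRef = CFNumberCreate(kCFAllocatorDefault, kCFNumberIntType, &fps);
    VTSessionSetProperty(session, kVTCompressionPropertyKey_ExpectedFrameRate, fpsRef);
    CFRelease(fpsRef);
    VTCompressionSessionPrepareToEncodeFrames(session);
    return session;
}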
The FFmpeg initialization code is as follows:
int init_Code(int width, int height, const char *out_path) {
    av_log_set_callback(custom_log);
    av_register_all();
    avformat_network_init();
    avformat_alloc_output_context2(&ofmt_ctx, NULL, "flv", out_path);

    // Open the output URL; this must happen before avformat_write_header() for muxing
    AVOutputFormat *ofmt = ofmt_ctx->oformat;
    if (!(ofmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&ofmt_ctx->pb, out_path, AVIO_FLAG_READ_WRITE) < 0) {
            return -1;
        }
    }
    if (isAudio == 1) {
        if (init_audio_Code() != 0)  // initialize the audio stream parameters
            return -1;
    }
    if (isVideo == 1) {
        if (init_video_code(width, height) != 0)  // initialize the video stream parameters
            return -1;
    }
    av_dump_format(ofmt_ctx, 0, out_path, 1);
    if (avformat_write_header(ofmt_ctx, NULL) < 0) {  // write the container header
        //LOGE("Error occurred when opening output file\n");
        return -1;
    }

    start_thread_encode();  // start the encoding thread
    return 0;
}
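init_audio_Code and init_video_code are the author's own helpers and are not shown. Since the frames arrive already encoded by the hardware, a plausible sketch of init_video_code under the old pre-3.1 FFmpeg API used here would only declare the H.264 stream rather than open an FFmpeg encoder (the millisecond time base is an assumption):

// Hypothetical sketch of init_video_code, not the author's actual code
int init_video_code(int width, int height) {
    video_st = avformat_new_stream(ofmt_ctx, NULL);
    if (!video_st)
        return -1;
    video_st->time_base = (AVRational){1, 1000};       // assume timestamps in milliseconds
    video_st->codec->codec_type = AVMEDIA_TYPE_VIDEO;  // old AVStream->codec API
    video_st->codec->codec_id   = AV_CODEC_ID_H264;
    video_st->codec->width      = width;
    video_st->codec->height     = height;
    if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
        video_st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    return 0;
}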
Video hardware encoding:
/* Mux one hardware-encoded H.264 frame */
int encodeVideo_h264(uint8_t *in, int64_t time, int size, int keyframe) {
    int ret;
    // An AVPacket must be initialized with av_init_packet before use
    av_init_packet(&video_pkt);
    video_pkt.stream_index = video_st->index;
    video_pkt.data = in;
    video_pkt.size = size;
    video_pkt.pos = -1;
    ptsPacket(video_st, &video_pkt, time);  // fill in pts/dts for the stream
    if (video_pkt.buf != NULL) {  // buf is NULL right after av_init_packet; kept as a safety net
        video_pkt.buf->data = in;
        video_pkt.buf->size = size;
    }
    video_pkt.flags = keyframe;  // AV_PKT_FLAG_KEY == 1
    if (keyframe == 1) {
        //LOGE("hardware encoder - keyframe: %lld", time);
    }

    ret = av_interleaved_write_frame(ofmt_ctx, &video_pkt);
    if (ret != 0) {
        printf("----encodeVideo--encodeVideo -ret: %d ", ret);
        //LOGE("----encodeVideo--encodeVideo -ret: %d ", ret);
    }
    av_free_packet(&video_pkt);

    return 0;
}
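ptsPacket is also one of the author's helpers. Assuming `time` is a millisecond timestamp, a minimal sketch would rescale it into the stream's time base:

// Hypothetical sketch of ptsPacket, assuming millisecond input timestamps
static void ptsPacket(AVStream *st, AVPacket *pkt, int64_t time_ms) {
    AVRational ms = {1, 1000};
    pkt->pts = av_rescale_q(time_ms, ms, st->time_base);
    pkt->dts = pkt->pts;  // the hardware encoder emits no B-frames, so dts == pts
}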
Audio hardware encoding:
/* Mux one hardware-encoded AAC frame */
int encodeAudio_AAC(uint8_t *in, int64_t time, int size) {
    if (isAudio == 0)
        return 0;
    av_init_packet(&audio_pkt);
    audio_pkt.stream_index = audio_st->index;  // the audio stream this packet belongs to
    audio_pkt.data = in;
    audio_pkt.size = size;
    audio_pkt.pts = time;
    audio_pkt.dts = time;
    audio_pkt.flags = 1;  // every audio packet is a keyframe (AV_PKT_FLAG_KEY)
    int ret = av_interleaved_write_frame(ofmt_ctx, &audio_pkt);
    if (ret != 0) {
        //LOGE("----encodeAudio---ret: %d size:%d ,time:%lld ", ret, size, time);
    }
    return 0;
}
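Note that, unlike the video path, this writes `time` into pts/dts without rescaling, which is only correct if the audio stream's time base already matches the unit of `time`. If `time` were in milliseconds and the time bases differed, the same conversion as the video path would apply:

// Assumed fix if `time` is in milliseconds and the stream time base differs
audio_pkt.pts = av_rescale_q(time, (AVRational){1, 1000}, audio_st->time_base);
audio_pkt.dts = audio_pkt.pts;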
Initializing the camera:
- (void)initCamera:(BOOL)type
{
    NSError *deviceError;
    AVCaptureDeviceInput *inputCameraDevice;
    if (type == false) {
        inputCameraDevice = [AVCaptureDeviceInput deviceInputWithDevice:cameraDeviceB error:&deviceError];
    } else {
        inputCameraDevice = [AVCaptureDeviceInput deviceInputWithDevice:cameraDeviceF error:&deviceError];
    }
    AVCaptureVideoDataOutput *outputVideoDevice = [[AVCaptureVideoDataOutput alloc] init];

    NSString *key = (NSString *)kCVPixelBufferPixelFormatTypeKey;
    NSNumber *val = [NSNumber numberWithUnsignedInt:kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange];  // NV12
    NSDictionary *videoSettings = [NSDictionary dictionaryWithObject:val forKey:key];
    outputVideoDevice.videoSettings = videoSettings;
    [outputVideoDevice setSampleBufferDelegate:self queue:dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_HIGH, 0)];
    captureSession = [[AVCaptureSession alloc] init];
    [captureSession addInput:inputCameraDevice];
    [captureSession addOutput:outputVideoDevice];
    [captureSession beginConfiguration];

    [captureSession setSessionPreset:AVCaptureSessionPreset352x288];
    connectionVideo = [outputVideoDevice connectionWithMediaType:AVMediaTypeVideo];
#if TARGET_OS_IPHONE
    [self setRelativeVideoOrientation];

    NSNotificationCenter *notify = [NSNotificationCenter defaultCenter];
    [notify addObserver:self
               selector:@selector(statusBarOrientationDidChange:)
                   name:@"StatusBarOrientationDidChange"
                 object:nil];
#endif

    [captureSession commitConfiguration];
    recordLayer = [AVCaptureVideoPreviewLayer layerWithSession:captureSession];
    [recordLayer setVideoGravity:AVLayerVideoGravityResizeAspect];
}
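The captured frames then arrive in the sample buffer delegate. A sketch of that callback, assuming encodeSession is an ivar holding the VTCompressionSessionRef created for hardware encoding (the name is illustrative):

// Sketch of the capture callback: forward each NV12 frame to the hardware encoder
- (void)captureOutput:(AVCaptureOutput *)output
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection
{
    CVImageBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    CMTime pts = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
    // encodeSession is an assumed ivar holding the VideoToolbox session
    VTCompressionSessionEncodeFrame(encodeSession, pixelBuffer, pts,
                                    kCMTimeInvalid, NULL, NULL, NULL);
}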
Setting the audio parameters:
- (void)setupAudioFormat:(UInt32)inFormatID SampleRate:(int)sampleRate
{
    // Zero the description before filling it in
    memset(&_recordFormat, 0, sizeof(_recordFormat));
    // The sample rate is the number of frames captured per second. The hardware
    // default could also be queried through the audio session instead.
    _recordFormat.mSampleRate = sampleRate;  // e.g. 16000.0

    _recordFormat.mFormatID = inFormatID;
    if (inFormatID == kAudioFormatLinearPCM) {
        // For linear PCM one packet is exactly one frame. 16 bits is 2 bytes per
        // sample, so bytes per frame = 2 * channel count, and bytes per packet
        // equals bytes per frame.
        _recordFormat.mFramesPerPacket = 1;
        _recordFormat.mBitsPerChannel = 16;   // bits per sample in each channel
        _recordFormat.mChannelsPerFrame = 2;  // 1 = mono, 2 = stereo
        _recordFormat.mBytesPerFrame = (_recordFormat.mBitsPerChannel / 8) * _recordFormat.mChannelsPerFrame;
        _recordFormat.mBytesPerPacket = _recordFormat.mBytesPerFrame * _recordFormat.mFramesPerPacket;
        _recordFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
    }
}
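For instance, 16-bit stereo PCM at 16 kHz gives mBytesPerFrame = (16 / 8) * 2 = 4 and, with one frame per packet, mBytesPerPacket = 4 as well:

// Example call: 16 kHz, 16-bit, stereo linear PCM
[self setupAudioFormat:kAudioFormatLinearPCM SampleRate:16000];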
Starting recording:
- (void)startRecording
{
    NSError *error = nil;
    // Set the audio session category. AVAudioSessionCategoryRecord is enough when only
    // recording; use AVAudioSessionCategoryPlayAndRecord if playback is also needed.
    BOOL ret = [[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategoryRecord error:&error];
    if (!ret) {
        NSLog(@"Failed to set the audio session category");
        return;
    }
    // Activate the audio session
    ret = [[AVAudioSession sharedInstance] setActive:YES error:&error];
    if (!ret) {
        NSLog(@"Failed to activate the audio session");
        return;
    }
    // Create the input queue; inputBufferHandler is the capture callback
    AudioQueueNewInput(&_recordFormat, inputBufferHandler, (__bridge void *)(self), NULL, kCFRunLoopCommonModes, 0, &_audioQueue);
    // The buffer size set here decides how large the inBuffer passed to the
    // callback will be. It could also be derived from a target duration:
    // frames = ceil(kDefaultBufferDurationSeconds * mSampleRate), size = frames * mBytesPerFrame.
    int bufferByteSize = 4096;
    NSLog(@"buffer size: %d", bufferByteSize);
    AudioQueueBufferRef _audioBuffers[kNumberAudioQueueBuffers];  // was a fixed [3]; must match the loop count
    // Allocate the buffers and hand them to the queue so it can start filling them
    for (int i = 0; i < kNumberAudioQueueBuffers; i++) {
        AudioQueueAllocateBuffer(_audioQueue, bufferByteSize, &_audioBuffers[i]);
        AudioQueueEnqueueBuffer(_audioQueue, _audioBuffers[i], 0, NULL);
    }
    // Start recording
    AudioQueueStart(_audioQueue, NULL);
}
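The inputBufferHandler registered above is an AudioQueueInputCallback. A minimal sketch, assuming a placeholder class Recorder with a processAudio:length: method that feeds the captured PCM to the AAC encoder:

// Hypothetical sketch of the capture callback registered with AudioQueueNewInput
static void inputBufferHandler(void *inUserData, AudioQueueRef inAQ,
                               AudioQueueBufferRef inBuffer,
                               const AudioTimeStamp *inStartTime,
                               UInt32 inNumPackets,
                               const AudioStreamPacketDescription *inPacketDesc)
{
    Recorder *recorder = (__bridge Recorder *)inUserData;  // Recorder is a placeholder name
    [recorder processAudio:inBuffer->mAudioData length:inBuffer->mAudioDataByteSize];
    AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL);  // hand the buffer back to the queue
}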