1. Use ffmpeg to decode the audio stream and save it as a WAV file.
This step is straightforward if you are familiar with ffmpeg's audio decoding flow: decode the stream packet by packet, write the resulting PCM data to a local file, and keep a running count of the decoded PCM bytes. A 44-byte WAV header is written first with a size estimated from the stream duration; once decoding finishes, the real PCM byte count is patched back into that header (the data-chunk size at offset 40 and the RIFF-chunk size at offset 4).
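For reference, those 44 bytes follow the canonical RIFF/WAVE layout shown below. The struct is only an illustration of the byte layout (the listing below writes the same fields one fwrite at a time rather than using a struct); all multi-byte fields are little-endian.

    #include <stdint.h>

    #pragma pack(push, 1)
    typedef struct {
        char    riff[4];        /* "RIFF" */
        int32_t fileSize;       /* total file size - 8 = dataSize + 36 */
        char    wave[4];        /* "WAVE" */
        char    fmt[4];         /* "fmt " */
        int32_t fmtSize;        /* 16 for PCM */
        int16_t audioFormat;    /* 1 = linear PCM */
        int16_t channels;       /* e.g. 2 */
        int32_t sampleRate;     /* e.g. 44100 */
        int32_t byteRate;       /* sampleRate * channels * bits/8 */
        int16_t blockAlign;     /* channels * bits/8 */
        int16_t bitsPerSample;  /* e.g. 16 */
        char    data[4];        /* "data" */
        int32_t dataSize;       /* raw PCM byte count */
    } WavHeader;                /* exactly 44 bytes when packed */
    #pragma pack(pop)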
save_audio.c
Code example
#include <stdio.h>
#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
#include "libavutil/avutil.h"

/* Write a provisional 44-byte WAV header; the two size fields are
   patched with the real values in main() once decoding has finished. */
static void writeWavHeader(AVCodecContext *pCodecCtx, AVFormatContext *pFormatCtx, FILE *audioFile)
{
    const char *data;
    int32_t long_temp;
    int16_t short_temp;
    int16_t BlockAlign;
    int bits = 16;
    int32_t fileSize;
    int32_t audioDataSize;

    switch (pCodecCtx->sample_fmt) {
    case AV_SAMPLE_FMT_S16: bits = 16; break;
    case AV_SAMPLE_FMT_S32: bits = 32; break;
    case AV_SAMPLE_FMT_U8:  bits = 8;  break;
    default:                bits = 16; break;
    }
    /* duration is in AV_TIME_BASE (microsecond) units; this is only an estimate */
    audioDataSize = (pFormatCtx->duration / AV_TIME_BASE) * (bits / 8)
                    * (pCodecCtx->sample_rate) * (pCodecCtx->channels);
    fileSize = audioDataSize + 36;

    /* "RIFF" chunk */
    data = "RIFF";
    fwrite(data, sizeof(char), 4, audioFile);
    fwrite(&fileSize, sizeof(int32_t), 1, audioFile);
    /* "WAVE" + "fmt " chunk */
    data = "WAVE";
    fwrite(data, sizeof(char), 4, audioFile);
    data = "fmt ";
    fwrite(data, sizeof(char), 4, audioFile);
    long_temp = 16;                                    /* fmt chunk size */
    fwrite(&long_temp, sizeof(int32_t), 1, audioFile);
    short_temp = 0x01;                                 /* linear PCM */
    fwrite(&short_temp, sizeof(int16_t), 1, audioFile);
    short_temp = (pCodecCtx->channels);
    fwrite(&short_temp, sizeof(int16_t), 1, audioFile);
    long_temp = (pCodecCtx->sample_rate);
    fwrite(&long_temp, sizeof(int32_t), 1, audioFile);
    long_temp = (bits / 8) * (pCodecCtx->channels) * (pCodecCtx->sample_rate);  /* byte rate */
    fwrite(&long_temp, sizeof(int32_t), 1, audioFile);
    BlockAlign = (bits / 8) * (pCodecCtx->channels);
    fwrite(&BlockAlign, sizeof(int16_t), 1, audioFile);
    short_temp = (bits);
    fwrite(&short_temp, sizeof(int16_t), 1, audioFile);
    /* "data" chunk */
    data = "data";
    fwrite(data, sizeof(char), 4, audioFile);
    fwrite(&audioDataSize, sizeof(int32_t), 1, audioFile);

    fseek(audioFile, 44, SEEK_SET);
}

int main()
{
    char *filename = "E:\\flv\\Love_You.mp4";
    AVFormatContext *pFormatCtx;
    int audioStream = -1;
    int i;
    int iFrame = 0;
    AVCodecContext *pCodecCtx;
    AVCodec *pCodec = NULL;
    static AVPacket packet;
    uint8_t *pktData = NULL;
    int pktSize;
    int outSize = AVCODEC_MAX_AUDIO_FRAME_SIZE;
    uint8_t *inbuf = (uint8_t *)av_malloc(outSize);
    FILE *wavFile = NULL;
    int32_t audioFileSize = 0;

    av_register_all();
    if (av_open_input_file(&pFormatCtx, filename, NULL, 0, NULL) != 0) {
        printf("Could not open input file %s\n", filename);
        return 0;
    }
    if (av_find_stream_info(pFormatCtx) < 0) {
        printf("Could not find stream information\n");
    }
    av_dump_format(pFormatCtx, 0, filename, 0);

    /* locate the first audio stream */
    for (i = 0; i < pFormatCtx->nb_streams; i++) {
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            audioStream = i;
            break;
        }
    }
    if (audioStream == -1) {
        printf("Could not find an audio stream\n");
        return 1;
    }

    pCodecCtx = pFormatCtx->streams[audioStream]->codec;
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (avcodec_open(pCodecCtx, pCodec) < 0) {
        printf("Error avcodec_open failed.\n");
        return 1;
    }

    printf("\tbit_rate=%d\n bits_per_sample=%d\n sample_rate=%d\n channels=%d\n codec_name=%s\n",
           pCodecCtx->bit_rate,
           (pCodecCtx->codec_id == CODEC_ID_PCM_U8) ? 8 : 16,
           pCodecCtx->sample_rate, pCodecCtx->channels, pCodecCtx->codec->name);

    wavFile = fopen("E:\\flv\\myPlayerWav.wav", "wb");
    if (wavFile == NULL) {
        printf("open error\n");
        return 1;
    }

    writeWavHeader(pCodecCtx, pFormatCtx, wavFile);

    av_init_packet(&packet);
    while (av_read_frame(pFormatCtx, &packet) >= 0) {
        if (packet.stream_index == audioStream) {
            int len = 0;
            if ((iFrame++) >= 4000)
                break;
            pktData = packet.data;
            pktSize = packet.size;
            while (pktSize > 0) {
                AVPacket avpkt = packet;   /* shallow copy so the advancing data pointer is honoured */
                avpkt.data = pktData;
                avpkt.size = pktSize;
                outSize = AVCODEC_MAX_AUDIO_FRAME_SIZE;
                len = avcodec_decode_audio3(pCodecCtx, (short *)inbuf, &outSize, &avpkt);
                if (len < 0) {
                    printf("Error while decoding\n");
                    break;
                }
                if (outSize > 0) {
                    audioFileSize += outSize;          /* running total of PCM bytes */
                    fwrite(inbuf, 1, outSize, wavFile);
                    fflush(wavFile);
                }
                pktSize -= len;
                pktData += len;
            }
        }
        av_free_packet(&packet);
    }

    /* patch the real sizes into the header: data chunk at offset 40, RIFF chunk at offset 4 */
    fseek(wavFile, 40, SEEK_SET);
    fwrite(&audioFileSize, 1, sizeof(int32_t), wavFile);
    audioFileSize += 36;
    fseek(wavFile, 4, SEEK_SET);
    fwrite(&audioFileSize, 1, sizeof(int32_t), wavFile);
    fclose(wavFile);

    av_free(inbuf);
    if (pCodecCtx != NULL) {
        avcodec_close(pCodecCtx);
    }
    av_close_input_file(pFormatCtx);
    return 0;
}
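As a quick sanity check (not part of the program above), the finished file's header can be read back to confirm that the two patched size fields are consistent. A minimal sketch, assuming the same output path is passed in:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* Read back the 44-byte header and check the fields main() patches
       (RIFF chunk size at offset 4, data chunk size at offset 40). */
    int check_wav_sizes(const char *path)
    {
        uint8_t header[44];
        int32_t riffSize, dataSize;
        FILE *f = fopen(path, "rb");
        if (f == NULL || fread(header, 1, sizeof(header), f) != sizeof(header)) {
            if (f) fclose(f);
            return -1;
        }
        fclose(f);
        memcpy(&riffSize, header + 4, sizeof(riffSize));
        memcpy(&dataSize, header + 40, sizeof(dataSize));
        printf("RIFF size=%d, data size=%d\n", riffSize, dataSize);
        return (riffSize == dataSize + 36) ? 0 : 1;   /* 0 means the sizes agree */
    }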
Note: the ffmpeg version I am using is ffmpeg-0.8.6. I have successfully built ffmpeg on Windows, Linux (Ubuntu), and iOS, and the code above has no platform dependencies — it is all standard C.
2. Read the PCM data on the iPhone and feed it to an AudioQueue for playback.
playAudio.h
Code example
#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>
#import <AudioToolbox/AudioFile.h>

#define NUM_BUFFERS 3

@interface playAudio : NSObject {
    // AudioFileID of the file being played
    AudioFileID audioFile;
    // audio stream description
    AudioStreamBasicDescription dataFormat;
    // the playback audio queue
    AudioQueueRef queue;
    SInt64 packetIndex;
    UInt32 numPacketsToRead;
    UInt32 bufferByteSize;
    uint8_t *inbuf;
    AudioStreamPacketDescription *packetDescs;
    AudioQueueBufferRef buffers[NUM_BUFFERS];
    FILE *wavFile;
}

// expose the queue as a property
@property AudioQueueRef queue;

// playback entry point
-(id)initWithAudio:(NSString *)path;
// buffer refill methods
-(void)audioQueueOutputWithQueue:(AudioQueueRef)audioQueue queueBuffer:(AudioQueueBufferRef)audioQueueBuffer;
-(UInt32)readPacketsIntoBuffer:(AudioQueueBufferRef)buffer;
// callback declaration
static void BufferCallback(void *inUserData, AudioQueueRef inAQ, AudioQueueBufferRef buffer);

@end
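Before the implementation, it is worth pinning down the buffer arithmetic it relies on: the PCM from step 1 is 16-bit stereo, so one frame is 4 bytes; each refill reads numPacketsToRead frames (8192), i.e. 32768 bytes, roughly 0.19 s of audio, which fits comfortably in a 0x10000-byte queue buffer. A small sketch of that calculation (the constants mirror the ones used in the implementation below):

    /* buffer arithmetic for 16-bit stereo PCM at 44.1 kHz */
    enum { kBitsPerChannel = 16, kChannels = 2, kSampleRate = 44100 };
    enum { kBytesPerFrame  = (kBitsPerChannel / 8) * kChannels };     /* 4 bytes          */
    enum { kFramesPerRead  = 4096 * 2 };                              /* numPacketsToRead */
    enum { kBytesPerRead   = kFramesPerRead * kBytesPerFrame };       /* 32768 bytes      */
    enum { kBufferBytes    = 0x10000 };                               /* gBufferSizeBytes */
    /* 8192 frames / 44100 frames per second ≈ 0.19 s of audio per refill */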
playAudio.m
Code example
View Code #import "playAudio.h"
#define AVCODEC_MAX_AUDIO_FRAME_SIZE 4096*2// (0x10000)/4 //static UInt32 gBufferSizeBytes=0x10000;//65536 static UInt32 gBufferSizeBytes=0x10000;//It must be pow(2,x)
@implementation playAudio
@synthesize queue;
//回调函数(Callback)的实现 static void BufferCallback(void *inUserData,AudioQueueRef inAQ, AudioQueueBufferRef buffer){ playAudio* player=(__bridge playAudio*)inUserData; [player audioQueueOutputWithQueue:inAQ queueBuffer:buffer]; }
//缓存数据读取方法的实现 -(void) audioQueueOutputWithQueue:(AudioQueueRef)audioQueue queueBuffer:(AudioQueueBufferRef)audioQueueBuffer{ //读取包数据 UInt32 numBytes; // UInt32 numPackets=numPacketsToRead; UInt32 numPackets=numPacketsToRead;
//成功读取时 numBytes=fread(inbuf, 1, numPackets*4,wavFile); AudioQueueBufferRef outBufferRef=audioQueueBuffer; NSData *aData=[[NSData alloc]initWithBytes:inbuf length:numBytes];
if(numBytes>0){ memcpy(outBufferRef->mAudioData, aData.bytes, aData.length);
outBufferRef->mAudioDataByteSize=numBytes; AudioQueueEnqueueBuffer(audioQueue, outBufferRef, 0, nil); packetIndex += numPackets; } else{ } }
//音频播放方法的实现 -(id) initWithAudio:(NSString *)path{ if (!(self=[super init])) return nil; int i;
wavFile=fopen([path cStringUsingEncoding:NSASCIIStringEncoding], "rb"); if (wavFile==NULL) { printf("open wavFile error in current file %s,in line %d",__FILE__,__LINE__); return nil; } //跳过wav文件的44字节的文件头 fseek(wavFile, 44, SEEK_SET);
for (int i=0; i<NUM_BUFFERS; i++) { AudioQueueEnqueueBuffer(queue, buffers[i], 0, nil); }
//取得音频数据格式 { dataFormat.mSampleRate=44100;//采样频率 dataFormat.mFormatID=kAudioFormatLinearPCM; dataFormat.mFormatFlags=kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked; dataFormat.mBytesPerFrame=4; dataFormat.mBytesPerPacket=4; dataFormat.mFramesPerPacket=1;//wav 通常为1 dataFormat.mChannelsPerFrame=2;//通道数 dataFormat.mBitsPerChannel=16;//采样的位数 dataFormat.mReserved=0; }
//创建播放用的音频队列 AudioQueueNewOutput(&dataFormat, BufferCallback, self, nil, nil, 0, &queue); //计算单位时间包含的包数
// numPacketsToRead= gBufferSizeBytes/dataFormat.mBytesPerPacket; // numPacketsToRead=AVCODEC_MAX_AUDIO_FRAME_SIZE numPacketsToRead=AVCODEC_MAX_AUDIO_FRAME_SIZE; packetDescs=nil;
//设置Magic Cookie,参见第二十七章的相关介绍
//创建并分配缓冲控件 packetIndex=0; for (i=0; i<NUM_BUFFERS; i++) { AudioQueueAllocateBuffer(queue, gBufferSizeBytes, &buffers[i]); //读取包数据 if ([self readPacketsIntoBuffer:buffers[i]]==1) { break; } }
Float32 gain=1.0; //设置音量 AudioQueueSetParameter(queue, kAudioQueueParam_Volume, gain); //队列处理开始,此后系统开始自动调用回调(Callback)函数 AudioQueueStart(queue, nil); return self; }
-(UInt32)readPacketsIntoBuffer:(AudioQueueBufferRef)buffer { UInt32 numBytes,numPackets; //从文件中接受数据并保存到缓存(buffer)中 //AVCODEC_MAX_AUDIO_FRAME_SIZE*100 numPackets = numPacketsToRead; inbuf=(uint8_t *)malloc(numPackets); AudioQueueBufferRef outBufferRef=buffer; numBytes=fread(inbuf, 1, numPackets*4,wavFile); NSData *aData=[[NSData alloc]initWithBytes:inbuf length:numBytes];
if(numBytes>0){ memcpy(outBufferRef->mAudioData, aData.bytes, aData.length); outBufferRef->mAudioDataByteSize=numBytes; AudioQueueEnqueueBuffer(queue, outBufferRef, 0, nil); packetIndex += numPackets; } else{ return 1;//意味着我们没有读到任何的包 } return 0;//0代表正常的退出 } @end |
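A minimal call site, assuming the WAV produced in step 1 has been added to the app bundle (the resource name here is only an illustration):

    // hypothetical caller, e.g. inside a view controller action
    NSString *path = [[NSBundle mainBundle] pathForResource:@"myPlayerWav" ofType:@"wav"];
    playAudio *player = [[playAudio alloc] initWithAudio:path];   // starts the AudioQueue immediately
    // keep a strong reference to player for as long as playback should continue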