上篇是视频解码播放,这篇讲音频解码播放,解码过程同视频解码,不过取的是音频流,android播放用的audiotracker,因为实现简单。
编写Android代码
因为是audiotracker播放,在代理类里除了编写native方法,还要提供给c调用的返回audiotracker的设定采样率和声道的createAudioTrack()。
public class YoungPlayer {
public native void render(String input,Surface surface);
public native void sound(String input,String output);
public native void play(String input,Surface surface);
static{
System.loadLibrary("avutil-54");
System.loadLibrary("swresample-1");
System.loadLibrary("avcodec-56");
System.loadLibrary("avformat-56");
System.loadLibrary("swscale-3");
System.loadLibrary("postproc-53");
System.loadLibrary("avfilter-5");
System.loadLibrary("avdevice-56");
System.loadLibrary("yuv");
System.loadLibrary("myffmpeg");
}
public AudioTrack createAudioTrack(int sampleRateInHz, int nb_channels){
//固定格式的音频码流
int audioFormat = AudioFormat.ENCODING_PCM_16BIT;
Log.i("yang", "nb_channels:"+nb_channels);
//声道布局
int channelConfig;
if(nb_channels == 1){
channelConfig = android.media.AudioFormat.CHANNEL_OUT_MONO;
}else if(nb_channels == 2){
channelConfig = android.media.AudioFormat.CHANNEL_OUT_STEREO;
}else{
channelConfig = android.media.AudioFormat.CHANNEL_OUT_STEREO;
}
int bufferSizeInBytes = AudioTrack.getMinBufferSize(sampleRateInHz, channelConfig, audioFormat);
AudioTrack audioTrack = new AudioTrack(
AudioManager.STREAM_MUSIC,
sampleRateInHz, channelConfig,
audioFormat,
bufferSizeInBytes, AudioTrack.MODE_STREAM);
//播放
//audioTrack.play();
//写入PCM
//audioTrack.write(audioData, offsetInBytes, sizeInBytes);
return audioTrack;
}
}
调用代码:
public void sound(View btn){
String video = sp_video.getSelectedItem().toString();
final String input = new File(Environment.getExternalStorageDirectory(),video).getAbsolutePath();
final String output = new File(Environment.getExternalStorageDirectory(),"Output.pcm").getAbsolutePath();
//Surface传入到Native函数中,用于绘制
new Thread(new Runnable() {
public void run() {
player.sound(input,output);
}
}).start();
}
C/C++代码实现音频文件播放
#include "com_yang_ffmpegDemo_YoungPlayer.h"
#include <stdlib.h>
#include <unistd.h>
#include <android/log.h>
#define LOGI(FORMAT,...) __android_log_print(ANDROID_LOG_INFO,"jason",FORMAT,##__VA_ARGS__);
#define LOGE(FORMAT,...) __android_log_print(ANDROID_LOG_ERROR,"jason",FORMAT,##__VA_ARGS__);
#define MAX_AUDIO_FRME_SIZE 48000 * 4
//封装格式
#include "libavformat/avformat.h"
//解码
#include "libavcodec/avcodec.h"
//缩放
#include "libswscale/swscale.h"
//重采样
#include "libswresample/swresample.h"
JNIEXPORT void JNICALL Java_com_yang_ffmpegDemo_YoungPlayer_sound
(JNIEnv *env, jobject jthiz, jstring input_jstr, jstring output_jstr){
const char* input_cstr = (*env)->GetStringUTFChars(env,input_jstr,NULL);
const char* output_cstr = (*env)->GetStringUTFChars(env,output_jstr,NULL);
LOGI("%s","sound");
//注册组件
av_register_all();
AVFormatContext *pFormatCtx = avformat_alloc_context();
//打开音频文件
if(avformat_open_input(&pFormatCtx,input_cstr,NULL,NULL) != 0){
LOGI("%s","无法打开音频文件");
return;
}
//获取输入文件信息
if(avformat_find_stream_info(pFormatCtx,NULL) < 0){
LOGI("%s","无法获取输入文件信息");
return;
}
//获取音频流索引位置
int i = 0, audio_stream_idx = -1;
for(; i < pFormatCtx->nb_streams;i++){
if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO){
audio_stream_idx = i;
break;
}
}
//获取解码器
AVCodecContext *codecCtx = pFormatCtx->streams[audio_stream_idx]->codec;
AVCodec *codec = avcodec_find_decoder(codecCtx->codec_id);
if(codec == NULL){
LOGI("%s","无法获取解码器");
return;
}
//打开解码器
if(avcodec_open2(codecCtx,codec,NULL) < 0){
LOGI("%s","无法打开解码器");
return;
}
//压缩数据
AVPacket *packet = (AVPacket *)av_malloc(sizeof(AVPacket));
//解压缩数据
AVFrame *frame = av_frame_alloc();
//frame->16bit 44100 PCM 统一音频采样格式与采样率
SwrContext *swrCtx = swr_alloc();
//重采样设置参数-------------start
//输入的采样格式
enum AVSampleFormat in_sample_fmt = codecCtx->sample_fmt;
//输出采样格式16bit PCM
enum AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16;
//输入采样率
int in_sample_rate = codecCtx->sample_rate;
//输出采样率
int out_sample_rate = in_sample_rate;
//获取输入的声道布局
//根据声道个数获取默认的声道布局(2个声道,默认立体声stereo)
//av_get_default_channel_layout(codecCtx->channels);
uint64_t in_ch_layout = codecCtx->channel_layout;
//输出的声道布局(立体声)
uint64_t out_ch_layout = AV_CH_LAYOUT_STEREO;
swr_alloc_set_opts(swrCtx,
out_ch_layout,out_sample_fmt,out_sample_rate,
in_ch_layout,in_sample_fmt,in_sample_rate,
0, NULL);
swr_init(swrCtx);
//输出的声道个数
int out_channel_nb = av_get_channel_layout_nb_channels(out_ch_layout);
//重采样设置参数-------------end
//JNI begin------------------
//JasonPlayer
jclass player_class = (*env)->GetObjectClass(env,jthiz);
//AudioTrack对象
jmethodID create_audio_track_mid = (*env)->GetMethodID(env,player_class,"createAudioTrack","(II)Landroid/media/AudioTrack;");
jobject audio_track = (*env)->CallObjectMethod(env,jthiz,create_audio_track_mid,out_sample_rate,out_channel_nb);
//调用AudioTrack.play方法
jclass audio_track_class = (*env)->GetObjectClass(env,audio_track);
jmethodID audio_track_play_mid = (*env)->GetMethodID(env,audio_track_class,"play","()V");
(*env)->CallVoidMethod(env,audio_track,audio_track_play_mid);
//AudioTrack.write
jmethodID audio_track_write_mid = (*env)->GetMethodID(env,audio_track_class,"write","([BII)I");
//JNI end------------------
FILE *fp_pcm = fopen(output_cstr,"wb");
//16bit 44100 PCM 数据
uint8_t *out_buffer = (uint8_t *)av_malloc(MAX_AUDIO_FRME_SIZE);
int got_frame = 0,index = 0, ret;
//不断读取压缩数据
while(av_read_frame(pFormatCtx,packet) >= 0){
//解码音频类型的Packet
if(packet->stream_index == audio_stream_idx){
//解码
ret = avcodec_decode_audio4(codecCtx,frame,&got_frame,packet);
if(ret < 0){
LOGI("%s","解码完成");
}
//解码一帧成功
if(got_frame > 0){
LOGI("解码:%d",index++);
swr_convert(swrCtx, &out_buffer, MAX_AUDIO_FRME_SIZE,(const uint8_t **)frame->data,frame->nb_samples);
//获取sample的size
int out_buffer_size = av_samples_get_buffer_size(NULL, out_channel_nb,
frame->nb_samples, out_sample_fmt, 1);
fwrite(out_buffer,1,out_buffer_size,fp_pcm);
//out_buffer缓冲区数据,转成byte数组
jbyteArray audio_sample_array = (*env)->NewByteArray(env,out_buffer_size);
jbyte* sample_bytep = (*env)->GetByteArrayElements(env,audio_sample_array,NULL);
//out_buffer的数据复制到sampe_bytep
memcpy(sample_bytep,out_buffer,out_buffer_size);
//同步
(*env)->ReleaseByteArrayElements(env,audio_sample_array,sample_bytep,0);
//AudioTrack.write PCM数据
(*env)->CallIntMethod(env,audio_track,audio_track_write_mid,
audio_sample_array,0,out_buffer_size);
//释放局部引用
(*env)->DeleteLocalRef(env,audio_sample_array);
usleep(1000 * 16);
}
}
av_free_packet(packet);
}
av_frame_free(&frame);
av_free(out_buffer);
swr_free(&swrCtx);
avcodec_close(codecCtx);
avformat_close_input(&pFormatCtx);
(*env)->ReleaseStringUTFChars(env,input_jstr,input_cstr);
(*env)->ReleaseStringUTFChars(env,output_jstr,output_cstr);
}
以上通过C调用Java类audiotracker的播放功能实现,仅仅几行代码就实现了。
PCM 数据播放在开发中也经常使用,例如自己编写播放器,解码之后的音频PCM数据,就可以通过OpenSL 播放,比用Java层的AudioTrack更快,延迟更低。如果要达到更好的效果可以参考Android+FFmpeg+OpenSL ES音频解码播放。