本文基于以下文章进行的开发:
三、FFMPEG音频解码及播放 —— 开发出色的音频应用
OpenSL ES API 可以帮助您制定和提升应用的音频性能。 某些典型用例包括以下组成部分:
- 数字音频工作站 (DAW)。
- 合成器。
- 电子鼓。
- 音乐学习应用。
- 卡拉 OK 应用。
- DJ 混合。
- 音频效果。
- 视频/音频会议。
1.cpp目录下新建FFmpegMusic.h和FFmpegMusic.cpp
FFmpegMusic.h
//
// Created by ygdx_lk on 17/11/3.
//
// Public API of the FFmpeg-based MP3 decoder used by the OpenSL ES player:
// open the file and set up the resampler, pull decoded PCM chunk by chunk,
// then release everything.
#ifndef FFMPEG_FFMPEGMUSIC_H
#define FFMPEG_FFMPEGMUSIC_H
#include <jni.h>
#include <string>
#include <android/log.h>
extern "C" {
// codecs (encode/decode)
#include "libavcodec/avcodec.h"
// container format handling (demuxing)
#include "libavformat/avformat.h"
#include "libswresample/swresample.h"
// pixel processing (unused for audio, kept for the series' shared setup)
#include "libswscale/swscale.h"
#include <android/native_window_jni.h>
#include <unistd.h>
}
// NOTE(review): these macros carry a trailing ';', and some call sites below rely
// on it by omitting their own semicolon — do not convert to do{...}while(0)
// without also fixing every call site.
#define LOGI(FORMAT,...) __android_log_print(ANDROID_LOG_INFO,"jason",FORMAT,##__VA_ARGS__);
#define LOGE(FORMAT,...) __android_log_print(ANDROID_LOG_ERROR,"jason",FORMAT,##__VA_ARGS__);
// Opens /sdcard/input.mp3, finds the audio stream, opens the decoder and
// configures the resampler (output: 16-bit interleaved stereo at the source rate).
// Writes the source sample rate / channel count through rate / channel.
// Returns 0 on success, -1 on failure.
int createFFmpeg(int *rate,int *channel);
// Decodes the next audio frame into an internal buffer. On return *pcm points at
// that buffer (owned by this module — do not free) and *pcm_size is its byte count;
// *pcm_size stays untouched at end of file. Returns 0.
int getPcm(void **pcm,size_t *pcm_size);
// Releases all state allocated by createFFmpeg. (Name is a typo of "release",
// kept as-is for source compatibility with callers.)
void realseFFmpeg();
#endif //FFMPEG_FFMPEGMUSIC_H
FFmpegMusic.cpp
//
// Created by ygdx_lk on 17/11/3.
//
#include "FFmpegMusic.h"
// Decoder state shared across createFFmpeg / getPcm / realseFFmpeg.
AVFormatContext *avFormatContext;   // demuxer context for the input file
AVCodecContext *avCodecContext;     // decoder context (owned by the stream, only closed, not freed)
AVCodec *aVCodec;                   // decoder matching the stream's codec_id
AVPacket *packet;                   // reusable packet, av_malloc'd in createFFmpeg
AVFrame *frame;                     // reusable decoded frame
SwrContext *swrContext;             // resampler: source format -> S16 interleaved stereo
uint8_t *out_buffer;                // PCM output buffer handed to the caller of getPcm
int out_channer_nb;                 // output channel count (stereo -> 2)
int audio_stream_idx = -1;          // index of the audio stream; -1 until found
// Opens /sdcard/input.mp3, sets up decoder + resampler (S16 interleaved stereo
// at the source sample rate). Reports source rate / channel count via the
// out-parameters. Returns 0 on success, -1 on any failure.
int createFFmpeg(int *rate,int *channel){
    // Register demuxers/decoders (required on FFmpeg 2.x, matching avcodec-56).
    av_register_all();
    const char *input = "/sdcard/input.mp3";   // string literal: must be const
    avFormatContext = avformat_alloc_context();
    //0 on success, a negative AVERROR on failure
    if(avformat_open_input(&avFormatContext, input, NULL, NULL) < 0){
        LOGE("%s", "打开输入音频失败");
        return -1;
    }
    //>=0 if OK
    if(avformat_find_stream_info(avFormatContext, NULL) < 0){
        LOGE("%s", "获取资源信息失败");
        return -1;
    }
    // Locate the first audio stream.
    audio_stream_idx = -1;
    for (int i = 0; i < avFormatContext->nb_streams; ++i) {
        if(avFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO){
            audio_stream_idx = i;
            break;
        }
    }
    // FIX: without this check a file with no audio stream indexed streams[-1] (UB).
    if(audio_stream_idx < 0){
        LOGE("%s", "no audio stream found");
        return -1;
    }
    avCodecContext = avFormatContext->streams[audio_stream_idx]->codec;
    aVCodec = avcodec_find_decoder(avCodecContext->codec_id);
    // FIX: avcodec_open2 with a NULL codec would fail confusingly; report it here.
    if(aVCodec == NULL){
        LOGE("%s", "decoder not found");
        return -1;
    }
    //zero on success, a negative value on error
    if(avcodec_open2(avCodecContext, aVCodec, NULL) < 0){
        LOGE("%s", "打开解码器失败");
        return -1;
    }
    // Configure the resampler: whatever the source is -> 16-bit interleaved
    // stereo, keeping the source sample rate.
    swrContext = swr_alloc();
    int64_t out_ch_layout = AV_CH_LAYOUT_STEREO;            // stereo output
    enum AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16; // 16-bit samples
    int out_sample_rate = avCodecContext->sample_rate;
    swr_alloc_set_opts(swrContext, out_ch_layout, out_sample_fmt, out_sample_rate,
                       avCodecContext->channel_layout, avCodecContext->sample_fmt, avCodecContext->sample_rate,
                       0, NULL);
    // FIX: swr_init can fail (e.g. unset channel_layout); previously ignored.
    if(swr_init(swrContext) < 0){
        LOGE("%s", "swr_init failed");
        return -1;
    }
    // Output channel count (always 2 for AV_CH_LAYOUT_STEREO).
    out_channer_nb = av_get_channel_layout_nb_channels(AV_CH_LAYOUT_STEREO);
    *rate = avCodecContext->sample_rate;
    *channel = avCodecContext->channels;
    packet = (AVPacket *) av_malloc(sizeof(AVPacket));
    frame = av_frame_alloc();
    // PCM scratch buffer; MP3 frames are <= 1152 samples -> 4608 bytes stereo S16,
    // so 44100*2 bytes is ample.
    out_buffer = (uint8_t *) av_malloc(44100 * 2);
    return 0;
}
int getPcm(void **pcm,size_t *pcm_size){
LOGI("%s", "getPcm--");
int got_frame;
//0 if OK, < 0 on error or end of file
while (av_read_frame(avFormatContext, packet) == 0){
LOGI("%s", "getPcm——read_frame")
if(packet->stream_index == audio_stream_idx){
// int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame,
// int *got_frame_ptr, const AVPacket *avpkt);
avcodec_decode_audio4(avCodecContext, frame, &got_frame, packet);
if(got_frame){
// int swr_convert(struct SwrContext *s, uint8_t **out, int out_count,
// const uint8_t **in , int in_count);
swr_convert(swrContext, &out_buffer, 44100 * 2,
(const uint8_t **) frame->data, frame->nb_samples);
// int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples,
// enum AVSampleFormat sample_fmt, int align);
int size = av_samples_get_buffer_size(NULL, out_channer_nb, frame->nb_samples, AV_SAMPLE_FMT_S16, 1);
*pcm = out_buffer;
*pcm_size = size;
break;
}
}
}
return 0;
}
void realseFFmpeg(){
av_free_packet(packet);
av_free(out_buffer);
av_frame_free(&frame);
swr_free(&swrContext);
avcodec_close(avCodecContext);
avformat_close_input(&avFormatContext);
}
2.native-lib.h添加如下代码
#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>
3.修改CMakeLists.txt
add_library添加:src/main/cpp/FFmpegMusic.cpp
修改前:
add_library( native-lib SHARED src/main/cpp/native-lib.cpp )
修改后:
add_library( native-lib SHARED src/main/cpp/native-lib.cpp src/main/cpp/FFmpegMusic.cpp )
target_link_libraries添加OpenSLES:
修改前:
target_link_libraries( native-lib avcodec-56 avdevice-56 avformat-56 avutil-54 swresample-1 swscale-3 -landroid ${log-lib} )
修改后:
target_link_libraries( native-lib avcodec-56 avdevice-56 avformat-56 avutil-54 swresample-1 swscale-3 OpenSLES -landroid ${log-lib} )
4.native-lib.h中添加方法:
JNIEXPORT void JNICALL Java_com_test_ffmpeg_AudioPlayer_openSlesPlay(JNIEnv *env, jobject instance);
5.native-lib.cpp实现
// OpenSL ES objects/interfaces shared by openSlesPlay and the queue callback.
SLObjectItf slObjectItf = NULL;   // engine object
SLEngineItf slEngineItf = NULL;   // engine interface obtained from it
// output mix (mixer)
SLObjectItf pMix = NULL;
SLEnvironmentalReverbItf slEnvironmentalReverbItf = NULL;
SLEnvironmentalReverbSettings settings = SL_I3DL2_ENVIRONMENT_PRESET_DEFAULT;
// player object
SLObjectItf bgPlayerObject;
SLPlayItf bqPlayerPlay;
// buffer queue interface of the player
SLAndroidSimpleBufferQueueItf bqPlayerQueue;
// volume interface of the player
SLVolumeItf bqPalyerVolume;
// Current PCM chunk handed over by getPcm; refilled by the queue callback
// each time the speaker drains a buffer.
void *buffer;
size_t bufferSize = 0;
// Buffer-queue callback: fires whenever the player finishes a buffer. Decodes
// the next PCM chunk and enqueues it; at end of file tears FFmpeg down.
void bqPlayerCallBack(SLAndroidSimpleBufferQueueItf bq, void *context){
    (void) bq;       // we use the global bqPlayerQueue
    (void) context;  // no user data registered
    bufferSize = 0;
    getPcm(&buffer, &bufferSize);
    if(buffer != NULL && 0 != bufferSize){
        // Hand the decoded PCM to the player; playback continues through
        // this callback until getPcm stops producing data.
        SLresult sLresult = (*bqPlayerQueue)->Enqueue(bqPlayerQueue, buffer, bufferSize);
        // FIX: Enqueue result was ignored; a failure here silently stopped playback.
        if(SL_RESULT_SUCCESS != sLresult){
            LOGE("Enqueue failed: %d", (int) sLresult);
        }
        // FIX: bufferSize is size_t; "%d" was a format/argument mismatch (UB).
        LOGI("正在播放 %zu", bufferSize);
    } else{
        LOGI("播放结束 %zu", bufferSize);
        realseFFmpeg();
    }
}
// JNI entry point: builds the OpenSL ES engine -> output mix -> audio player
// chain, wires the buffer-queue callback, and primes playback with the first
// decoded PCM buffer. Runs until getPcm reports end of file (see callback).
void JNICALL Java_com_test_ffmpeg_AudioPlayer_openSlesPlay(JNIEnv *env, jobject instance) {
    (void) env;
    (void) instance;
    SLresult sLresult;
    // 1. Create and realize the engine object, then fetch its engine interface.
    slCreateEngine(&slObjectItf, 0, NULL, 0, NULL, NULL);
    (*slObjectItf)->Realize(slObjectItf, SL_BOOLEAN_FALSE);
    sLresult = (*slObjectItf)->GetInterface(slObjectItf, SL_IID_ENGINE, &slEngineItf);
    LOGI("引擎地址%p sLresult %d", slEngineItf, sLresult);
    // 2. Create the output mix; environmental reverb is optional, so only a
    //    successful GetInterface leads to configuring it.
    (*slEngineItf)->CreateOutputMix(slEngineItf, &pMix, 0, 0, 0);
    (*pMix)->Realize(pMix, SL_BOOLEAN_FALSE);
    sLresult = (*pMix)->GetInterface(pMix, SL_IID_ENVIRONMENTALREVERB, &slEnvironmentalReverbItf);
    LOGI("sLresult 1 %d", sLresult);
    if(SL_RESULT_SUCCESS == sLresult){
        (*slEnvironmentalReverbItf)->SetEnvironmentalReverbProperties(slEnvironmentalReverbItf, &settings);
    }
    // 3. Initialise FFmpeg; it reports the SOURCE sample rate and channel count.
    int rate;
    int channels;
    // FIX: a createFFmpeg failure was ignored and playback proceeded on
    // uninitialised decoder state.
    if(createFFmpeg(&rate, &channels) != 0){
        LOGE("%s", "createFFmpeg failed");
        return;
    }
    (void) channels;  // resampler always emits stereo; source count unused here
    LOGI("%s", "初始化FFMPEG完毕");
    // FIX: use the Android *simple* buffer-queue locator struct to match
    // SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE — the generic
    // SLDataLocator_AndroidBufferQueue only worked by layout coincidence.
    SLDataLocator_AndroidSimpleBufferQueue androidBufferQueue = {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 2};
    // The resampler in createFFmpeg always produces 16-bit interleaved STEREO at
    // the source rate, so the PCM format must declare 2 channels and the actual
    // rate in milliHertz (FIX: was hard-coded 44.1 kHz — wrong pitch for other
    // rates — and used the source channel count instead of the resampled one).
    SLDataFormat_PCM pcm = {
            SL_DATAFORMAT_PCM,
            2,                               // resampler output is stereo
            (SLuint32) rate * 1000,          // OpenSL rates are in milliHertz
            SL_PCMSAMPLEFORMAT_FIXED_16,
            SL_PCMSAMPLEFORMAT_FIXED_16,     // containerSize == bitsPerSample
            SL_SPEAKER_FRONT_LEFT|SL_SPEAKER_FRONT_RIGHT,
            SL_BYTEORDER_LITTLEENDIAN
    };
    SLDataSource slDataSource = {&androidBufferQueue, &pcm};
    // 4. Sink: the output mix created above (pFormat is ignored for output mix).
    SLDataLocator_OutputMix outputMix = {SL_DATALOCATOR_OUTPUTMIX, pMix};
    SLDataSink audioSnk = {&outputMix, NULL};
    const SLInterfaceID ids[3] = {SL_IID_BUFFERQUEUE, SL_IID_EFFECTSEND, SL_IID_VOLUME};
    const SLboolean req[3] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE};
    // 5. Create and realize the player, then fetch the play, buffer-queue and
    //    volume interfaces and register the refill callback.
    sLresult = (*slEngineItf)->CreateAudioPlayer(slEngineItf, &bgPlayerObject, &slDataSource, &audioSnk, 3, ids, req);
    if(SL_RESULT_SUCCESS != sLresult){
        LOGE("CreateAudioPlayer failed: %d", (int) sLresult);
        return;
    }
    sLresult = (*bgPlayerObject)->Realize(bgPlayerObject, SL_BOOLEAN_FALSE);
    sLresult = (*bgPlayerObject)->GetInterface(bgPlayerObject, SL_IID_PLAY, &bqPlayerPlay);
    sLresult = (*bgPlayerObject)->GetInterface(bgPlayerObject, SL_IID_BUFFERQUEUE, &bqPlayerQueue);
    sLresult = (*bqPlayerQueue)->RegisterCallback(bqPlayerQueue, bqPlayerCallBack, NULL);
    (*bgPlayerObject)->GetInterface(bgPlayerObject, SL_IID_VOLUME, &bqPalyerVolume);
    // 6. Start playing and prime the queue with the first decoded buffer; the
    //    callback keeps the queue fed from then on.
    (*bqPlayerPlay)->SetPlayState(bqPlayerPlay, SL_PLAYSTATE_PLAYING);
    bqPlayerCallBack(bqPlayerQueue, NULL);
}
6.AudioPlayer添加native方法
public native void openSlesPlay();
7.MainActivity调用:
new Thread(new Runnable() { @Override public void run() { new AudioPlayer().openSlesPlay(); } }).start();