AudioRecord.java
/**
 * Returns the minimum buffer size (in bytes) required to create an
 * AudioRecord object for the given recording parameters.
 *
 * @param sampleRateInHz sample rate in Hertz
 * @param channelConfig  one of the AudioFormat.CHANNEL_IN_* configurations
 * @param audioFormat    the audio data encoding
 * @return the minimum buffer size in bytes, ERROR_BAD_VALUE if the
 *         parameters are not supported, or ERROR on a generic failure
 */
static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
    // Map the channel configuration mask to a channel count; any mask
    // outside the recognized mono/stereo set is rejected.
    final int channelCount;
    if (channelConfig == AudioFormat.CHANNEL_IN_DEFAULT // AudioFormat.CHANNEL_CONFIGURATION_DEFAULT
            || channelConfig == AudioFormat.CHANNEL_IN_MONO
            || channelConfig == AudioFormat.CHANNEL_CONFIGURATION_MONO) {
        channelCount = 1;
    } else if (channelConfig == AudioFormat.CHANNEL_IN_STEREO
            || channelConfig == AudioFormat.CHANNEL_CONFIGURATION_STEREO
            || channelConfig == (AudioFormat.CHANNEL_IN_FRONT | AudioFormat.CHANNEL_IN_BACK)) {
        channelCount = 2;
    } else { // covers AudioFormat.CHANNEL_INVALID and everything else
        loge("getMinBufferSize(): Invalid channel configuration.");
        return ERROR_BAD_VALUE;
    }

    // Delegate the actual computation to native code and translate its
    // sentinel return values (0 / -1) into public error codes.
    final int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat);
    if (size == 0) {
        return ERROR_BAD_VALUE;
    }
    if (size == -1) {
        return ERROR;
    }
    return size;
}
我们可以看到计算调用到了native_get_min_buff_size
对应了/frameworks/base/core/jni/android_media_AudioRecord.cpp中的
{"native_get_min_buff_size",
"(III)I", (void *)android_media_AudioRecord_get_min_buff_size},
可以知道对应
// JNI bridge for AudioRecord.getMinBufferSize(): converts the Java-level
// parameters to native types, asks AudioRecord for the minimum frame count,
// then scales it to a byte count.
//
// Returns the minimum buffer size in bytes, 0 when the parameter combination
// is not supported (Java maps this to ERROR_BAD_VALUE), or -1 on any other
// failure (Java maps this to ERROR).
//
// NOTE: the quoted snippet had editor line numbers (662-681) pasted into the
// code itself; they are stripped here so the excerpt actually compiles.
static jint android_media_AudioRecord_get_min_buff_size(JNIEnv *env, jobject thiz,
        jint sampleRateInHertz, jint channelCount, jint audioFormat) {

    ALOGV(">> android_media_AudioRecord_get_min_buff_size(%d, %d, %d)",
          sampleRateInHertz, channelCount, audioFormat);

    size_t frameCount = 0;
    audio_format_t format = audioFormatToNative(audioFormat);
    // frameCount is an output parameter, filled in on success.
    status_t result = AudioRecord::getMinFrameCount(&frameCount,
            sampleRateInHertz,
            format,
            audio_channel_in_mask_from_count(channelCount));

    if (result == BAD_VALUE) {
        return 0;
    }
    if (result != NO_ERROR) {
        return -1;
    }
    // bytes = frames * channels * bytes-per-sample
    return frameCount * channelCount * audio_bytes_per_sample(format);
}
我们可以看到,重点是第 669 行对 AudioRecord::getMinFrameCount 的调用:
status_t result = AudioRecord::getMinFrameCount(&frameCount,
670 sampleRateInHertz,
671 format,
672 audio_channel_in_mask_from_count(channelCount));
函数当中通过 frameCount 指针进行了赋值(frameCount 是输出参数)
我们分析下getMinFrameCount
/ static
38status_t AudioRecord::getMinFrameCount(
39 size_t* frameCount,
40 uint32_t sampleRate,
41 audio_format_t format,
42 audio_channel_mask_t channelMask)
43{
44 if (frameCount == NULL) {
45 return BAD_VALUE;
46 }
47
48 size_t size;
49 status_t status = AudioSystem::getInputBufferSize(sampleRate, format, channelMask, &size);
50 if (status != NO_ERROR) {
51 ALOGE("AudioSystem could not query the input buffer size for sampleRate %u, format %#x, "
52 "channelMask %#x; status %d", sampleRate, format, channelMask, status);
53 return status;
54 }
55
56 // We double the size of input buffer for ping pong use of record buffer.
57 // Assumes audio_is_linear_pcm(format)
58 if ((*frameCount = (size * 2) / (audio_channel_count_from_in_mask(channelMask) *
59 audio_bytes_per_sample(format))) == 0) {
60 ALOGE("Unsupported configuration: sampleRate %u, format %#x, channelMask %#x",
61 sampleRate, format, channelMask);
62 return BAD_VALUE;
63 }
64
65 return NO_ERROR;
66}
我们通过以下代码(第 56–63 行)
56 // We double the size of input buffer for ping pong use of record buffer.
57 // Assumes audio_is_linear_pcm(format)
58 if ((*frameCount = (size * 2) / (audio_channel_count_from_in_mask(channelMask) *
59 audio_bytes_per_sample(format))) == 0) {
60 ALOGE("Unsupported configuration: sampleRate %u, format %#x, channelMask %#x",
61 sampleRate, format, channelMask);
62 return BAD_VALUE;
63 }
可以知道frameCount = (size * 2)/( 声道数 * 采样大小占的字节数);
size在
AudioSystem::getInputBufferSize(sampleRate, format, channelMask, &size);中赋值
// Thin forwarding wrapper: resolves the AudioFlinger client and delegates the
// input-buffer-size query to it. Returns NO_INIT when the connection to the
// audio server is unavailable; buffSize is the output parameter.
//
// NOTE: the quoted snippet had editor line numbers (392-399) pasted into the
// code itself; they are stripped here so the excerpt actually compiles.
status_t AudioSystem::getInputBufferSize(uint32_t sampleRate, audio_format_t format,
        audio_channel_mask_t channelMask, size_t* buffSize)
{
    const sp<AudioFlingerClient> afc = getAudioFlingerClient();
    if (afc == 0) {
        return NO_INIT;
    }
    return afc->getInputBufferSize(sampleRate, format, channelMask, buffSize);
}
// Queries the primary audio HAL for the hardware input buffer size (in bytes)
// corresponding to the requested recording configuration. Returns 0 when
// AudioFlinger is not initialized, the parameters are invalid, or no
// configuration close to the request is supported by the device.
// (Flat indentation preserved from the quoted source.)
size_t AudioFlinger::getInputBufferSize(uint32_t sampleRate, audio_format_t format,
audio_channel_mask_t channelMask) const
{
// Bail out early if the service itself failed to initialize.
status_t ret = initCheck();
if (ret != NO_ERROR) {
return 0;
}
// Reject parameter combinations that cannot describe a PCM input stream.
if ((sampleRate == 0) ||
!audio_is_valid_format(format) || !audio_has_proportional_frames(format) ||
!audio_is_input_channel(channelMask)) {
return 0;
}
AutoMutex lock(mHardwareLock);
if (mPrimaryHardwareDev == nullptr) {
return 0;
}
mHardwareStatus = AUDIO_HW_GET_INPUT_BUFFER_SIZE;
sp<DeviceHalInterface> dev = mPrimaryHardwareDev->hwDevice();
// Build fallback candidate lists: the requested value first, then common
// defaults (mono/stereo, PCM 16-bit, 48 kHz then 44.1 kHz) as alternatives.
std::vector<audio_channel_mask_t> channelMasks = {channelMask};
if (channelMask != AUDIO_CHANNEL_IN_MONO)
channelMasks.push_back(AUDIO_CHANNEL_IN_MONO);
if (channelMask != AUDIO_CHANNEL_IN_STEREO)
channelMasks.push_back(AUDIO_CHANNEL_IN_STEREO);
std::vector<audio_format_t> formats = {format};
if (format != AUDIO_FORMAT_PCM_16_BIT)
formats.push_back(AUDIO_FORMAT_PCM_16_BIT);
std::vector<uint32_t> sampleRates = {sampleRate};
static const uint32_t SR_44100 = 44100;
static const uint32_t SR_48000 = 48000;
if (sampleRate != SR_48000)
sampleRates.push_back(SR_48000);
if (sampleRate != SR_44100)
sampleRates.push_back(SR_44100);
mHardwareStatus = AUDIO_HW_IDLE;
// Change parameters of the configuration each iteration until we find a
// configuration that the device will support.
audio_config_t config = AUDIO_CONFIG_INITIALIZER;
for (auto testChannelMask : channelMasks) {
config.channel_mask = testChannelMask;
for (auto testFormat : formats) {
config.format = testFormat;
for (auto testSampleRate : sampleRates) {
config.sample_rate = testSampleRate;
size_t bytes = 0;
// Ask the HAL how large a buffer it would use for this candidate config.
status_t result = dev->getInputBufferSize(&config, &bytes);
if (result != OK || bytes == 0) {
continue;
}
// The HAL accepted a fallback config rather than the exact request:
// convert its byte count to the equivalent for the requested config.
// frames = bytes / bytes-per-frame at the HAL's config, rescaled to the
// requested sample rate, then back to bytes at the requested
// channel count and format.
if (config.sample_rate != sampleRate || config.channel_mask != channelMask ||
config.format != format) {
uint32_t dstChannelCount = audio_channel_count_from_in_mask(channelMask);
uint32_t srcChannelCount =
audio_channel_count_from_in_mask(config.channel_mask);
size_t srcFrames =
bytes / audio_bytes_per_frame(srcChannelCount, config.format);
size_t dstFrames = destinationFramesPossible(
srcFrames, config.sample_rate, sampleRate);
bytes = dstFrames * audio_bytes_per_frame(dstChannelCount, format);
}
return bytes;
}
}
}
// No candidate configuration was accepted by the HAL.
ALOGW("getInputBufferSize failed with minimum buffer size sampleRate %u, "
"format %#x, channelMask %#x",sampleRate, format, channelMask);
return 0;
}
for (auto testChannelMask : channelMasks) {
config.channel_mask = testChannelMask;
for (auto testFormat : formats) {
config.format = testFormat;
for (auto testSampleRate : sampleRates) {
config.sample_rate = testSampleRate;
size_t bytes = 0;
status_t result = dev->getInputBufferSize(&config, &bytes);
if (result != OK || bytes == 0) {
continue;
}
if (config.sample_rate != sampleRate || config.channel_mask != channelMask ||
config.format != format) {
uint32_t dstChannelCount = audio_channel_count_from_in_mask(channelMask);
uint32_t srcChannelCount =
audio_channel_count_from_in_mask(config.channel_mask);
size_t srcFrames =
bytes / audio_bytes_per_frame(srcChannelCount, config.format);
size_t dstFrames = destinationFramesPossible(
srcFrames, config.sample_rate, sampleRate);
bytes = dstFrames * audio_bytes_per_frame(dstChannelCount, format);
}
return bytes;
}
如果硬件层支持我们设置的采样率,采样大小,声道,那么就从
status_t result = dev->getInputBufferSize(&config, &bytes);
直接返回bytes。
如果硬件层不支持我们设置的参数,我们就将实际能获取到的配置换算成我们需要的配置
我们通过audio_channel_count_from_in_mask获取到目前的声道数srcChannelCount和我们需要的声道数dstChannelCount
我们计算当前的buffersize可以有多少个采样点
size_t srcFrames =
bytes / audio_bytes_per_frame(srcChannelCount, config.format);
目的采样点数的换算公式是
srcFrames/dstFrames = src采样率/dst采样率
我们计算出目的采样点数
所以最后的计算出buffersize=采样点数 * 通道数 * 采样大小(多少字节)。
我们最后总结一下
AudioFlinger::getInputBufferSize 值是采样点数 * 通道数 * 采样大小
返回到 AudioRecord::getMinFrameCount 中,由于录音缓冲区要做 ping pong(乒乓)使用,上一步算出的 buffersize 会乘以 2,
导致最小采样点数扩大了2倍,相对于getInputBufferSize中的采样点数。
一直回到最上层 native_get_min_buff_size = 最终计算的采样点数 * 声道数 * 采样大小我们计算出我们每个缓冲区的大小。这个缓冲区大小是底层每帧音频数据采样点数的2倍。