Android Audio System 之三: AudioPolicyService 和 AudioPolicyManager(转)
http://www.cnblogs.com/yangjies145/p/6436209.html
Android AudioPolicyService服务启动过程
http://www.thinksaas.cn/topics/0/750/750089.html
Android 7.0 Audio: AudioFlinger介绍和初始化
http://blog.youkuaiyun.com/xiashaohua/article/details/53818568
audioflinger : AudioHwDevice(HAL层对象)
// NOTE(review): pseudo-code sketch of AudioPolicyService::onFirstRef() — the real
// signature is void AudioPolicyService::onFirstRef(); kept as-is for note-taking.
OnFirstRef{
// start tone playback thread // plays tones (DTMF etc.)
mTonePlaybackThread = new AudioCommandThread(String8("ApmTone"), this);
// start audio commands thread // handles route switching and volume changes
mAudioCommandThread = new AudioCommandThread(String8("ApmAudio"), this);
// start output activity command thread
mOutputCommandThread = new AudioCommandThread(String8("ApmOutput"), this);
mAudioPolicyClient = new AudioPolicyClient(this);// concrete type: AudioPolicyClientImpl
mAudioPolicyManager = createAudioPolicyManager(mAudioPolicyClient);// resolved via extern "C" factory, see below
}
frameworks/av/services/audiopolicy/manager/AudioPolicyFactory.cpp:21:extern "C" AudioPolicyInterface* createAudioPolicyManager(
hardware/xxx/audio/policy_hal/AudioPolicyManager.cpp:138:extern "C" AudioPolicyInterface* createAudioPolicyManager(
AudioPolicyFactory.cpp
// Default (AOSP) factory: builds the stock policy manager.
extern "C" AudioPolicyInterface* createAudioPolicyManager(
AudioPolicyClientInterface *clientInterface)
{
return new AudioPolicyManager(clientInterface);// stock AOSP policy-manager object (caller takes ownership)
}
hardware/xxx/audio/policy_hal/ AudioPolicyManager.cpp
// ----------------------------------------------------------------------------
// AudioPolicyInterface implementation
// ----------------------------------------------------------------------------
// Vendor override of the factory: same C symbol, but returns the
// hardware-specific subclass instead of the stock implementation.
extern "C" AudioPolicyInterface* createAudioPolicyManager(
AudioPolicyClientInterface *clientInterface)
{
return new AudioPolicyManagerCustom(clientInterface);// vendor (xxx) HAL-specific policy-manager object
}
#include <audiopolicy/managerdefault/AudioPolicyManager.h>
class AudioPolicyManagerCustom: public AudioPolicyManager
class AudioPolicyManager : public AudioPolicyInterface, public AudioPolicyManagerObserver
AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterface)
AudioPolicyClientImpl.cpp
/* implementation of the client interface from the policy manager */
/*
 * Client-interface hook invoked by the policy manager: forwards a HAL
 * module load request to AudioFlinger.
 *
 * @param name  name of the audio HW module to load (e.g. "primary")
 * @return      handle of the loaded module, or AUDIO_MODULE_HANDLE_NONE
 *              when AudioFlinger is unavailable
 */
audio_module_handle_t AudioPolicyService::AudioPolicyClient::loadHwModule(const char *name)
{
    const sp<IAudioFlinger> audioFlinger = AudioSystem::get_audio_flinger();
    if (audioFlinger != 0) {
        // Delegate to AudioFlinger, which opens the HAL device module.
        return audioFlinger->loadHwModule(name);
    }
    ALOGW("%s: could not get AudioFlinger", __func__);
    return AUDIO_MODULE_HANDLE_NONE;
}
为什么Audio需要特别强调Phone的状态呢?这必须和智能手机的硬件架构联系上。先看智能手机的硬件架构,如图7-13所示:
图7-13 智能手机的硬件架构图
SwAudioOutputDescriptor : DSP设备
frameworks/av/services/audiopolicy/enginedefault/src/Engine.cpp
// Maps an audio stream type to the routing strategy that decides which
// output device(s) it should use. NOTE: the switch relies on deliberate
// fall-throughs; do not reorder cases.
routing_strategy Engine::getStrategyForStream(audio_stream_type_t stream)
{
// stream to strategy mapping
switch (stream) {
case AUDIO_STREAM_VOICE_CALL:
case AUDIO_STREAM_BLUETOOTH_SCO:
return STRATEGY_PHONE;
case AUDIO_STREAM_RING:
case AUDIO_STREAM_ALARM:
return STRATEGY_SONIFICATION;
case AUDIO_STREAM_NOTIFICATION:
return STRATEGY_SONIFICATION_RESPECTFUL;
case AUDIO_STREAM_DTMF:
return STRATEGY_DTMF;
default:
ALOGE("unknown stream type %d", stream);
// fall through: unknown streams are treated like SYSTEM (→ MEDIA)
case AUDIO_STREAM_SYSTEM:
// NOTE: SYSTEM stream uses MEDIA strategy because muting music and switching outputs
// while key clicks are played produces a poor result
// fall through
case AUDIO_STREAM_MUSIC:
return STRATEGY_MEDIA;
case AUDIO_STREAM_ENFORCED_AUDIBLE:
return STRATEGY_ENFORCED_AUDIBLE;
case AUDIO_STREAM_TTS:
return STRATEGY_TRANSMITTED_THROUGH_SPEAKER;
case AUDIO_STREAM_ACCESSIBILITY:
return STRATEGY_ACCESSIBILITY;
case AUDIO_STREAM_REROUTING:
return STRATEGY_REROUTING;
}
}
图7-14展示了AMB和AF及MixerThread之间的关系:
图7-14 AF、AMB及MixerThread之间的关系
· 图7-13展示了智能手机的硬件架构,通过和Audio相关的架构设计,我们能理解Audio系统设计的缘由。
· 图7-14展示了APS和AF内部联系的纽带,后续APS的控制无非就是找到对应的MixerThread,给它发送控制消息,最终由MixerThread将控制信息传给对应的代表音频输出设备的HAL对象。
下面用图7-15来回顾一下上面AT、AF、AP之间的交互关系。
图7-15 Audio三巨头的交互关系
图7-15充分展示了AT、AF和AP之间复杂微妙的关系。关系虽复杂,但目的却单纯。读者在分析时一定要明确目的。下面从目的开始,反推该流程:
· AT的目的是把数据发送给对应的设备,例如是蓝牙、DSP等。
· 代表输出设备的HAL对象由MixerThread线程持有,所以要找到对应的MixerThread。
· AP维护流类型和输出设备(耳机、蓝牙耳机、听筒等)之间的关系,不同的输出设备使用不同的混音线程。
· AT根据自己的流类型,向AudioSystem查询,希望得到对应的混音线程号。
这样,三者精妙配合,便达到了预期目的。
· 为什么startOutput函数会和设备切换有关系呢?
仅举一个例子,帮助理解这一问题。AudioTrack创建时可设置音频流类型,假设第一个AT创建时使用的是MUSIC类型,那么它将使用耳机出声(假设耳机已经连接上)。这时第二个AT创建了,它使用的是RING类型,它对应的策略应是SONIFICATION,这个策略的优先级比MUSIC要高(因为getNewDevice的判断语句首先会判断isUsedByStrategy(STRATEGY_SONIFICATION)),所以这时需要把设备切换为耳机加扬声器(假设这种类型的声音需要从耳机和扬声器同时输出)。startOutput的最终结果,是这两路的Track声音都将从耳机和扬声器中听到。当第二路AT调用stop时,对应音频流类型使用计数会减一,这会导致新的路由切换,并重新回到只有耳机的情况,这时第一路AT的声音会恢复为只从耳机输出。
提醒:读者可自行分析stop的处理方式,基本上是start的逆向处理过程。
这一节主要讲解了AudioTrack和AP之间的交互,总结为以下两点:
· AT通过AP获取AF中的工作线程索引号,这决定了数据传输的最终目标是谁,比如是音频DSP或是蓝牙。
· AT的start和stop会影响Audio系统的路由切换。
耳机插入事件:
frameworks/base/services/core/java/com/android/server/audio/AudioService.java
/**
 * Applies a wired/BT device connection or disconnection to the native
 * audio policy and keeps {@code mConnectedDevices} in sync.
 *
 * @param connect    true to connect the device, false to disconnect
 * @param device     AudioSystem device type constant
 * @param address    device address (may be empty)
 * @param deviceName human-readable device name
 * @return true when the connection state actually changed
 */
private boolean handleDeviceConnection(boolean connect, int device, String address,
        String deviceName) {
    if (DEBUG_DEVICES) {
        Slog.i(TAG, "handleDeviceConnection(" + connect + " dev:" + Integer.toHexString(device)
                + " address:" + address + " name:" + deviceName + ")");
    }
    synchronized (mConnectedDevices) {
        final String key = makeDeviceListKey(device, address);
        if (DEBUG_DEVICES) {
            Slog.i(TAG, "deviceKey:" + key);
        }
        final DeviceListSpec spec = mConnectedDevices.get(key);
        final boolean alreadyConnected = (spec != null);
        if (DEBUG_DEVICES) {
            Slog.i(TAG, "deviceSpec:" + spec + " is(already)Connected:" + alreadyConnected);
        }
        if (connect == alreadyConnected) {
            // No state change needed: already connected (or already absent).
            return false;
        }
        if (connect) {
            // Tell the native policy first; only record the device on success.
            final int res = AudioSystem.setDeviceConnectionState(device,
                    AudioSystem.DEVICE_STATE_AVAILABLE, address, deviceName);
            if (res != AudioSystem.AUDIO_STATUS_OK) {
                Slog.e(TAG, "not connecting device 0x" + Integer.toHexString(device) +
                        " due to command error " + res );
                return false;
            }
            mConnectedDevices.put(key, new DeviceListSpec(device, deviceName, address));
        } else {
            AudioSystem.setDeviceConnectionState(device,
                    AudioSystem.DEVICE_STATE_UNAVAILABLE, address, deviceName);
            // always remove even if disconnection failed
            mConnectedDevices.remove(key);
        }
        return true;
    }
}
AudioPolicyManager.cpp
setDeviceConnectionState{
updateDevicesAndOutputs;
getNewOutputDevice;
setOutputDevice;
}
setOutputDevice-->setParameters最终会调用APS的setParameters
2. AudioCommandThread
AudioCommandThread有一个请求处理队列,AP负责往该队列中提交请求,而AudioCommandThread在它的线程函数threadLoop中处理这些命令。请直接看命令是如何处理的。
说明:这种通过一个队列来协调两个线程的方法,在多线程编程中非常常见,它也属于生产者/消费者模型。
// checkForNewParameter_l() must be called with ThreadBase::mLock held
bool AudioFlinger::MixerThread::checkForNewParameter_l()
hardware/xxx/audio/hal/audio_hw.c
static int out_set_parameters(struct audio_stream *stream, const char *kvpairs)
校准
@ubuntu-server:~/android$ grep -rsn "msm_audio_cal" hardware/
hardware/xxx/audio/hal/audio_extn/spkr_protection.c:47:#include <linux/msm_audio_calibration.h>
hardware/xxx/audio/hal/audio_extn/spkr_protection.c:325: acdb_fd = open("/dev/msm_audio_cal",O_RDWR | O_NONBLOCK);
hardware/xxx/audio/hal/audio_extn/spkr_protection.c:551: acdb_fd = open("/dev/msm_audio_cal",O_RDWR | O_NONBLOCK);
@ubuntu-server:~/android$ grep -rsn "msm_audio_cal" kernel/
kernel/sound/soc/msm/xxx/rtac.c:21:#include <linux/msm_audio_calibration.h>
kernel/sound/soc/msm/xxx/audio_calibration.c:589: .name = "msm_audio_cal",
kernel/include/uapi/linux/Kbuild:286:header-y += msm_audio_calibration.h
kernel/include/sound/apr_audio-v2.h:40:/* make sure this matches with msm_audio_calibration */
kernel/include/sound/audio_calibration.h:16:#include <linux/msm_audio_calibration.h>
kernel/include/sound/audio_cal_utils.h:18:#include <linux/msm_audio_calibration.h>
frameworks/av/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
checkOutputsForDevice;//a2dp设备
DT是从MT中派生的,根据AP和AT的交互流程,当AT创建的流类型对应策略为SONIFICATION时,它会从AP中得到代表DT的线程索引号。由于DT没有重载createTrack_l,所以这个过程也会创建一个Track对象(和MT创建Track对象一样)。此时的结果,将导致图7-16变成图7-17。
图7-17 有AT的DT全景图