AudioTrack Playback Flow – Passing the AudioBuffer
This article walks through AudioTrack creation, play, and write, the shared-buffer mechanism behind the AudioTrack proxies, and briefly covers the mixing (mix) and resampling (Resampler) flows. These are study notes; corrections are welcome.
Table of Contents
- AudioTrack Playback Flow – Passing the AudioBuffer
  - 1. The AudioTrack creation process
  - 2. AudioTrack proxy initialization
  - 3. AudioTrack start
    - 3.1 How start is passed down
    - 3.2 Track::start
    - 3.3 The thread's addTrack
    - 3.4 PlaybackThread::threadLoop
    - 3.5 MixerThread::prepareTracks_l
    - 3.6 MixerThread::threadLoop_mix
    - 3.7 MixerThread::threadLoop_write
    - 3.8 Track::getNextBuffer
    - 3.9 Track::releaseBuffer
  - 4. AudioTrack write
  - 5. The FIFO ring-buffer mechanism
1. The AudioTrack creation process
1.1 The basic track creation flow
In AudioTrack.cpp the track is created through AudioFlinger. Here `input` is not an audio input (I/O) stream but the set of parameters the AudioTrack requests, and `output` carries AudioFlinger's results back to the AudioTrack. The requested parameters may be changed (they may not match what the hardware supports, may be overridden by policy, may be empty, and so on), so `output` is how the upper layer learns which settings were actually applied.
status_t AudioTrack::createTrack_l()
{
sp<IAudioTrack> track = audioFlinger->createTrack(input,
output,
&status);
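As a rough, self-contained illustration of this input/output parameter idea (hypothetical structs and a made-up negotiate() function, not AOSP code), the server side can be pictured as clamping the requested values and reporting back what it actually used:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Hypothetical request/response structs, standing in for the real
// CreateTrackInput/CreateTrackOutput parameter blocks.
struct RequestedConfig { uint32_t sampleRate; size_t frameCount; };
struct ActualConfig   { uint32_t sampleRate; size_t frameCount; };

// The "server" may override what the client asked for, e.g. because the
// output stream runs at a fixed rate or enforces a minimum buffer size.
ActualConfig negotiate(const RequestedConfig& in) {
    ActualConfig out;
    out.sampleRate = 48000;                                 // mixer rate wins (assumed)
    out.frameCount = std::max<size_t>(in.frameCount, 960);  // enforce a minimum (assumed)
    return out;
}

int main() {
    RequestedConfig in{44100, 512};
    ActualConfig out = negotiate(in);
    // The caller must continue with the returned values, not the requested ones.
    std::printf("requested %u Hz / %zu frames -> got %u Hz / %zu frames\n",
                in.sampleRate, in.frameCount, out.sampleRate, out.frameCount);
    return 0;
}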
Next, let's see where audio_track_cblk_t gets created.
The track is created inside AudioFlinger. The thread is found by mapping the audio attributes to an output and then looking up the thread that owns that output; for offload playback a new thread is created. Because AudioFlinger and AudioTrack live in different processes, a TrackHandle is wrapped around the track before it is returned so it can be called across processes. TrackHandle is the Bn side of IAudioTrack; the Bp side of IAudioTrack is what ends up back in the AudioTrack.
An ordinary Track uses a MixerThread:
track = thread->createTrack_l(client, streamType, input.attr, &output.sampleRate,
input.config.format, input.config.channel_mask,
&output.frameCount, &output.notificationFrameCount,
input.notificationsPerBuffer, input.speed,
input.sharedBuffer, sessionId, &output.flags,
input.clientInfo.clientTid, clientUid, &lStatus, portId);
// return handle to client
trackHandle = new TrackHandle(track);
Taking PlaybackThread as the example, createTrack_l does:
track = new Track(this, client, streamType, attr, sampleRate, format,
channelMask, frameCount,
nullptr /* buffer */, (size_t)0 /* bufferSize */, sharedBuffer,
sessionId, uid, *flags, TrackBase::TYPE_DEFAULT, portId);
//init check
lStatus = track != 0 ? track->initCheck() : (status_t) NO_MEMORY;
mTracks.add(track);
Track is a subclass of TrackBase. The allocation type passed down to the TrackBase constructor is:
(type == TYPE_PATCH) ? ( buffer == NULL ? ALLOC_LOCAL : ALLOC_NONE) : ALLOC_CBLK,
1.2 TrackBase creates mCblk
First look at the constructor of the parent class TrackBase:
// In STREAM mode frameCount is the minimum number of frames per transfer; in STATIC mode it is the total frame count
size_t minBufferSize = buffer == NULL ? roundup(frameCount) : frameCount;
minBufferSize *= mFrameSize;
// determine the buffer size
if (buffer == nullptr) {
bufferSize = minBufferSize; // allocated here.
} else if (minBufferSize > bufferSize) {
android_errorWriteLog(0x534e4554, "38340117");
return;
}
// size of the cblk
size_t size = sizeof(audio_track_cblk_t);
if (buffer == NULL && alloc == ALLOC_CBLK) {
// check overflow when computing allocation size for streaming tracks.
if (size > SIZE_MAX - bufferSize) {
android_errorWriteLog(0x534e4554, "34749571");
return;
}
// add the buffer size
size += bufferSize;
}
// client is created by AudioFlinger for each track and is used to allocate memory
if (client != 0) {
// allocate from the client's shared memory heap
mCblkMemory = client->heap()->allocate(size);
if (mCblkMemory == 0 ||
(mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer())) == NULL) {
ALOGE("not enough memory for AudioTrack size=%zu", size);
client->heap()->dump("AudioTrack");
mCblkMemory.clear();
return;
}
} else {
// normally not reached
mCblk = (audio_track_cblk_t *) malloc(size);
if (mCblk == NULL) {
ALOGE("not enough memory for AudioTrack size=%zu", size);
return;
}
}
if (mCblk != NULL) {
new(mCblk) audio_track_cblk_t();
switch (alloc) {
case ALLOC_CBLK:
// clear all buffers
if (buffer == NULL) {
mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
// initialize the buffer to zero
memset(mBuffer, 0, bufferSize);
} else {
mBuffer = buffer;
#if 0
mCblk->mFlags = CBLK_FORCEREADY; // FIXME hack, need to fix the track ready logic
#endif
}
break;
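The resulting layout is a single contiguous allocation: the audio_track_cblk_t header first, with the data buffer starting right behind it (which is also why the client can later recover the buffer with cblk + 1). A minimal, self-contained sketch of the same layout, using a hypothetical ControlBlock struct and ordinary heap memory instead of the shared memory heap:

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <new>

// Hypothetical stand-in for audio_track_cblk_t: a header both sides agree on,
// placed at the start of the shared block.
struct ControlBlock {
    uint32_t front = 0;               // read index (consumer side)
    uint32_t rear  = 0;               // write index (producer side)
    uint32_t bufferSizeInFrames = 0;
};

int main() {
    const size_t bufferSize = 4096;                        // audio data bytes
    const size_t total = sizeof(ControlBlock) + bufferSize;

    void* raw = std::malloc(total);                        // stands in for client->heap()->allocate(size)
    ControlBlock* cblk = new (raw) ControlBlock();         // placement-new, like new(mCblk) audio_track_cblk_t()

    // The data area starts immediately after the header...
    char* buffer = reinterpret_cast<char*>(cblk) + sizeof(ControlBlock);
    std::memset(buffer, 0, bufferSize);                    // "clear all buffers"

    // ...which is exactly what the pointer arithmetic "cblk + 1" computes.
    std::printf("same address: %d\n", buffer == reinterpret_cast<char*>(cblk + 1));

    cblk->~ControlBlock();
    std::free(raw);
    return 0;
}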
1.3 The AudioBufferProvider inheritance chain
AudioBufferProvider has two pure virtual functions: one obtains the next buffer, the other releases it.
virtual status_t getNextBuffer(Buffer* buffer) = 0;
virtual void releaseBuffer(Buffer* buffer) = 0;
ExtendedAudioBufferProvider inherits from AudioBufferProvider and adds three more virtual methods:
class ExtendedAudioBufferProvider : public AudioBufferProvider {
public:
    virtual size_t framesReady() const = 0;   // see description at AudioFlinger.h

    // Return the total number of frames that have been obtained and released
    virtual int64_t framesReleased() const { return 0; }

    // Invoked by buffer consumer when a new timestamp is available.
    // Default implementation ignores the timestamp.
    virtual void onTimestamp(const ExtendedTimestamp& /*timestamp*/) { }
};
TrackBase in turn inherits ExtendedAudioBufferProvider; at this level only getNextBuffer remains pure virtual:
// AudioBufferProvider interface
virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer) = 0;
virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
Track in PlaybackTracks.h inherits TrackBase and drops the = 0, i.e. it supplies the getNextBuffer implementation:
// playback track
class Track : public TrackBase, public VolumeProvider {
public:
virtual status_t start(AudioSystem::sync_event_t event =
AudioSystem::SYNC_EVENT_NONE,
audio_session_t triggerSession = AUDIO_SESSION_NONE);
virtual void stop();
void pause();
void flush();
void destroy();
// AudioBufferProvider interface
virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer);
// releaseBuffer() not overridden
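To get a feel for the obtain/release contract that Track ultimately implements (getNextBuffer grants a chunk of frames, releaseBuffer hands them back), here is a minimal, self-contained provider over a local array. The Buffer struct and class names are simplified stand-ins, not the AOSP definitions:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Simplified stand-ins for AudioBufferProvider and its Buffer.
struct Buffer {
    size_t frameCount;  // in: frames wanted, out: frames granted
    void*  raw;         // points to the first granted frame
};

class BufferProvider {
public:
    virtual ~BufferProvider() = default;
    virtual int  getNextBuffer(Buffer* buffer) = 0;
    virtual void releaseBuffer(Buffer* buffer) = 0;
};

// Serves 16-bit mono frames out of a fixed array, one chunk at a time.
class ArrayProvider : public BufferProvider {
public:
    ArrayProvider(int16_t* data, size_t frames) : mData(data), mFrames(frames) {}

    int getNextBuffer(Buffer* buffer) override {
        size_t avail = mFrames - mPos;
        buffer->frameCount = std::min(buffer->frameCount, avail);  // may grant less than asked
        buffer->raw = (buffer->frameCount > 0) ? mData + mPos : nullptr;
        return (buffer->frameCount > 0) ? 0 : -1;                  // 0 plays the role of NO_ERROR
    }

    void releaseBuffer(Buffer* buffer) override {
        mPos += buffer->frameCount;   // the consumer is done with these frames
        buffer->frameCount = 0;
        buffer->raw = nullptr;
    }

private:
    int16_t* mData;
    size_t   mFrames;
    size_t   mPos = 0;
};

int main() {
    int16_t pcm[10] = {};
    ArrayProvider provider(pcm, 10);

    Buffer b;
    b.frameCount = 4;                        // ask for 4 frames
    while (provider.getNextBuffer(&b) == 0) {
        std::printf("got %zu frames\n", b.frameCount);
        provider.releaseBuffer(&b);          // must release before asking again
        b.frameCount = 4;
    }
    return 0;
}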
1.4 The Track's proxy
Back in the Track constructor, the server-side proxy for the AudioTrack is created:
if (sharedBuffer == 0) {
mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount,
mFrameSize, !isExternalTrack(), sampleRate);
} else {
mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount,
mFrameSize);
}
mServerProxy = mAudioTrackServerProxy;
Returning to AudioTrack::createTrack_l, the client-side proxy is created from the same mCblk and buffer:
sp<IMemory> iMem = track->getCblk();
void *iMemPointer = iMem->pointer();
mAudioTrack = track;
mCblkMemory = iMem;
IPCThreadState::self()->flushCommands();
// get the cblk
audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
mCblk = cblk;
// mSharedBuffer == 0 means STREAM mode, non-zero means STATIC mode
void* buffers;
if (mSharedBuffer == 0) {
// cblk + 1 points at the buffer right after the control block
buffers = cblk + 1;
} else {
buffers = mSharedBuffer->pointer();
if (buffers == NULL) {
ALOGE("Could not get buffer pointer");
status = NO_INIT;
goto exit;
}
}
// create the AudioTrack client proxy
// update proxy
if (mSharedBuffer == 0) {
mStaticProxy.clear();
mProxy = new AudioTrackClientProxy(cblk, buffers, mFrameCount, mFrameSize);
} else {
mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, mFrameCount, mFrameSize);
mProxy = mStaticProxy;
}
2. AudioTrack proxy initialization
First, the inheritance chains:
AudioTrackServerProxy : ServerProxy : Proxy
AudioTrackClientProxy : ClientProxy : Proxy
2.1 Proxy initialization
Proxy stores mBuffers and mCblk:
class Proxy : public RefBase {
protected:
Proxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount, size_t frameSize, bool isOut,
bool clientInServer);
virtual ~Proxy() {
}
public:
struct Buffer {
size_t mFrameCount; // number of frames available in this buffer
void* mRaw; // pointer to first frame
size_t mNonContig; // number of additional non-contiguous frames available
};
size_t frameCount() const { return mFrameCount; }
protected:
// These refer to shared memory, and are virtual addresses with respect to the current process.
// They may have different virtual addresses within the other process.
audio_track_cblk_t* const mCblk; // the control block
void* const mBuffers; // starting address of buffers
const size_t mFrameCount; // not necessarily a power of 2
const size_t mFrameSize; // in bytes
const size_t mFrameCountP2; // mFrameCount rounded to power of 2, streaming mode
const bool mIsOut; // true for AudioTrack, false for AudioRecord
const bool mClientInServer; // true for OutputTrack, false for AudioTrack & AudioRecord
bool mIsShutdown; // latch set to true when shared memory corruption detected
size_t mUnreleased; // unreleased frames remaining from most recent obtainBuffer
};
2.2 ServerProxy initialization
It records the buffer size (in frames) in the cblk:
ServerProxy::ServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
size_t frameSize, bool isOut, bool clientInServer)
: Proxy(cblk, buffers, frameCount, frameSize, isOut, clientInServer),
mAvailToClient(0), mFlush(0), mReleased(0), mFlushed(0)
, mTimestampMutator(&cblk->mExtendedTimestampQueue)
{
// set mBufferSizeInFrames
cblk->mBufferSizeInFrames = frameCount;
}
2.3 ClientProxy initialization
It sets the buffer size as well, clamped through setBufferSizeInFrames:
ClientProxy::ClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
size_t frameSize, bool isOut, bool clientInServer)
: Proxy(cblk, buffers, frameCount, frameSize, isOut, clientInServer)
, mEpoch(0)
, mTimestampObserver(&cblk->mExtendedTimestampQueue)
{
// set the buffer size; frameCount matches the cblk's buffer size
setBufferSizeInFrames(frameCount);
}
// the size is clamped to at least 16 frames and at most frameCount()
uint32_t ClientProxy::setBufferSizeInFrames(uint32_t size)
{
// The minimum should be greater than zero and less than the size
// at which underruns will occur.
const uint32_t minimum = 16; // based on AudioMixer::BLOCKSIZE
const uint32_t maximum = frameCount();
uint32_t clippedSize = size;
if (maximum < minimum) {
clippedSize = maximum;
} else if (clippedSize < minimum) {
clippedSize = minimum;
} else if (clippedSize > maximum) {
clippedSize = maximum;
}
// for server to read
android_atomic_release_store(clippedSize, (int32_t *)&mCblk->mBufferSizeInFrames);
// for client to read
mBufferSizeInFrames = clippedSize;
return clippedSize;
}
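The clamp is easy to check with a few sample values. Below is a standalone reimplementation of the same rule for illustration, with frameCount() assumed to be 960:

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Same clamp rule as setBufferSizeInFrames, reimplemented standalone.
uint32_t clampBufferSize(uint32_t size, uint32_t frameCount) {
    const uint32_t minimum = 16;          // based on AudioMixer::BLOCKSIZE
    const uint32_t maximum = frameCount;
    if (maximum < minimum) return maximum;
    return std::min(std::max(size, minimum), maximum);
}

int main() {
    std::printf("%u\n", clampBufferSize(4,    960));  // 16   (raised to the minimum)
    std::printf("%u\n", clampBufferSize(480,  960));  // 480  (unchanged)
    std::printf("%u\n", clampBufferSize(2048, 960));  // 960  (clipped to frameCount)
    return 0;
}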
2.4 AudioTrackServerProxy initialization
It sets the sample rate in the cblk and initializes the PlaybackRate observer plus the underrun and drained state:
AudioTrackServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
size_t frameSize, bool clientInServer = false, uint32_t sampleRate = 0)
: ServerProxy(cblk, buffers, frameCount, frameSize, true /*isOut*/, clientInServer),
mPlaybackRateObserver(&cblk->mPlaybackRateQueue),
mUnderrunCount(0), mUnderrunning(false), mDrained(true) {
mCblk->mSampleRate = sampleRate;
mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
}
2.5 AudioTrackClientProxy initialization
It sets up the mPlaybackRateQueue mutator; whatever the client writes there is picked up by the server proxy's observer:
AudioTrackClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
size_t frameSize, bool clientInServer = false)
: ClientProxy(cblk, buffers, frameCount, frameSize, true /*isOut*/,
clientInServer),
mPlaybackRateMutator(&cblk->mPlaybackRateQueue) {
}
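The real code uses a StateQueue mutator/observer pair living in the shared cblk: the client publishes the latest playback rate, and the mixer thread picks it up the next time it looks. Below is a much-simplified, self-contained mailbox with the same publish/poll shape (the RateMailbox and PlaybackRate names are hypothetical; this is not the AOSP StateQueue implementation):

#include <atomic>
#include <cstdio>

struct PlaybackRate { float speed; float pitch; };   // stand-in for the real rate struct

// Single-writer / single-reader mailbox: the client publishes the latest value
// together with a generation count; the server applies it when the count changes.
class RateMailbox {
public:
    void push(const PlaybackRate& value) {            // client-side "mutator"
        mValue.store(value, std::memory_order_relaxed);
        mGeneration.fetch_add(1, std::memory_order_release);
    }
    bool poll(PlaybackRate* out) {                    // server-side "observer"
        unsigned gen = mGeneration.load(std::memory_order_acquire);
        if (gen == mLastSeen) return false;           // nothing new since last poll
        *out = mValue.load(std::memory_order_relaxed);
        mLastSeen = gen;
        return true;
    }
private:
    std::atomic<PlaybackRate> mValue{PlaybackRate{1.0f, 1.0f}};
    std::atomic<unsigned> mGeneration{0};
    unsigned mLastSeen = 0;                           // reader-local state, not shared
};

int main() {
    RateMailbox queue;
    queue.push({2.0f, 1.0f});                         // e.g. the app changes the playback rate
    PlaybackRate r;
    if (queue.poll(&r))                               // the mixer thread checks each cycle
        std::printf("new rate: speed=%.1f pitch=%.1f\n", r.speed, r.pitch);
    return 0;
}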
3. AudioTrack start
3.1 How start is passed down
AudioTrack::start calls start on IAudioTrack; the IAudioTrack held inside AudioTrack is the Bp side.
TrackHandle is the BnAudioTrack side:
status_t AudioFlinger::TrackHandle::start() {
return mTrack->start();
}
mTrack was created by the playback thread; Track::start takes two parameters with default values:
//PlaybackTracks.h
virtual status_t start(AudioSystem::sync_event_t event =
AudioSystem::SYNC_EVENT_NONE,
audio_session_t triggerSession = AUDIO_SESSION_NONE);
3.2 Track::start
The function is fairly long, so only the buffer-related parts are shown: it adds the track to the thread and starts the AudioTrackServerProxy.
status_t AudioFlinger::PlaybackThread::Track::start(AudioSystem::sync_event_t event __unused,
audio_session_t triggerSession __unused)
{
// add this track to the thread's active list; mThread is the MixerThread
sp<ThreadBase> thread = mThread.promote();
PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
status = playbackThread->addTrack_l(this);
// AudioTrackServerProxy start
if (status == NO_ERROR || status == ALREADY_EXISTS) {
// for streaming tracks, remove the buffer read stop limit.
mAudioTrackServerProxy->start();
}
If the track had already been added to the thread, NO_ERROR is returned directly; if this is the first start, obtainBuffer is called on the AudioTrackServerProxy:
// track was already in the active list, not a problem
if (status == ALREADY_EXISTS) {
status = NO_ERROR;
} else {
// Acknowledge any pending flush(), so that subsequent new data isn't discarded.
// It is usually unsafe to access the server proxy from a binder thread.
// But in this case we know the mixer thread (whether normal mixer or fast mixer)
// isn't looking at this track yet: we still hold the normal mixer thread lock,
// and for fast tracks the track is not yet in the fast mixer thread's active set.
// For static tracks, this is used to acknowledge change in position or loop.
ServerProxy::Buffer buffer;
buffer.mFrameCount = 1;
(void) mAudioTrackServerProxy->obtainBuffer(&buffer, true /*ackFlush*/);
}
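Both proxies operate on the same pair of free-running indices stored in the cblk: the client writes frames and advances the rear index, while the server reads frames and advances the front index via its obtainBuffer/releaseBuffer pair. A much-simplified single-producer/single-consumer sketch of that contract (hypothetical Ring struct and helpers, not the real ClientProxy/ServerProxy code):

#include <algorithm>
#include <atomic>
#include <cstdint>
#include <cstdio>

// Shared indices, conceptually living inside the cblk. frameCount must be a
// power of two so the free-running 32-bit indices can simply wrap.
struct Ring {
    std::atomic<uint32_t> front{0};   // advanced by the consumer (server)
    std::atomic<uint32_t> rear{0};    // advanced by the producer (client)
};

static const uint32_t kFrameCount = 8;   // power of two

// Producer side: how much space can still be written, then commit the write.
uint32_t framesAvailableToWrite(const Ring& r) {
    return kFrameCount - (r.rear.load(std::memory_order_relaxed) -
                          r.front.load(std::memory_order_acquire));
}
void commitWrite(Ring& r, uint32_t frames) {
    r.rear.fetch_add(frames, std::memory_order_release);
}

// Consumer side: how many frames are ready to read, then release them.
uint32_t framesReady(const Ring& r) {
    return r.rear.load(std::memory_order_acquire) -
           r.front.load(std::memory_order_relaxed);
}
void releaseFrames(Ring& r, uint32_t frames) {
    r.front.fetch_add(frames, std::memory_order_release);
}

int main() {
    Ring ring;
    // Client obtains space, writes, then advances rear.
    uint32_t writable = std::min<uint32_t>(framesAvailableToWrite(ring), 5);
    commitWrite(ring, writable);
    // Server obtains data, mixes, then advances front.
    uint32_t readable = framesReady(ring);
    releaseFrames(ring, readable);
    std::printf("wrote %u frames, mixer consumed %u frames\n", writable, readable);
    return 0;
}

On top of this index scheme the real proxies also deal with flush, underrun, and blocking waits, which is the FIFO ring-buffer mechanism covered later in these notes.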
3.3 The thread's addTrack
Looking only at the buffer-related code in addTrack_l, and leaving aside the ExternalTrack, PatchTrack, and offload handling, it is quite simple.
bool isExternalTrack() const { return !isOutputTrack() && !isPatchTrack(); }
If the track is already in mActiveTracks, ALREADY_EXISTS is returned; otherwise it is added to mActiveTracks.
status_t AudioFlinger::PlaybackThread::addTrack_l(const sp<Track>& track)
{
    status_t status = ALREADY_EXISTS;
    if (mActiveTracks.indexOf(track) < 0) {
        if (track->isExternalTrack()) {
            // external track check (handling elided here)
        }
        if (track->isOffloaded()) {
            // offload track check (handling elided here)
        }
        mActiveTracks.add(track);
        status = NO_ERROR;
    }
    onAddNewTrack_l();
    return status;
}
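A tiny standalone sketch of the same "add if absent" status flow, using plain ints in place of sp<Track> (hypothetical, not AOSP code); the ALREADY_EXISTS value here plays the role that Track::start later converts back to NO_ERROR:

#include <algorithm>
#include <cstdio>
#include <vector>

enum Status { NO_ERROR = 0, ALREADY_EXISTS = 1 };

// Same idea as addTrack_l: add the track only if it is not already active.
Status addTrack(std::vector<int>& activeTracks, int track) {
    if (std::find(activeTracks.begin(), activeTracks.end(), track) != activeTracks.end())
        return ALREADY_EXISTS;        // start() treats this as "not a problem"
    activeTracks.push_back(track);
    return NO_ERROR;                  // caller then signals the playback thread
}

int main() {
    std::vector<int> active;
    std::printf("%d\n", addTrack(active, 42));  // 0: newly added
    std::printf("%d\n", addTrack(active, 42));  // 1: already active
    return 0;
}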
Back in addTrack_l, onAddNewTrack_l then signals the playback thread:
void AudioFlinger::PlaybackThread::onAddNewTrack_l()
{
ALOGV("signal playback thread");
broadcast_l();
}
void AudioFlinger::ThreadBase::broadcast_l()
{
// Thread could be blocked waiting for async
// so signal it to handle state changes immediately
// If threadLoop is currently unlocked a signal of mWaitWorkCV will
// be lost so we also flag to prevent it blocking on mWaitWorkCV
mSignalPending = true;
mWaitWorkCV.broadcast();
}
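The comment in broadcast_l points at the classic lost-wakeup problem: the playback thread may be running outside the lock when the broadcast fires, so a plain notify could be missed. Below is a simplified, self-contained C++ sketch of the same flag-plus-condition-variable pattern (not the AOSP ThreadBase code); the real threadLoop counterpart follows right after:

#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

std::mutex gLock;
std::condition_variable gWaitWorkCV;
bool gSignalPending = false;

// Equivalent of broadcast_l(): set the flag *and* notify, so the signal is
// not lost if the worker is currently outside the lock.
void broadcastLocked() {
    gSignalPending = true;
    gWaitWorkCV.notify_all();
}

void workerLoop() {
    for (int cycle = 0; cycle < 3; ++cycle) {
        std::unique_lock<std::mutex> lock(gLock);
        if (gSignalPending) {
            gSignalPending = false;          // a signal arrived while we were unlocked
            std::printf("cycle %d: handling pending signal\n", cycle);
        } else {
            // Wait with a timeout, like mWaitWorkCV.waitRelative(mLock, waitNs).
            gWaitWorkCV.wait_for(lock, std::chrono::milliseconds(10));
        }
        lock.unlock();
        // ... do one mixing cycle outside the lock ...
    }
}

int main() {
    {
        std::lock_guard<std::mutex> lock(gLock);  // broadcast_l is called with the lock held
        broadcastLocked();
    }
    std::thread worker(workerLoop);
    worker.join();
    return 0;
}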
threadLoop is the thread's endless loop. The part of it that handles the signalling above is shown below; it can wait on the condition variable, but waitingAsyncCallback_l in PlaybackThread always returns false:
bool AudioFlinger::PlaybackThread::waitingAsyncCallback_l()
{
return false;
}
So this piece of code has little effect in PlaybackThread's threadLoop.
if (mSignalPending) {
// A signal was raised while we were unlocked
mSignalPending = false;
} else if (waitingAsyncCallback_l()) {
if (exitPending()) {
break;
}
bool released = false;
if (!keepWakeLock()) {
releaseWakeLock_l();
released = true;
}
const int64_t waitNs = computeWaitTimeNs_l();
ALOGV("wait async completion (wait time: %lld)", (long long)waitNs);
status_t status = mWaitWorkCV.waitRelative(mLock, waitNs);
if (status == TIMED_OUT)