1.MediaServer进程
/*
**
** Copyright 2008, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
#define LOG_TAG "mediaserver"
//#define LOG_NDEBUG 0
#include <binder/IPCThreadState.h>
#include <binder/ProcessState.h>
#include <binder/IServiceManager.h>
#include <utils/Log.h>
// from LOCAL_C_INCLUDES
#include "AudioFlinger.h"
#include "CameraService.h"
#include "MediaPlayerService.h"
#include "AudioPolicyService.h"
using namespace android;
// MediaServer entry point: opens binder, registers the media services with
// ServiceManager, then turns this process into a binder server loop.
int main(int argc, char** argv)
{
/* 1. Open the binder device; 2. mmap it for communication with the driver;
   3. ProcessState is a process-wide singleton */
sp<ProcessState> proc(ProcessState::self());
// Proxy for ServiceManager (binder handle 0), used by the instantiate() calls below.
sp<IServiceManager> sm = defaultServiceManager();
/* 2. Service registration: the business layer uses addService, the
   communication layer uses transact */
AudioFlinger::instantiate();
MediaPlayerService::instantiate();
CameraService::instantiate();
AudioPolicyService::instantiate();
/* 3. Start the thread pool; spawns one binder thread */
ProcessState::self()->startThreadPool();
/* 4. The main thread also joins the thread pool and loops handling requests */
IPCThreadState::self()->joinThreadPool();
}
(frameworks/av/media/mediaserver/main_mediaserver.cpp)
上面的MediaServer进程做了四件事情,打开Binder设备、往ServiceManager注册服务、创建线程池、主线程加入线程池
第一件事情:创建了一个ProcessState对象,ProcessState是一个单例模式类
self()函数: 创建了一个ProcessState
// Singleton accessor: lazily creates the process-wide ProcessState under
// gProcessMutex. The constructor opens /dev/binder and mmaps it.
sp<ProcessState> ProcessState::self()
{
Mutex::Autolock _l(gProcessMutex);
if (gProcess != NULL) {
return gProcess;
}
gProcess = new ProcessState;
return gProcess;
}
ProcessState的构造函数: 其中open_driver用来打开binder驱动、mmap用来映射内存以接受数据
// Constructor: open_driver() opens /dev/binder, then mmap() maps a region of
// BINDER_VM_SIZE bytes through which the driver delivers transaction data.
ProcessState::ProcessState()
: mDriverFD(open_driver())
, mVMStart(MAP_FAILED)
, mManagesContexts(false)
, mBinderContextCheckFunc(NULL)
, mBinderContextUserData(NULL)
, mThreadPoolStarted(false)
, mThreadPoolSeq(1)
{
if (mDriverFD >= 0) {
// mmap the binder, providing a chunk of virtual address space to receive transactions.
mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
if (mVMStart == MAP_FAILED) {
// mmap failed: binder is unusable, so close the fd and mark it invalid.
close(mDriverFD);
mDriverFD = -1;
}
}
// Without a binder fd this process cannot do any IPC, so abort outright.
LOG_ALWAYS_FATAL_IF(mDriverFD < 0, "Binder driver could not be opened. Terminating.");
}
open_driver()函数:打开binder设备,返回文件描述符存放于mDriverFD
// Opens /dev/binder read-write, marks the fd close-on-exec, and queries the
// driver protocol version (further checks elided in this excerpt).
// Returns the fd, which ends up stored in mDriverFD; negative on failure.
static int open_driver()
{
int fd = open("/dev/binder", O_RDWR);
if (fd >= 0) {
// Do not leak the binder fd across exec().
fcntl(fd, F_SETFD, FD_CLOEXEC);
int vers;
status_t result = ioctl(fd, BINDER_VERSION, &vers);
...
} else {
ALOGW("Opening '/dev/binder' failed: %s\n", strerror(errno));
}
return fd;
}
第二件事情:首先找到ServiceManager,接着往ServiceManager中注册服务
defaultServiceManager()函数(frameworks/native/include/binder/IServiceManager.h、frameworks/native/libs/binder/IServiceManager.cpp)
// Returns the process-wide IServiceManager proxy, created lazily on first use
// (double-checked locking under gDefaultServiceManagerLock).
sp<IServiceManager> defaultServiceManager()
{
if (gDefaultServiceManager != NULL) return gDefaultServiceManager;
{
AutoMutex _l(gDefaultServiceManagerLock);
if (gDefaultServiceManager == NULL) {
// interface_cast<IServiceManager> wraps the BpBinder for handle 0 in a
// BpServiceManager (the business-layer proxy object).
gDefaultServiceManager = interface_cast<IServiceManager>(ProcessState::self()->getContextObject(NULL));
}
}
return gDefaultServiceManager;
}
getContextObject(NULL)函数:
// Returns the proxy for the context manager (ServiceManager), which always
// lives at the well-known binder handle 0; 'caller' is ignored here.
sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& caller)
{
return getStrongProxyForHandle(0);
}
getStrongProxyForHandle(0)函数:创建了一个BpBinder作为通信代表。其中Binder类没有直接与/dev/binder交互
因此前面的代码可以写成gDefaultServiceManager = interface_cast<IServiceManager>(new BpBinder(0));
又由于interface_cast模板类的作用代码可以写成IServiceManager::asInterface(new BpBinder(0));
// Looks up (or lazily creates) the BpBinder proxy for 'handle'.
// Proxies are cached per handle; lookupHandleLocked returns the cache entry.
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
sp<IBinder> result;
AutoMutex _l(mLock);
handle_entry* e = lookupHandleLocked(handle);
if (e != NULL) {
IBinder* b = e->binder;
// Create a fresh proxy if none is cached, or if the cached one is in the
// middle of being destroyed (attemptIncWeak fails in that case).
if (b == NULL || !e->refs->attemptIncWeak(this)) {
b = new BpBinder(handle);
e->binder = b;
if (b) e->refs = b->getWeakRefs();
result = b;
} else {
// Reuse the cached proxy: force_set skips an extra attemptIncStrong,
// then drop the weak reference taken by attemptIncWeak above.
result.force_set(b);
e->refs->decWeak(this);
}
}
return result;
}
BpBinder构造函数:BpBinder与BBinder都在通信层,是通信的代表,一个BpBinder对应一个BBinder
(frameworks/native/libs/binder/BpBinder.cpp)
// BpBinder is the client-side communication endpoint; one BpBinder (identified
// by its handle) corresponds to one BBinder on the server side.
BpBinder::BpBinder(int32_t handle)
: mHandle(handle)
, mAlive(1)
, mObitsSent(0)
, mObituaries(NULL)
{
ALOGV("Creating BpBinder %p handle %d\n", this, mHandle);
// Keep the object alive while only weak references exist.
extendObjectLifetime(OBJECT_LIFETIME_WEAK);
// Tell the driver this thread now holds a weak reference on the handle.
IPCThreadState::self()->incWeakHandle(handle);
}
IServiceManager::asInterface函数:深深的藏在这个宏里面
DECLARE_META_INTERFACE(ServiceManager);
展开asInterface如下:
android::sp<IServiceManager> intr = new BpServiceManager(new BpBinder(0)); return intr;
分析派生关系:BpServiceManager-> IServiceManager & BpInterface -> BpRefBase( remote->BpBinder)
BnServiceManager-> IServiceManager & BBinder
遗留问题:通信层创建了BpBinder,业务层创建了BpServiceManager,那么通信的过程是怎样的呢
2.MediaPlayerService服务
以MediaPlayerService为例来分析服务的创建,以及通信过程。
MediaPlayerService::instantiate() 函数:调用BpServiceManager->addService函数来处理添加服务的业务,创建一个MediaPlayerService对象
// Creates the MediaPlayerService object and registers it with ServiceManager
// under the name "media.player".
void MediaPlayerService::instantiate() {
// BpServiceManager::addService is business-layer logic: 1. pack the request
// data; 2. hand the data to BpBinder->transact for delivery.
// BpBinder::transact is communication-layer logic; it delegates the actual
// driver I/O to IPCThreadState.
defaultServiceManager()->addService(
String16("media.player"), new MediaPlayerService());
}
BpServiceManager->addService函数:业务层函数,调用BpBinder->transact与Binder设备交互
(frameworks/native/libs/binder/IServiceManager.cpp)
// Business-layer addService (BpServiceManager): marshals the request into a
// Parcel and sends it via the communication layer to ServiceManager.
virtual status_t addService(const String16& name, const sp<IBinder>& service,
bool allowIsolated)
{
// Request and reply parcels.
Parcel data, reply;
data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
data.writeString16(name);
data.writeStrongBinder(service);
data.writeInt32(allowIsolated ? 1 : 0);
// remote() is the underlying BpBinder; transact crosses the binder driver
// to reach the server-side BnBinder.
status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
return err == NO_ERROR ? reply.readExceptionCode() : err;
}
BpBinder::transact函数:通信层函数,利用IPCThreadState::self()->transact进行进程通信
// Communication-layer transact: forwards to the per-thread IPCThreadState,
// which performs the actual interaction with the binder driver.
status_t BpBinder::transact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
// Once a binder has died, it will never come back to life.
if (mAlive) {
status_t status = IPCThreadState::self()->transact(
mHandle, code, data, reply, flags);
if (status == DEAD_OBJECT) mAlive = 0;
return status;
}
return DEAD_OBJECT;
}
IPCThreadState::transact函数:利用writeTransactionData写mOut缓冲区数据,利用waitForResponse发送数据循环等待应答
分析:关于IPCThreadState,每个进程都有一个用来处理进程间通信,主要的劳动者。其包含一个mIn接受数据缓冲区,和一个mOut发送数据缓冲区
// Per-thread transact: writeTransactionData stages the command into the mOut
// send buffer; waitForResponse then ships it and loops for the reply.
// Each thread's IPCThreadState owns an mIn (receive) and mOut (send) buffer.
status_t IPCThreadState::transact(int32_t handle,
uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags)
{
status_t err = data.errorCheck();
flags |= TF_ACCEPT_FDS;
// BC_* codes are sent from the application to the driver; codes the driver
// sends back to the application start with BR_.
if (err == NO_ERROR) {
// 1. Wrap the data in a binder_transaction_data and append it to mOut.
err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
}
if (err != NO_ERROR) {
if (reply) reply->setError(err);
return (mLastError = err);
}
if ((flags & TF_ONE_WAY) == 0) {
// Synchronous call: block until the reply arrives.
if (reply) {
// 2. Send the data, then loop waiting for the response.
err = waitForResponse(reply);
} else {
Parcel fakeReply;
err = waitForResponse(&fakeReply);
}
} else {
// One-way call: no reply is expected.
err = waitForResponse(NULL, NULL);
}
return err;
}
writeTransactionData函数:打包binder通信数据到binder_transaction_data中,将数据写入缓冲区mOut
// Packs a command plus Parcel payload into a binder_transaction_data and
// appends it to mOut. Nothing is transmitted here; talkWithDriver does I/O.
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
// The structure exchanged with the binder driver.
binder_transaction_data tr;
// 'handle' identifies the target endpoint (0 == ServiceManager).
tr.target.handle = handle;
tr.code = code;
tr.flags = binderFlags;
tr.cookie = 0;
tr.sender_pid = 0;
tr.sender_euid = 0;
const status_t err = data.errorCheck();
if (err == NO_ERROR) {
// Point tr at the Parcel's raw data and its flat-binder-object offsets.
tr.data_size = data.ipcDataSize();
tr.data.ptr.buffer = data.ipcData();
tr.offsets_size = data.ipcObjectsCount()*sizeof(size_t);
tr.data.ptr.offsets = data.ipcObjects();
} else if (statusBuffer) {
// Parcel is in an error state: ship the status code instead of a payload.
tr.flags |= TF_STATUS_CODE;
*statusBuffer = err;
tr.data_size = sizeof(status_t);
tr.data.ptr.buffer = statusBuffer;
tr.offsets_size = 0;
tr.data.ptr.offsets = NULL;
} else {
return (mLastError = err);
}
// Stage the command and the payload in mOut; it is not sent yet.
mOut.writeInt32(cmd);
mOut.write(&tr, sizeof(tr));
return NO_ERROR;
}
waitForResponse函数:利用talkWithDriver发送mOut缓冲区中的数据,将接受的数据放入mIn
// Sends the staged mOut data via talkWithDriver and loops reading BR_* codes
// from mIn until the transaction completes or fails. (The 'finish:' label and
// reply-handling cases are elided from this excerpt.)
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
int32_t cmd;
int32_t err;
while (1) {
// This is where we actually talk to the binder driver.
if ((err=talkWithDriver()) < NO_ERROR) break;
err = mIn.errorCheck();
if (err < NO_ERROR) break;
if (mIn.dataAvail() == 0) continue;
// Received data has been placed into mIn.
cmd = mIn.readInt32();
switch (cmd) {
case BR_TRANSACTION_COMPLETE:
// One-way call (no reply or acquireResult wanted): we are done.
if (!reply && !acquireResult) goto finish;
break;
case BR_DEAD_REPLY:
err = DEAD_OBJECT;
goto finish;
case BR_FAILED_REPLY:
err = FAILED_TRANSACTION;
goto finish;
...
default:
// Any other BR_* command is dispatched to executeCommand.
err = executeCommand(cmd);
if (err != NO_ERROR) goto finish;
break;
}
}
return err;
}
与/dev/binder亲密接触啦!
talkWithDriver函数:talkWithDriver中的ioctl与/dev/binder交互啦,既不是read也不是write
// The one place that really touches /dev/binder: fills a binder_write_read
// with mOut (to send) and mIn (to receive) and issues the BINDER_WRITE_READ
// ioctl -- binder communication uses ioctl, not read()/write().
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
if (mProcess->mDriverFD <= 0) {
return -EBADF;
}
binder_write_read bwr;
// Is the read buffer empty?
const bool needRead = mIn.dataPosition() >= mIn.dataSize();
// Only send when the caller allows it, or when nothing remains to be read.
const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;
// Fill in the outgoing half.
bwr.write_size = outAvail;
bwr.write_buffer = (long unsigned int)mOut.data();
// This is what we'll read.
if (doReceive && needRead) {
// Fill in the incoming half; received data is placed into mIn.
bwr.read_size = mIn.dataCapacity();
bwr.read_buffer = (long unsigned int)mIn.data();
} else {
bwr.read_size = 0;
bwr.read_buffer = 0;
}
// Return immediately if there is nothing to do.
if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;
bwr.write_consumed = 0;
bwr.read_consumed = 0;
status_t err;
do {
// The core interaction with the binder driver.
if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
err = NO_ERROR;
else
err = -errno;
if (mProcess->mDriverFD <= 0) {
err = -EBADF;
}
} while (err == -EINTR);
...
return err;
}
从流程的角度:
1.首先在MediaServer进程中,首先创建了一个ProcessState,在其中打开了Binder设备并返回一个设备文件句柄mDriverFd
2.接着通过创建sp<IServiceManager> sm = new BpServiceManager(new BpBinder(0));来获取到了ServiceManager的句柄
3. 最后在创建MediaPlayerService服务时,利用BpServiceManager->addService来完成了服务的添加,该函数实际调用了BpBinder->transact函数
从层次的角度:
业务层 BpServiceManager->addService
通信层 BpBinder->transact
IPCThreadState->transact: writeTransactionData
waitForResponse: IPCThreadState->talkWithDriver
最终数据穿过业务层和通信层,利用IPCThreadState->talkWithDriver中的ioctl来与/dev/binder交互的
3.startThreadPool
第三件事情:开启线程池,线程池中只有一个线程
ProcessState::startThreadPool()函数:
// Starts the binder thread pool; only the first call has any effect.
void ProcessState::startThreadPool()
{
AutoMutex _l(mLock);
// If the pool was already started, do nothing.
if (!mThreadPoolStarted) {
mThreadPoolStarted = true;
// Spawn the first (main) pool thread.
spawnPooledThread(true);
}
}
spawnPooledThread函数:创建了一个Thread并调用了run函数
// Creates one PoolThread named "Binder_<seq>" and starts it; its threadLoop
// ends up calling IPCThreadState::joinThreadPool.
void ProcessState::spawnPooledThread(bool isMain)
{
if (mThreadPoolStarted) {
int32_t s = android_atomic_add(1, &mThreadPoolSeq);
char buf[16];
snprintf(buf, sizeof(buf), "Binder_%X", s);
// Create the pool thread.
sp<Thread> t = new PoolThread(isMain);
// Start it; Thread::run invokes threadLoop.
t->run(buf);
}
}
第四件事情:主线程加入线程池
// Registers this thread with the driver as a looper and enters the command
// loop: talkWithDriver reads commands into mIn, executeCommand handles them.
void IPCThreadState::joinThreadPool(bool isMain)
{
// isMain==true: BC_ENTER_LOOPER (a thread entering the loop on its own);
// isMain==false: BC_REGISTER_LOOPER (a thread spawned at the driver's request).
mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);
set_sched_policy(mMyThreadId, SP_FOREGROUND);
status_t result;
do {
int32_t cmd;
// When we've cleared the incoming command queue, process any pending derefs
if (mIn.dataPosition() >= mIn.dataSize()) {
// First release queued weak references.
size_t numPending = mPendingWeakDerefs.size();
if (numPending > 0) {
for (size_t i = 0; i < numPending; i++) {
RefBase::weakref_type* refs = mPendingWeakDerefs[i];
refs->decWeak(mProcess.get());
}
mPendingWeakDerefs.clear();
}
// Then release strong references on BBinders no longer needed.
numPending = mPendingStrongDerefs.size();
if (numPending > 0) {
for (size_t i = 0; i < numPending; i++) {
BBinder* obj = mPendingStrongDerefs[i];
obj->decStrong(mProcess.get());
}
mPendingStrongDerefs.clear();
}
}
// now get the next command to be processed, waiting if necessary
// Send anything pending and read the next command into mIn.
result = talkWithDriver();
if (result >= NO_ERROR) {
size_t IN = mIn.dataAvail();
if (IN < sizeof(int32_t)) continue;
cmd = mIn.readInt32();
// Dispatch the BR_* command just read from mIn.
result = executeCommand(cmd);
}
// executeCommand may have changed the scheduling class; restore it.
set_sched_policy(mMyThreadId, SP_FOREGROUND);
// Non-main pool threads exit when the driver times them out.
if(result == TIMED_OUT && !isMain) {
break;
}
} while (result != -ECONNREFUSED && result != -EBADF);
// Tell the driver this looper is leaving before returning.
mOut.writeInt32(BC_EXIT_LOOPER);
talkWithDriver(false);
}
分析:上述join过程即发送注册信息,并进入消息循环,抽象出消息循环过程很简单
do {
result = talkWithDriver();
if(result >= NO_ERROR) {
cmd = mIn.readInt32();
result = executeCommand(cmd);
}
} while(result == CONTINUE)
4.ServiceManager
前面的分析已经很详尽了,但是这里ServiceManager没有用到业务层、只有通信层,原因是没有必要。
ServiceManager 进程(frameworks/base/cmds/servicemanager/service_manager.c)
/* ServiceManager entry point: open binder, claim handle 0 as the context
 * manager, then loop servicing client requests. */
int main(int argc, char **argv)
{
struct binder_state *bs;
void *svcmgr = BINDER_SERVICE_MANAGER;
/* 1. Open the binder device and mmap a 128 KiB receive buffer. */
bs = binder_open(128*1024);
/* 2. Claim handle 0: tell the driver via ioctl that this process is the
 * context manager. */
if (binder_become_context_manager(bs)) {
return -1;
}
/* 3. Handle client requests; svcmgr_handler does the actual dispatch. */
svcmgr_handle = svcmgr;
binder_loop(bs, svcmgr_handler);
return 0;
}
过程很简单,稍微了解下
binder_loop函数:(frameworks/base/cmds/servicemanager/binder.c)
/* Registers this thread as a looper (BC_ENTER_LOOPER), then loops forever:
 * each BINDER_WRITE_READ ioctl blocks for incoming data, which binder_parse
 * decodes and dispatches to 'func' (svcmgr_handler here). */
void binder_loop(struct binder_state *bs, binder_handler func)
{
int res;
struct binder_write_read bwr;
unsigned readbuf[32];
/* Read-only cycle: nothing queued for writing. */
bwr.write_size = 0;
bwr.write_consumed = 0;
bwr.write_buffer = 0;
readbuf[0] = BC_ENTER_LOOPER;
binder_write(bs, readbuf, sizeof(unsigned));
for (;;) {
bwr.read_size = sizeof(readbuf);
bwr.read_consumed = 0;
bwr.read_buffer = (unsigned) readbuf;
/* Blocks until the driver delivers a command. */
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
if (res < 0) {
break;
}
/* Decode the received commands and invoke 'func' for transactions. */
res = binder_parse(bs, 0, readbuf, bwr.read_consumed, func);
if (res == 0) {
break;
}
if (res < 0) {
break;
}
}
}
其中 binder_handler func = svcmgr_handler用来解析消息:
/* ServiceManager's request handler: validates the caller's interface id,
 * then dispatches on txn->code to find, add, or list services. Returns 0 on
 * success, -1 on any validation or dispatch failure. */
int svcmgr_handler(struct binder_state *bs,
struct binder_txn *txn,
struct binder_io *msg,
struct binder_io *reply)
{
struct svcinfo *si;
uint16_t *s;
unsigned len;
void *ptr;
uint32_t strict_policy;
int allow_isolated;
/* Requests must target the ServiceManager handle itself. */
if (txn->target != svcmgr_handle)
return -1;
strict_policy = bio_get_uint32(msg);
s = bio_get_string16(msg, &len);
/* Reject callers whose interface descriptor does not match svcmgr_id. */
if ((len != (sizeof(svcmgr_id) / 2)) ||
memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
fprintf(stderr,"invalid id %s\n", str8(s));
return -1;
}
switch(txn->code) {
case SVC_MGR_GET_SERVICE:
case SVC_MGR_CHECK_SERVICE:
/* Look up a service by name and return its binder reference. */
s = bio_get_string16(msg, &len);
ptr = do_find_service(bs, s, len, txn->sender_euid);
if (!ptr)
break;
bio_put_ref(reply, ptr);
return 0;
case SVC_MGR_ADD_SERVICE:
/* Register a (name, binder reference) pair in the service list. */
s = bio_get_string16(msg, &len);
ptr = bio_get_ref(msg);
allow_isolated = bio_get_uint32(msg) ? 1 : 0;
if (do_add_service(bs, s, len, ptr, txn->sender_euid, allow_isolated))
return -1;
break;
case SVC_MGR_LIST_SERVICES: {
/* Return the name of the n-th registered service, if any. */
unsigned n = bio_get_uint32(msg);
si = svclist;
while ((n-- > 0) && si)
si = si->next;
if (si) {
bio_put_string16(reply, si->name);
return 0;
}
return -1;
}
default:
return -1;
}
/* Success with no payload: write a 0 status into the reply. */
bio_put_uint32(reply, 0);
return 0;
}
分支中的不同变量就是不同的消息了:SVC_MGR_CHECK_SERVICE、SVC_MGR_ADD_SERVICE、SVC_MGR_LIST_SERVICES
分析可知,ServiceManager没有使用BBinder、BnServiceManager,简直就是裸奔呀
5.MediaServer的客户
IMediaDeathNotifier::getMediaPlayerService()
{
ALOGV("getMediaPlayerService");
Mutex::Autolock _l(sServiceLock);
if (sMediaPlayerService == 0) {
sp<IServiceManager> sm = defaultServiceManager();
sp<IBinder> binder;
do {
binder = sm->getService(String16("media.player"));
if (binder != 0) {
break;
}
usleep(500000); // 0.5 s
} while (true);
if (sDeathNotifier == NULL) {
sDeathNotifier = new DeathNotifier();
}
binder->linkToDeath(sDeathNotifier);
sMediaPlayerService = interface_cast<IMediaPlayerService>(binder);
}
return sMediaPlayerService;
}
MediaPlayerService驻留在MediaServer进程中,当进程的通信层收到消息后,会递交数据给业务层处理。若进程收到一个消息,最终会调用executeCommand来处理。回顾一下消息循环的模板:
do {
result = talkWithDriver();
if(result >= NO_ERROR) {
cmd = mIn.readInt32();
result = executeCommand(cmd);
}
} while(result == CONTINUE)
// Dispatches one BR_* command from the driver. Only the BR_TRANSACTION branch
// is quoted here (heavily elided): it reads the binder_transaction_data,
// turns tr.cookie back into the target BBinder (a BnServiceXXX object) and
// calls its transact(), which routes into the business layer's onTransact().
status_t IPCThreadState::executeCommand(int32_t cmd)
{
BBinder* obj;
RefBase::weakref_type* refs;
status_t result = NO_ERROR;
switch (cmd) {
...
case BR_TRANSACTION:
{
binder_transaction_data tr;
result = mIn.read(&tr, sizeof(tr));
if (tr.target.ptr) {
// tr.cookie holds the local BBinder, i.e. a BnServiceXXX object.
sp<BBinder> b((BBinder*)tr.cookie);
// NOTE(review): 'buffer' and 'reply' are set up in code elided from this
// excerpt (a Parcel wrapping tr's data, plus a reply Parcel).
const status_t error = b->transact(tr.code, buffer, &reply, tr.flags);
if (error < NO_ERROR) reply.setError(error);
...
return result;
}
通讯层BBinder->transact最终会调用业务层BnMediaPlayerService::onTransact函数来执行请求。
// Server-side communication layer: handles a few built-in codes (e.g. ping)
// itself and forwards everything else to the business layer's onTransact().
status_t BBinder::transact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
data.setDataPosition(0);
status_t err = NO_ERROR;
switch (code) {
case PING_TRANSACTION:
reply->writeInt32(pingBinder());
break;
default:
// Virtual hook implemented by BnXXX subclasses (e.g. BnMediaPlayerService).
err = onTransact(code, data, reply, flags);
break;
}
return err;
}
// Business-layer dispatch: unmarshals the Parcel according to 'code' and
// invokes the matching service method. Only the CREATE branch is shown in
// full; the other branches are elided in this excerpt.
status_t BnMediaPlayerService::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
switch (code) {
case CREATE: {
CHECK_INTERFACE(IMediaPlayerService, data, reply);
pid_t pid = data.readInt32();
sp<IMediaPlayerClient> client =
interface_cast<IMediaPlayerClient>(data.readStrongBinder());
int audioSessionId = data.readInt32();
// Delegate to the concrete service implementation.
sp<IMediaPlayer> player = create(pid, client, audioSessionId);
// Return the new IMediaPlayer to the caller as a strong binder.
reply->writeStrongBinder(player->asBinder());
return NO_ERROR;
} break;
case DECODE_URL: {
return NO_ERROR;
} break;
case DECODE_FD: {
return NO_ERROR;
} break;
case CREATE_MEDIA_RECORDER: {
return NO_ERROR;
} break;
case CREATE_METADATA_RETRIEVER: {
return NO_ERROR;
} break;
case LISTEN_FOR_REMOTE_DISPLAY: {
return NO_ERROR;
} break;
default:
return BBinder::onTransact(code, data, reply, flags);
}
}
派生关系:MediaPlayerService -> BnMediaPlayerService -> IMediaPlayerService & BBinder
// Client-side proxy (BpMediaPlayerService): marshals the pid, sends
// CREATE_MEDIA_RECORDER over binder, and unwraps the returned IMediaRecorder
// from the reply.
virtual sp<IMediaRecorder> createMediaRecorder(pid_t pid)
{
Parcel data, reply;
data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor());
data.writeInt32(pid);
remote()->transact(CREATE_MEDIA_RECORDER, data, &reply);
return interface_cast<IMediaRecorder>(reply.readStrongBinder());
}
在服务端MediaPlayerService(继承自BnMediaPlayerService)中:
// Server-side implementation: creates a MediaRecorderClient for the calling
// pid and tracks it (via a weak pointer) in mMediaRecorderClients.
sp<IMediaRecorder> MediaPlayerService::createMediaRecorder(pid_t pid)
{
sp<MediaRecorderClient> recorder = new MediaRecorderClient(this, pid);
wp<MediaRecorderClient> w = recorder;
Mutex::Autolock lock(mLock);
mMediaRecorderClients.add(w);
ALOGV("Create new media recorder client from pid %d", pid);
return recorder;
}