From the earlier walkthrough of the Activity launch flow (http://blog.youkuaiyun.com/newhope1106/article/details/53355189), we already know that starting an Activity ends up executing the following code:
try {
intent.migrateExtraStreamToClipData();
intent.prepareToLeaveProcess();
int result = ActivityManagerNative.getDefault()
.startActivity(whoThread, who.getBasePackageName(), intent,
intent.resolveTypeIfNeeded(who.getContentResolver()),
token, target != null ? target.mEmbeddedID : null,
requestCode, 0, null, options);
checkStartActivityResult(result, intent);
} catch (RemoteException e) {
throw new RuntimeException("Failure from system", e);
}
Clearly this is a cross-process call. It involves both obtaining the AMS proxy and the registration of AMS itself, which are covered in detail below.
4. AMS Registration
frameworks/base/core/java/android/os/ServiceManager.java
frameworks/base/services/java/com/android/server/SystemServer.java
frameworks/base/services/java/com/android/server/am/ActivityManagerService.java
frameworks/base/core/java/android/os/Binder.java
frameworks/base/core/jni/android_util_Binder.cpp
ActivityManagerService is created inside SystemServer. When the system_server process starts, SystemServer.main is invoked, which calls run, which in turn calls startBootstrapServices; there AMS's setSystemProcess method is called, and that is where AMS registers itself with the Service Manager.
/**
* The main entry point from zygote.
*/
public static void main(String[] args) {
new SystemServer().run();
}
private void run() {
......
Looper.prepareMainLooper();
......
// Start services.
try {
startBootstrapServices(); // start the bootstrap services
startCoreServices();
startOtherServices();
} catch (Throwable ex) {
Slog.e("System", "******************************************");
Slog.e("System", "************ Failure starting system services", ex);
throw ex;
}
......
// Loop forever.
Looper.loop();
throw new RuntimeException("Main thread loop unexpectedly exited");
}
private void startBootstrapServices() {
.......
// Set up the Application instance for the system process and get started.
mActivityManagerService.setSystemProcess();
......
}
The registration with the Service Manager is done in ActivityManagerService.java:
public void setSystemProcess() {
try {
ServiceManager.addService(Context.ACTIVITY_SERVICE, this, true); // register the service
......
} catch (PackageManager.NameNotFoundException e) {
throw new RuntimeException(
"Unable to find android system package", e);
}
}
This goes into ServiceManager.addService:
public static void addService(String name, IBinder service, boolean allowIsolated) {
try {
getIServiceManager().addService(name, service, allowIsolated);
} catch (RemoteException e) {
Log.e(TAG, "error in addService", e);
}
}
First we need to obtain the Service Manager itself:
private static IServiceManager getIServiceManager() {
if (sServiceManager != null) {
return sServiceManager;
}
// Find the service manager
sServiceManager = ServiceManagerNative.asInterface(BinderInternal.getContextObject());
return sServiceManager;
}
getIServiceManager calls BinderInternal.getContextObject(), a native method registered in android_util_Binder.cpp:
static const JNINativeMethod gBinderInternalMethods[] = {
/* name, signature, funcPtr */
{ "getContextObject", "()Landroid/os/IBinder;", (void*)android_os_BinderInternal_getContextObject },
{ "joinThreadPool", "()V", (void*)android_os_BinderInternal_joinThreadPool },
{ "disableBackgroundScheduling", "(Z)V", (void*)android_os_BinderInternal_disableBackgroundScheduling },
{ "handleGc", "()V", (void*)android_os_BinderInternal_handleGc }
};
So the call lands in android_os_BinderInternal_getContextObject in android_util_Binder.cpp:
static jobject android_os_BinderInternal_getContextObject(JNIEnv* env, jobject clazz)
{
sp<IBinder> b = ProcessState::self()->getContextObject(NULL);
return javaObjectForIBinder(env, b);
}
This calls ProcessState::self()->getContextObject(NULL); ProcessState was introduced earlier:
sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& /*caller*/)
{
return getStrongProxyForHandle(0);
}
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
sp<IBinder> result;
AutoMutex _l(mLock);
handle_entry* e = lookupHandleLocked(handle);
if (e != NULL) {
IBinder* b = e->binder;
if (b == NULL || !e->refs->attemptIncWeak(this)) {
if (handle == 0) {
.....
Parcel data;
status_t status = IPCThreadState::self()->transact(
0, IBinder::PING_TRANSACTION, data, NULL, 0); // PING command to check the context manager is alive
if (status == DEAD_OBJECT)
return NULL;
}
b = new BpBinder(handle); // create a BpBinder for this handle
e->binder = b;
if (b) e->refs = b->getWeakRefs();
result = b;
} else {
// This little bit of nastyness is to allow us to add a primary
// reference to the remote proxy when this team doesn't have one
// but another team is sending the handle to us.
result.force_set(b);
e->refs->decWeak(this);
}
}
return result;
}
Here we can see that the handle obtained is 0. From the earlier analysis, where the Service Manager asked the binder driver to become the context manager, we know that handle 0 always refers to the Service Manager. A BpBinder is then created with handle 0, and javaObjectForIBinder(env, b) wraps it in a Java BinderProxy whose mObject field points to that BpBinder; BinderInternal.getContextObject() is therefore essentially equivalent to new BinderProxy(). Now look at ServiceManagerNative.asInterface:
static public IServiceManager asInterface(IBinder obj)
{
if (obj == null) {
return null;
}
IServiceManager in =
(IServiceManager)obj.queryLocalInterface(descriptor);
if (in != null) {
return in;
}
return new ServiceManagerProxy(obj);
}
Since the caller is not in the same process as the Service Manager, queryLocalInterface returns null and a ServiceManagerProxy is returned; obj, stored in ServiceManagerProxy's member variable mRemote, is the BinderProxy. Next, ServiceManagerProxy's addService method is called:
public void addService(String name, IBinder service, boolean allowIsolated)
throws RemoteException {
Parcel data = Parcel.obtain();
Parcel reply = Parcel.obtain();
data.writeInterfaceToken(IServiceManager.descriptor);
data.writeString(name);
data.writeStrongBinder(service);
data.writeInt(allowIsolated ? 1 : 0);
mRemote.transact(ADD_SERVICE_TRANSACTION, data, reply, 0); // this is BinderProxy.transact
reply.recycle();
data.recycle();
}
This invokes BinderProxy.transact:
public boolean transact(int code, Parcel data, Parcel reply, int flags) throws RemoteException {
Binder.checkParcel(this, code, data, "Unreasonably large binder buffer");
return transactNative(code, data, reply, flags);
}
transactNative is also a native method, again registered in android_util_Binder.cpp; it maps to the android_os_BinderProxy_transact function:
static jboolean android_os_BinderProxy_transact(JNIEnv* env, jobject obj,
jint code, jobject dataObj, jobject replyObj, jint flags) // throws RemoteException
{
.....
Parcel* data = parcelForJavaObject(env, dataObj);
if (data == NULL) {
return JNI_FALSE;
}
Parcel* reply = parcelForJavaObject(env, replyObj);
if (reply == NULL && replyObj != NULL) {
return JNI_FALSE;
}
IBinder* target = (IBinder*)
env->GetLongField(obj, gBinderProxyOffsets.mObject); // mObject holds the native BpBinder
if (target == NULL) {
jniThrowException(env, "java/lang/IllegalStateException", "Binder has been finalized!");
return JNI_FALSE;
}
...
status_t err = target->transact(code, *data, reply, flags);
...
}
This then calls BpBinder::transact:
status_t BpBinder::transact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
// Once a binder has died, it will never come back to life.
if (mAlive) {
status_t status = IPCThreadState::self()->transact(
mHandle, code, data, reply, flags);
if (status == DEAD_OBJECT) mAlive = 0;
return status;
}
return DEAD_OBJECT;
}
Which in turn reaches IPCThreadState::self()->transact:
status_t IPCThreadState::transact(int32_t handle,
uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags)
{
status_t err = data.errorCheck();
.....
if (err == NO_ERROR) {
err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
}
if (err != NO_ERROR) {
if (reply) reply->setError(err);
return (mLastError = err);
}
if ((flags & TF_ONE_WAY) == 0) { // do we need to wait for a reply?
if (reply) {
err = waitForResponse(reply);
} else {
Parcel fakeReply;
err = waitForResponse(&fakeReply);
}
...
} else {
err = waitForResponse(NULL, NULL);
}
return err;
}
The core call here is writeTransactionData; if a reply is needed, waitForResponse is called afterwards.
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
binder_transaction_data tr;
tr.target.ptr = 0; /* Don't pass uninitialized stack data to a remote process */
tr.target.handle = handle;
tr.code = code;
tr.flags = binderFlags;
tr.cookie = 0;
tr.sender_pid = 0;
tr.sender_euid = 0;
const status_t err = data.errorCheck();
if (err == NO_ERROR) {
tr.data_size = data.ipcDataSize();
tr.data.ptr.buffer = data.ipcData();
tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t);
tr.data.ptr.offsets = data.ipcObjects();
} else if (statusBuffer) {
tr.flags |= TF_STATUS_CODE;
*statusBuffer = err;
tr.data_size = sizeof(status_t);
tr.data.ptr.buffer = reinterpret_cast<uintptr_t>(statusBuffer);
tr.offsets_size = 0;
tr.data.ptr.offsets = 0;
} else {
return (mLastError = err);
}
mOut.writeInt32(cmd);
mOut.write(&tr, sizeof(tr));
return NO_ERROR;
}
writeTransactionData only writes the command and the transaction data into the write buffer (mOut). Note that waitForResponse is called whether or not a reply is expected, just with different arguments; this is because the buffered data still has to be pushed into the binder driver so the receiving side can be woken up to process it. Inside waitForResponse, talkWithDriver is called:
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
.......
while (1) {
if ((err=talkWithDriver()) < NO_ERROR) break;
......
}
}
Here is what talkWithDriver does:
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
if (mProcess->mDriverFD <= 0) {
return -EBADF;
}
binder_write_read bwr;
// Is the read buffer empty?
const bool needRead = mIn.dataPosition() >= mIn.dataSize();
// We don't want to write anything if we are still reading
// from data left in the input buffer and the caller
// has requested to read the next data.
const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;
bwr.write_size = outAvail;
bwr.write_buffer = (uintptr_t)mOut.data();
// This is what we'll read.
if (doReceive && needRead) {
bwr.read_size = mIn.dataCapacity();
bwr.read_buffer = (uintptr_t)mIn.data();
} else {
bwr.read_size = 0;
bwr.read_buffer = 0;
}
// Return immediately if there is nothing to do.
if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;
bwr.write_consumed = 0;
bwr.read_consumed = 0;
status_t err;
do {
#if defined(HAVE_ANDROID_OS)
if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0) // the actual read/write against the driver happens here
err = NO_ERROR;
else
err = -errno;
#else
err = INVALID_OPERATION;
#endif
if (mProcess->mDriverFD <= 0) {
err = -EBADF;
}
} while (err == -EINTR);
if (err >= NO_ERROR) {
if (bwr.write_consumed > 0) {
if (bwr.write_consumed < mOut.dataSize())
mOut.remove(0, bwr.write_consumed);
else
mOut.setDataSize(0);
}
if (bwr.read_consumed > 0) {
mIn.setDataSize(bwr.read_consumed);
mIn.setDataPosition(0);
}
return NO_ERROR;
}
return err;
}
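For reference, the binder_write_read structure that talkWithDriver fills in and hands to ioctl is defined in the binder UAPI header roughly as follows (quoted from memory, so treat the exact field types as approximate):
struct binder_write_read {
    binder_size_t    write_size;     /* bytes to write */
    binder_size_t    write_consumed; /* bytes consumed by the driver */
    binder_uintptr_t write_buffer;   /* commands from user space (mOut) */
    binder_size_t    read_size;      /* bytes available to read into */
    binder_size_t    read_consumed;  /* bytes filled in by the driver */
    binder_uintptr_t read_buffer;    /* replies/work for user space (mIn) */
};
A single ioctl can therefore both push commands (the mOut buffer) and pull back work or replies (the mIn buffer) in one system call.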
This ioctl hands the commands to the binder driver and triggers the driver-side work; on the driver side it corresponds to the binder_ioctl function mentioned earlier. Two core functions are involved: binder_thread_write and binder_thread_read. Let's look at binder_ioctl first:
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
struct binder_proc *proc = filp->private_data; // the binder_proc of the calling process
struct binder_thread *thread;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
trace_binder_ioctl(cmd, arg);
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret)
goto err_unlocked;
binder_lock(__func__);
thread = binder_get_thread(proc); // look up the binder thread, creating a new one if necessary
if (thread == NULL) {
ret = -ENOMEM;
goto err;
}
switch (cmd) {
case BINDER_WRITE_READ:
ret = binder_ioctl_write_read(filp, cmd, arg, thread);
if (ret)
goto err;
break;
........
}
First, the binder_thread is obtained:
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
struct binder_thread *thread = NULL;
struct rb_node *parent = NULL;
struct rb_node **p = &proc->threads.rb_node;
while (*p) {
parent = *p;
thread = rb_entry(parent, struct binder_thread, rb_node);
if (current->pid < thread->pid)
p = &(*p)->rb_left;
else if (current->pid > thread->pid)
p = &(*p)->rb_right;
else
break;
}
if (*p == NULL) {
thread = kzalloc_preempt_disabled(sizeof(*thread));
if (thread == NULL)
return NULL;
binder_stats_created(BINDER_STAT_THREAD);
thread->proc = proc;
thread->pid = current->pid; // record the current thread's pid in the binder_thread
init_waitqueue_head(&thread->wait); // initialize the wait queue
INIT_LIST_HEAD(&thread->todo); // initialize the todo list
rb_link_node(&thread->rb_node, parent, p);
rb_insert_color(&thread->rb_node, &proc->threads);
thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
thread->return_error = BR_OK;
thread->return_error2 = BR_OK;
}
return thread;
}
If no binder_thread exists yet for the calling thread, a new one is created. As seen in talkWithDriver above, the command is BINDER_WRITE_READ, so binder_ioctl_write_read will be called next; a sketch of it follows.
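Paraphrasing the mainline implementation from memory (the kernel quoted in this article uses *_preempt_disabled variants, but the structure is the same; treat this as an abridged sketch, not the exact source), binder_ioctl_write_read does roughly this:
static int binder_ioctl_write_read(struct file *filp,
                unsigned int cmd, unsigned long arg,
                struct binder_thread *thread)
{
    int ret = 0;
    struct binder_proc *proc = filp->private_data;
    void __user *ubuf = (void __user *)arg;
    struct binder_write_read bwr;

    /* copy the binder_write_read descriptor from user space */
    if (copy_from_user(&bwr, ubuf, sizeof(bwr)))
        return -EFAULT;
    /* write buffer non-empty: consume the BC_* commands queued by the caller */
    if (bwr.write_size > 0)
        ret = binder_thread_write(proc, thread, bwr.write_buffer,
                                  bwr.write_size, &bwr.write_consumed);
    /* read buffer provided: fill it with BR_* work destined for this caller */
    if (ret == 0 && bwr.read_size > 0)
        ret = binder_thread_read(proc, thread, bwr.read_buffer,
                                 bwr.read_size, &bwr.read_consumed,
                                 filp->f_flags & O_NONBLOCK);
    /* copy the consumed counts back so user space knows how much was processed */
    if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
        return -EFAULT;
    return ret;
}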
As already seen in the earlier Service Manager analysis, the two most important functions are binder_thread_write and binder_thread_read. When AMS registers, data has been written into the write buffer, so the write buffer is non-empty and binder_thread_write is called; the command carried in that data is BC_TRANSACTION, so for now we only look at that case:
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)
{
uint32_t cmd;
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
while (ptr < end && thread->return_error == BR_OK) {
if (get_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
trace_binder_command(cmd);
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
binder_stats.bc[_IOC_NR(cmd)]++;
proc->stats.bc[_IOC_NR(cmd)]++;
thread->stats.bc[_IOC_NR(cmd)]++;
}
switch (cmd) {
...
case BC_TRANSACTION:
case BC_REPLY: {
struct binder_transaction_data tr;
if (copy_from_user_preempt_disabled(&tr, ptr, sizeof(tr))) // copy the data from user space into binder_transaction_data
return -EFAULT;
ptr += sizeof(tr);
binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
break;
}
...
*consumed = ptr - buffer;
}
return 0;
}
Now let's see what binder_transaction does. It is rather long, so only the main parts are shown:
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply)
{
struct binder_transaction *t;
struct binder_work *tcomplete;
binder_size_t *offp, *off_end;
binder_size_t off_min;
struct binder_proc *target_proc;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
struct list_head *target_list;
wait_queue_head_t *target_wait;
struct binder_transaction *in_reply_to = NULL;
struct binder_transaction_log_entry *e;
uint32_t return_error;
...
if (reply) { // cmd == BC_TRANSACTION here, so the else branch is taken
.....
} else {
if (tr->target.handle) { // the target is the Service Manager, whose handle is 0, so the else branch is taken
...
} else {
target_node = binder_context_mgr_node; // use the Service Manager's binder_node as the target_node
if (target_node == NULL) {
return_error = BR_DEAD_REPLY;
goto err_no_context_mgr_node;
}
}
target_proc = target_node->proc; // the target binder_proc (the Service Manager's process)
...
}
if (target_thread) { // target_thread is NULL here, so the else branch is taken
e->to_thread = target_thread->pid;
target_list = &target_thread->todo;
target_wait = &target_thread->wait;
} else {
target_list = &target_proc->todo;
target_wait = &target_proc->wait;
}
/* TODO: reuse incoming transaction for reply */
t = kzalloc_preempt_disabled(sizeof(*t)); // allocate the binder_transaction node
...
if (!reply && !(tr->flags & TF_ONE_WAY))
t->from = thread;
else
t->from = NULL;
t->sender_euid = task_euid(proc->tsk);
t->to_proc = target_proc; // the process that will handle this transaction
t->to_thread = target_thread; // the thread that will handle this transaction
t->code = tr->code; // ADD_SERVICE_TRANSACTION in this case
t->flags = tr->flags;
t->priority = task_nice(current);
trace_binder_transaction(reply, t, target_node);
t->buffer = binder_alloc_buf(target_proc, tr->data_size,
tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
if (t->buffer == NULL) {
return_error = BR_FAILED_REPLY;
goto err_binder_alloc_buf_failed;
}
t->buffer->allow_user_free = 0;
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
if (target_node)
binder_inc_node(target_node, 1, 0, NULL);
offp = (binder_size_t *)(t->buffer->data +
ALIGN(tr->data_size, sizeof(void *)));
// copy the caller's Parcel data into kernel space
if (copy_from_user_preempt_disabled(t->buffer->data, (const void __user *)(uintptr_t)
tr->data.ptr.buffer, tr->data_size)) {
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
// copy the offsets array into kernel space
if (copy_from_user_preempt_disabled(offp, (const void __user *)(uintptr_t)
tr->data.ptr.offsets, tr->offsets_size)) {
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
off_end = (void *)offp + tr->offsets_size;
off_min = 0;
// walk the flat_binder_objects referenced by the offsets
for (; offp < off_end; offp++) {
struct flat_binder_object *fp;
....
fp = (struct flat_binder_object *)(t->buffer->data + *offp);
off_min = *offp + sizeof(struct flat_binder_object);
switch (fp->type) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: { // a binder entity (a local object, here AMS itself) is being passed
struct binder_ref *ref;
struct binder_node *node = binder_get_node(proc, fp->binder);
...
ref = binder_get_ref_for_node(target_proc, node);
if (ref == NULL) {
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_for_node_failed;
}
if (fp->type == BINDER_TYPE_BINDER)
fp->type = BINDER_TYPE_HANDLE;
else
fp->type = BINDER_TYPE_WEAK_HANDLE;
fp->binder = 0;
fp->handle = ref->desc;
fp->cookie = 0;
binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
&thread->todo);
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
struct binder_ref *ref = binder_get_ref(proc, fp->handle,
fp->type == BINDER_TYPE_HANDLE); // look up the binder_ref for this handle
...
if (ref->node->proc == target_proc) { // AMS and the Service Manager are not in the same process, so this branch is not taken
if (fp->type == BINDER_TYPE_HANDLE)
fp->type = BINDER_TYPE_BINDER; // convert the type back to a binder entity
else
fp->type = BINDER_TYPE_WEAK_BINDER;
fp->binder = ref->node->ptr; // same process: hand over the entity itself
fp->cookie = ref->node->cookie;
binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
} else {
struct binder_ref *new_ref;
new_ref = binder_get_ref_for_node(target_proc, ref->node); // create a new reference to the node for the target process
fp->binder = 0;
fp->handle = new_ref->desc; // store the new reference's handle in fp->handle
fp->cookie = 0;
binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
}
} break;
...
}
if (reply) {
binder_pop_transaction(target_thread, in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) {
t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
} else {
if (target_node->has_async_transaction) {
target_list = &target_node->async_todo;
target_wait = NULL;
} else
target_node->has_async_transaction = 1;
}
t->work.type = BINDER_WORK_TRANSACTION;
// add the binder_transaction to target_list, i.e. the todo list; here target_list is the Service Manager's proc->todo list
list_add_tail(&t->work.entry, target_list);
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo);
if (target_wait) {
if (reply || !(t->flags & TF_ONE_WAY)) {
wake_up_interruptible_sync(target_wait);
}
else {
wake_up_interruptible(target_wait);
}
}
return;
......
}
From the above we know that the registration data sent by AMS is inserted as a binder_transaction node into the Service Manager's todo list (proc->todo, since no specific target thread was selected). From the earlier Service Manager analysis we also know that SM sits in its binder_loop, continuously reading from the binder driver; now that there is work queued for it, its read ends up in binder_thread_read:
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed, int non_block)
{
...
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
struct binder_work *w;
struct binder_transaction *t = NULL;
if (!list_empty(&thread->todo)) { // handle the thread->todo list first
w = list_first_entry(&thread->todo, struct binder_work,
entry);
} else if (!list_empty(&proc->todo) && wait_for_proc_work) { // if the binder_thread's todo list is empty, take work from the binder_proc's todo list
w = list_first_entry(&proc->todo, struct binder_work,
entry);
} else {
/* no data added */
if (ptr - buffer == 4 &&
!(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
goto retry;
break;
}
if (end - ptr < sizeof(tr) + 4)
break;
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
// recover the binder_transaction from the work item
t = container_of(w, struct binder_transaction, work);
} break;
...
}
if (!t)
continue;
BUG_ON(t->buffer == NULL);
if (t->buffer->target_node) {
struct binder_node *target_node = t->buffer->target_node; // the target node
tr.target.ptr = target_node->ptr;
tr.cookie = target_node->cookie; // the local binder entity
t->saved_priority = task_nice(current);
if (t->priority < target_node->min_priority &&
!(t->flags & TF_ONE_WAY))
binder_set_nice(t->priority);
else if (!(t->flags & TF_ONE_WAY) ||
t->saved_priority > target_node->min_priority)
binder_set_nice(target_node->min_priority);
cmd = BR_TRANSACTION;
} else {
tr.target.ptr = 0;
tr.cookie = 0;
cmd = BR_REPLY;
}
tr.code = t->code;
tr.flags = t->flags;
tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
...
// copy cmd back to user space; here it is BR_TRANSACTION
if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
// copy the binder_transaction_data (i.e. the addresses and sizes of the data) to user space
if (copy_to_user_preempt_disabled(ptr, &tr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
...
list_del(&t->work.entry); // remove the work item from the todo list
t->buffer->allow_user_free = 1;
if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
t->to_parent = thread->transaction_stack;
t->to_thread = thread;
thread->transaction_stack = t;
} else {
t->buffer->transaction = NULL;
kfree(t);
binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
break;
}
...
}
The command we sent from the AMS side was BC_TRANSACTION; the binder driver converts it into BR_TRANSACTION for the receiver. The data has now been copied into the Service Manager's user space, so we are back in the Service Manager's binder_loop; once the ioctl returns with data, binder_parse is executed.
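As a reminder of the earlier Service Manager analysis, its main loop (from frameworks/native/cmds/servicemanager/binder.c, quoted roughly from memory and abridged) looks like this:
void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(uint32_t));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        /* blocks in binder_thread_read until there is work on the todo list */
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
        if (res < 0)
            break;

        /* hand whatever the driver returned (BR_TRANSACTION etc.) to binder_parse */
        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        if (res <= 0)
            break;
    }
}
binder_parse walks the returned buffer and dispatches on each command: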
int binder_parse(struct binder_state *bs, struct binder_io *bio,
uintptr_t ptr, size_t size, binder_handler func)
{
int r = 1;
uintptr_t end = ptr + (uintptr_t) size;
while (ptr < end) {
uint32_t cmd = *(uint32_t *) ptr;
ptr += sizeof(uint32_t);
switch(cmd) {
.....
case BR_TRANSACTION: {
struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
if (func) { // func here is the Service Manager's svcmgr_handler function
unsigned rdata[256/4];
struct binder_io msg;
struct binder_io reply;
int res;
bio_init(&reply, rdata, sizeof(rdata), 4);
bio_init_from_txn(&msg, txn);
res = func(bs, txn, &msg, &reply);
binder_send_reply(bs, &reply, txn->data.ptr.buffer, res); // send the result back to the caller (the AMS side)
}
ptr += sizeof(*txn);
break;
}
...
}
return r;
}
The case BR_TRANSACTION branch is taken here, and the code sent by the caller is SVC_MGR_ADD_SERVICE (the value that ADD_SERVICE_TRANSACTION maps to on the Java side):
int svcmgr_handler(struct binder_state *bs,
struct binder_transaction_data *txn,
struct binder_io *msg,
struct binder_io *reply)
{
struct svcinfo *si;
uint16_t *s;
size_t len;
uint32_t handle;
uint32_t strict_policy;
int allow_isolated;
...
switch(txn->code) {
....
case SVC_MGR_ADD_SERVICE:
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
handle = bio_get_ref(msg);
allow_isolated = bio_get_uint32(msg) ? 1 : 0;
if (do_add_service(bs, s, len, handle, txn->sender_euid,
allow_isolated, txn->sender_pid))
return -1;
break;
......
}
bio_put_uint32(reply, 0);
return 0;
}
do_add_service is then called:
int do_add_service(struct binder_state *bs,
const uint16_t *s, size_t len,
uint32_t handle, uid_t uid, int allow_isolated,
pid_t spid)
{
struct svcinfo *si;
....
si = find_svc(s, len); // check whether a service with this name is already registered
if (si) { // not present on the first registration, so this branch is not taken
if (si->handle) { // if it already has a handle
ALOGE("add_service('%s',%x) uid=%d - ALREADY REGISTERED, OVERRIDE\n",
str8(s, len), handle, uid);
svcinfo_death(bs, si); // drop the previously registered service
}
si->handle = handle;
} else {
// allocate a new svcinfo, store the service's handle and name, and link it into the list
si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
si->handle = handle;
si->len = len;
memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
si->name[len] = '\0';
si->death.func = (void*) svcinfo_death;
si->death.ptr = si;
si->allow_isolated = allow_isolated;
si->next = svclist;
svclist = si;
}
binder_acquire(bs, handle);
binder_link_to_death(bs, handle, &si->death);
return 0;
}
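do_add_service keeps its records in a simple singly linked list of svcinfo structs, and find_svc just walks that list by name. Roughly (again a sketch from memory of service_manager.c):
struct svcinfo
{
    struct svcinfo *next;
    uint32_t handle;            /* the binder handle the driver created for the service (here AMS) */
    struct binder_death death;
    int allow_isolated;
    size_t len;
    uint16_t name[0];           /* UTF-16 service name, e.g. "activity" */
};

struct svcinfo *svclist = NULL;

struct svcinfo *find_svc(const uint16_t *s16, size_t len)
{
    struct svcinfo *si;

    for (si = svclist; si; si = si->next) {
        /* match on both length and name */
        if ((len == si->len) &&
            !memcmp(s16, si->name, len * sizeof(uint16_t)))
            return si;
    }
    return NULL;
}
Later, when a client looks up "activity", the Service Manager only has to find this svcinfo and hand the stored handle back.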
Once do_add_service has recorded AMS's handle and returned, binder_parse pushes the result back to the driver via binder_send_reply (sketched below); the driver queues it for the AMS-side thread blocked in waitForResponse, which sees BR_REPLY, and the ServiceManager.addService call finally returns. That is the whole process of AMS registering itself with the Service Manager.
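For completeness, binder_send_reply (paraphrased from memory of servicemanager/binder.c, so treat it as a sketch) frees the kernel buffer and sends the BC_REPLY in a single write:
void binder_send_reply(struct binder_state *bs,
                       struct binder_io *reply,
                       binder_uintptr_t buffer_to_free,
                       int status)
{
    struct {
        uint32_t cmd_free;
        binder_uintptr_t buffer;
        uint32_t cmd_reply;
        struct binder_transaction_data txn;
    } __attribute__((packed)) data;

    data.cmd_free = BC_FREE_BUFFER;   /* let the driver reclaim the transaction buffer */
    data.buffer = buffer_to_free;
    data.cmd_reply = BC_REPLY;        /* the reply travels back as a normal transaction */
    data.txn.target.ptr = 0;
    data.txn.cookie = 0;
    data.txn.code = 0;
    if (status) {
        /* an error code instead of a payload */
        data.txn.flags = TF_STATUS_CODE;
        data.txn.data_size = sizeof(int);
        data.txn.offsets_size = 0;
        data.txn.data.ptr.buffer = (uintptr_t)&status;
        data.txn.data.ptr.offsets = 0;
    } else {
        /* the reply binder_io built by svcmgr_handler */
        data.txn.flags = 0;
        data.txn.data_size = reply->data - reply->data0;
        data.txn.offsets_size = ((char*) reply->offs) - ((char*) reply->offs0);
        data.txn.data.ptr.buffer = (uintptr_t)reply->data0;
        data.txn.data.ptr.offsets = (uintptr_t)reply->offs0;
    }
    binder_write(bs, &data, sizeof(data)); /* BINDER_WRITE_READ with only a write buffer */
}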