How ServiceManager adds a service component

This article walks through how a service component is registered with ServiceManager on Android, covering the creation of defaultServiceManager and BpServiceManager, and the chain of Binder communication steps such as Parcel operations and binder_transaction, down to the kernel-level details of service registration.


FregService

int main(int argc, char **argv){
	FregService::instantiate();//create the FregService component
	ProcessState::self()->startThreadPool();//start a binder thread pool
	IPCThreadState::self()->joinThreadPool();//add the main thread to the binder thread pool as well
	return 0;
}

static void instantiate(){
	defaultServiceManager()->addService(String16(FREG_SERVICE), new FregService);
}
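FREG_SERVICE is the name the example registers under; judging from the comments in addService below, it is assumed to be defined in the example's headers as:

#define FREG_SERVICE "shy.luo.FregService"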
1. defaultServiceManager

Obtains a ServiceManager proxy object, a BpServiceManager.

IServiceManager

sp<IServiceManager> defaultServiceManager()
{
    if (gDefaultServiceManager != NULL) return gDefaultServiceManager;//a strong pointer of type IServiceManager, pointing to a BpServiceManager object
    if (gDefaultServiceManager == NULL) {//(the real code takes gDefaultServiceManagerLock between these two checks; elided here)
        gDefaultServiceManager = interface_cast<IServiceManager>(//wrap the sp<BpBinder> binder proxy into an sp<BpServiceManager> ServiceManager proxy
            ProcessState::self()->getContextObject(NULL));//ProcessState::self() returns the ProcessState object; getContextObject returns the BpBinder object
    }
    return gDefaultServiceManager;
}
2. ProcessState::self

Returns the process's singleton ProcessState object.

3. getContextObject

Returns a BpBinder object.

4. interface_cast

Converts the BpBinder object obtained above into a BpServiceManager object.

ProcessState

//obtain the ProcessState singleton
sp<ProcessState> ProcessState::self()
{
    if (gProcess == NULL) gProcess = new ProcessState;
    return gProcess;
}

//obtain the BpBinder object underlying the ServiceManager proxy
sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& caller)
{
        return getStrongProxyForHandle(0);//create a binder proxy object for handle 0
}

sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{//handle == 0 means we are creating the ServiceManager proxy
    sp<IBinder> result;
    handle_entry* e = lookupHandleLocked(handle);//the binder library maintains, per process, a list of handle_entry records for binder proxy objects
    if (e != NULL) {
        IBinder* b = e->binder;
		//b == NULL means no proxy has been created for handle yet; otherwise attemptIncWeak() tries to take
		//a weak reference, and if that fails the existing proxy has been destroyed and must be recreated
        if (b == NULL || !e->refs->attemptIncWeak(this)) {
            b = new BpBinder(handle); //create the binder proxy object
            e->binder = b;
            if (b) e->refs = b->getWeakRefs();
            result = b;
        } else {
            result.force_set(b);
            e->refs->decWeak(this);
        }
    }
    return result;
}

IInterface

template<typename INTERFACE>
inline sp<INTERFACE> interface_cast(const sp<IBinder>& obj)
{
    return INTERFACE::asInterface(obj);
}
5. IServiceManager::asInterface

For INTERFACE = IServiceManager, the call resolves to IServiceManager::asInterface(obj).

IServiceManager

Expanding the IMPLEMENT_META_INTERFACE macro yields the following code:

    const android::String16 IServiceManager::descriptor("android.os.IServiceManager");            
    const android::String16&                                           
            IServiceManager::getInterfaceDescriptor() const {              
        return IServiceManager::descriptor;                                
    }                                                                   
    android::sp<IServiceManager> IServiceManager::asInterface(                
            const android::sp<android::IBinder>& obj)                   
    {                                                                   
        android::sp<IServiceManager> intr;                                 
        if (obj != NULL) {                                              
            intr = static_cast<IServiceManager*>(                          
                obj->queryLocalInterface(                               
                        IServiceManager::descriptor).get());               
            if (intr == NULL) {                                         
                intr = new BpServiceManager(obj);                          
            }                                                           
        }                                                               
        return intr;                                                    
    }                                                                   
    IServiceManager::IServiceManager() { }                                    
    IServiceManager::~IServiceManager() { }  

BpServiceManager

    BpServiceManager(const sp<IBinder>& impl)
        : BpInterface<IServiceManager>(impl)
    {
    }

BpInterface uses the decorator pattern to wrap the BpBinder inside a BpServiceManager.

template<typename INTERFACE>
inline BpInterface<INTERFACE>::BpInterface(const sp<IBinder>& remote)
    : BpRefBase(remote)
{
}

BpRefBase

class BpRefBase : public virtual RefBase{
	IBinder* const   mRemote;//points to the BpBinder object
	inline IBinder*  remote() { return mRemote; }//accessor invoked as remote() in BpServiceManager::addService below
};
6. new BpServiceManager

In IServiceManager::asInterface the BpBinder is converted into a BpServiceManager. Since obj is a BpBinder, and BpBinder does not override queryLocalInterface, obj->queryLocalInterface(...) returns NULL, so intr = new BpServiceManager(obj) executes, wrapping the BpBinder in a BpServiceManager object and setting the member variable mRemote to point at the BpBinder.

BpServiceManager

A single IPC round trip between a client process and a server process breaks down into five steps:
	1. The client process packs the data into a Parcel object.
	2. The client process sends a BC_TRANSACTION command protocol to the binder driver.
	3. The binder driver sends a BR_TRANSACTION_COMPLETE return protocol to the client process, acknowledging that the IPC request has been received;
	at the same time it sends a BR_TRANSACTION return protocol to the server process, asking it to handle the request.
	4. The server process receives the BR_TRANSACTION return protocol from the binder driver, handles it, and sends a BC_REPLY command protocol back to the driver.
	5. The binder driver sends a BR_TRANSACTION_COMPLETE return protocol to the server process;
	at the same time it sends a BR_REPLY return protocol to the client process, indicating that the server has finished handling its IPC request.
    virtual status_t addService(const String16& name, const sp<IBinder>& service)
    {
        Parcel data, reply;
        data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());//write the IPC request header: "android.os.IServiceManager"
        data.writeString16(name);//add the service component's name, "shy.luo.FregService"
        data.writeStrongBinder(service);//flatten the service component being registered into a flat_binder_object structure
        status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
        return err == NO_ERROR ? reply.readExceptionCode() : err;
    }
7. addService

Registers the FregService service component.

status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
{
    return flatten_binder(ProcessState::self(), val, this);
}

status_t flatten_binder(const sp<ProcessState>& proc,
    const sp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;
    //0x7f: when the service being registered handles an IPC request, the thread priority it runs at must not fall below 0x7f;
    //FLAT_BINDER_FLAG_ACCEPTS_FDS: IPC requests carrying file descriptors may be handed to this service.
    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    if (binder != NULL) {
        IBinder *local = binder->localBinder();//returns the binder local-object interface, i.e. the service component being added; a proxy object returns NULL here, a local object returns itself
        if (!local) {
            BpBinder *proxy = binder->remoteBinder();
            if (proxy == NULL) {
                LOGE("null proxy");
            }
            const int32_t handle = proxy ? proxy->handle() : 0;
            obj.type = BINDER_TYPE_HANDLE;
            obj.handle = handle;
            obj.cookie = NULL;
        } else {
            obj.type = BINDER_TYPE_BINDER;
            obj.binder = local->getWeakRefs();//address of the weak reference counter
            obj.cookie = local;//address of the binder local object
        }
    } else {
        obj.type = BINDER_TYPE_BINDER;//BINDER_TYPE_BINDER means the payload carries a binder (local) object
        obj.binder = NULL;
        obj.cookie = NULL;
    }
    return finish_flatten_binder(binder, obj, out);
}
8. Parcel::writeStrongBinder

Flattens the local object FregService into a flat_binder_object and writes it into the Parcel.
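finish_flatten_binder, which flatten_binder ends with, simply appends the flat_binder_object to the Parcel and records its position in the Parcel's offsets array; from the same source:

inline static status_t finish_flatten_binder(
    const sp<IBinder>& binder, const flat_binder_object& flat, Parcel* out)
{
    return out->writeObject(flat, false);//writeObject also appends mDataPos to mObjects, the offsets array
}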

9. BpBinder::transact

Calls BpBinder::transact, passing the transaction code ADD_SERVICE_TRANSACTION, the IPC data, and the address of the reply Parcel.

//transact sends mHandle, together with the IPC data, to the binder driver; with this handle value the driver can
//find the corresponding binder reference object, from it the binder node, and finally deliver the IPC data to the service component
status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)//data is the IPC payload; flags defaults to 0, meaning a synchronous request
{
    // Once a binder has died, it will never come back to life.
    if (mAlive) {
        status_t status = IPCThreadState::self()->transact(
            mHandle, code, data, reply, flags);
        if (status == DEAD_OBJECT) mAlive = 0;
        return status;
    }

    return DEAD_OBJECT;
}

IPCThreadState

status_t IPCThreadState::transact(int32_t handle,
                                  uint32_t code, const Parcel& data,
                                  Parcel* reply, uint32_t flags)
{
    status_t err = data.errorCheck();//check the IPC data in data for errors
    flags |= TF_ACCEPT_FDS;//allow the reply from the server process to carry file descriptors
    if (err == NO_ERROR) {//data is valid
        err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);//pack data into a binder_transaction_data structure; nothing has been sent to the driver yet
    }
    if (err != NO_ERROR) {
        if (reply) reply->setError(err);
        return (mLastError = err);
    }
    if ((flags & TF_ONE_WAY) == 0) {//is this a synchronous request?
        if (reply) {//does the caller expect reply data?
            err = waitForResponse(reply);//send the BC_TRANSACTION command protocol to the driver
        } else {
            Parcel fakeReply;
            err = waitForResponse(&fakeReply);
        }
    } else {
        err = waitForResponse(NULL, NULL);
    }
    return err;
}

status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
    int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
    binder_transaction_data tr;

    tr.target.handle = handle;
    tr.code = code;
    tr.flags = binderFlags;
    const status_t err = data.errorCheck();//re-check that the IPC data in the Parcel is well formed
    if (err == NO_ERROR) {
        tr.data_size = data.ipcDataSize();
        tr.data.ptr.buffer = data.ipcData();
        tr.offsets_size = data.ipcObjectsCount()*sizeof(size_t);
        tr.data.ptr.offsets = data.ipcObjects();
    } else if (statusBuffer) {
        tr.flags |= TF_STATUS_CODE;
        *statusBuffer = err;
        tr.data_size = sizeof(status_t);
        tr.data.ptr.buffer = statusBuffer;
        tr.offsets_size = 0;
        tr.data.ptr.offsets = NULL;
    } else {
        return (mLastError = err);
    }
    mOut.writeInt32(cmd);
    mOut.write(&tr, sizeof(tr));
    return NO_ERROR;
}
10. IPCThreadState::writeTransactionData

Packs the IPC data, preceded by the BC_TRANSACTION command protocol code, into mOut via a binder_transaction_data structure.

The layout of mOut is then:

mOut: | BC_TRANSACTION (int32) | binder_transaction_data tr |

where tr.data.ptr.buffer points at the Parcel's data buffer and tr.data.ptr.offsets at its offsets array.

IPCThreadState

status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    int32_t cmd;
    int32_t err;

    while (1) {
        if ((err=talkWithDriver()) < NO_ERROR) break;
        err = mIn.errorCheck();
        if (err < NO_ERROR) break;
        if (mIn.dataAvail() == 0) continue;
        
        cmd = mIn.readInt32();//read out the return protocol code

        switch (cmd) {
        case BR_REPLY:
            {
                binder_transaction_data tr;
                err = mIn.read(&tr, sizeof(tr));
                LOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
                if (err != NO_ERROR) goto finish;

                if (reply) {
                    if ((tr.flags & TF_STATUS_CODE) == 0) {//the IPC request issued by this thread was handled successfully
                        reply->ipcSetDataReference(
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(size_t),
                            freeBuffer, this);
                    } else {
                        err = *static_cast<const status_t*>(tr.data.ptr.buffer);
                        freeBuffer(NULL,
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(size_t), this);
                    }
                } else {
                    freeBuffer(NULL,
                        reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                        tr.data_size,
                        reinterpret_cast<const size_t*>(tr.data.ptr.offsets),
                        tr.offsets_size/sizeof(size_t), this);
                    continue;
                }
            }
            goto finish;

        default:
            err = executeCommand(cmd);
            if (err != NO_ERROR) goto finish;
            break;
        }
    }
11. IPCThreadState::waitForResponse

Loops, repeatedly calling the member function talkWithDriver to exchange protocols with the binder driver.

IPCThreadState

status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    binder_write_read bwr;//the BINDER_WRITE_READ ioctl takes a binder_write_read structure describing both the write side and the read side
    const bool needRead = mIn.dataPosition() >= mIn.dataSize();//true once every return protocol in the input buffer mIn has been consumed
    const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;//doReceive says whether the caller wants return protocols from the driver; if needRead is false, mIn still holds unprocessed return protocols, and writing more to the write side would be pointless since the read side cannot take the results yet
    
    bwr.write_size = outAvail;//when the write buffer size is 0 and the read buffer size is greater than 0, the driver processes no command protocols from the process and only delivers return protocols to it, so the process effectively only receives
    bwr.write_buffer = (long unsigned int)mOut.data();

    //doReceive == true means the caller wants the driver's return protocols;
    //the read side can only be refilled once the return protocols already in mIn have been consumed,
    //so the read buffer size is set only when needRead is also true
    if (doReceive && needRead) {
        bwr.read_size = mIn.dataCapacity();
        bwr.read_buffer = (long unsigned int)mIn.data();
    } else {
        bwr.read_size = 0;
    }
    if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;//if both buffers are empty there is nothing to send and nothing to receive, so skip the trip into the driver
    bwr.write_consumed = 0;
    bwr.read_consumed = 0;
    status_t err;
    do {
#if defined(HAVE_ANDROID_OS)
        if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)//interact with the binder driver via the BINDER_WRITE_READ ioctl, retrying on EINTR
            err = NO_ERROR;
        else
            err = -errno;
#else
        err = INVALID_OPERATION;
#endif
        IF_LOG_COMMANDS() {
            alog << "Finished read/write, write size = " << mOut.dataSize() << endl;
        }
    } while (err == -EINTR);
    if (err >= NO_ERROR) {
        if (bwr.write_consumed > 0) {//remove the command protocols the driver has already consumed from mOut
            if (bwr.write_consumed < (ssize_t)mOut.dataSize())
                mOut.remove(0, bwr.write_consumed);
            else
                mOut.setDataSize(0);
        }
        if (bwr.read_consumed > 0) {//store the return protocols read back from the driver in mIn
            mIn.setDataSize(bwr.read_consumed);
            mIn.setDataPosition(0);
        }
        return NO_ERROR;
    }
    
    return err;
}
12. IPCThreadState::talkWithDriver

IPCThreadState keeps the command protocols to be sent to the binder driver in mOut and the return protocols received from the driver in mIn. It enters the binder driver with the BINDER_WRITE_READ ioctl, passing a binder_write_read structure.
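For reference, this is the binder_write_read structure that crosses the ioctl boundary, as defined in the binder kernel header of this era:

struct binder_write_read {
	signed long	write_size;	/* bytes to write */
	signed long	write_consumed;	/* bytes consumed by driver */
	unsigned long	write_buffer;
	signed long	read_size;	/* bytes to read */
	signed long	read_consumed;	/* bytes consumed by driver */
	unsigned long	read_buffer;
};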

binder.c

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;//the binder_proc structure the driver created for this process
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;//address of the user-space buffer

	switch (cmd) {
	case BINDER_WRITE_READ: {
		struct binder_write_read bwr;
		if (size != sizeof(struct binder_write_read)) {
			ret = -EINVAL;
			goto err;
		}
		if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {//copy the binder_write_read structure in from user space
			ret = -EFAULT;
			goto err;
		}
		if (bwr.write_size > 0) {//the write side of bwr carries data destined for the binder driver
			ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);//proc and thread here still belong to the client process
			if (ret < 0) {
				bwr.read_consumed = 0;
				if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
					ret = -EFAULT;
				goto err;
			}
		}
		if (bwr.read_size > 0) {//the read side of bwr has buffer space, so data from the driver can be copied out into it
			ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
			if (!list_empty(&proc->todo))
				wake_up_interruptible(&proc->wait);
			if (ret < 0) {
				if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
					ret = -EFAULT;
				goto err;
			}
		}
		if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
    ret = 0;
err:
	if (thread)
		thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	return ret;
}
    
int
binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
		    void __user *buffer, int size, signed long *consumed)//buffer points at the write buffer of the binder_write_read structure the process passed to the driver
{
	uint32_t cmd;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error == BR_OK) {
		if (get_user(cmd, (uint32_t __user *)ptr))//read out the command protocol code
			return -EFAULT;
		ptr += sizeof(uint32_t);
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			binder_stats.bc[_IOC_NR(cmd)]++;
			proc->stats.bc[_IOC_NR(cmd)]++;
			thread->stats.bc[_IOC_NR(cmd)]++;
		}
		switch (cmd) {
        case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))//copy in the IPC data descriptor
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
			break;
		}
       *consumed = ptr - buffer;
   }
	return 0;
}
13. binder_ioctl

The driver's entry point for the ioctl system calls issued from user space.

14. copy_from_user

copy_from_user(&bwr, ubuf, sizeof(bwr)) copies the binder_write_read structure from user space into kernel space.

15. binder_thread_write

Since bwr.write_size > 0, binder_ioctl calls binder_thread_write.

16. copy_from_user

struct binder_transaction_data tr;

copy_from_user(&tr, ptr, sizeof(tr)) reads out the IPC data by copying the binder_transaction_data structure from user space into kernel space.

static void
binder_transaction(struct binder_proc *proc, struct binder_thread *thread,
	struct binder_transaction_data *tr, int reply)
{
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	size_t *offp, *off_end;
	struct binder_proc *target_proc;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct list_head *target_list;
	wait_queue_head_t *target_wait;
	struct binder_transaction *in_reply_to = NULL;
	uint32_t return_error;

	if (reply) {//are we handling BC_REPLY or BC_TRANSACTION?
		...
	} else {//BC_TRANSACTION
		if (tr->target.handle) {
			struct binder_ref *ref;
			ref = binder_get_ref(proc, tr->target.handle);
			target_node = ref->node;
		} else {//handle value 0: the target is ServiceManager's binder node
			target_node = binder_context_mgr_node;
		}
		target_proc = target_node->proc;//the target process

		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {//synchronous IPC?
			struct binder_transaction *tmp;
			tmp = thread->transaction_stack;
			while (tmp) {//look for the best target thread
				if (tmp->from && tmp->from->proc == target_proc)
					target_thread = tmp->from;
				tmp = tmp->from_parent;
			}
		}
	}
	if (target_thread) {//a target thread exists: aim the work at the thread
		target_list = &target_thread->todo;
		target_wait = &target_thread->wait;
	} else {//otherwise aim it at the process
		target_list = &target_proc->todo;
		target_wait = &target_proc->wait;
	}
	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);//allocate a binder_transaction structure
	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);//and a binder_work structure
	t->debug_id = ++binder_last_id;

	if (!reply && !(tr->flags & TF_ONE_WAY))//initialize the binder_transaction
		t->from = thread;//from points back at the source (client) thread, so that the target can route the result back once the request has been handled
	else
		t->from = NULL;
	t->sender_euid = proc->tsk->cred->euid;
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);
	t->buffer = binder_alloc_buf(target_proc, tr->data_size,
		tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));//allocate a kernel buffer so the IPC data can be copied into it
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	if (target_node)
		binder_inc_node(target_node, 1, 0, NULL);//take a strong reference on the target binder node

	offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));//start of the offsets array

	if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {//copy the client's data buffer into the transaction's kernel buffer
	}
	if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {//copy the client's offsets array into the transaction's kernel buffer
	}
	off_end = (void *)offp + tr->offsets_size;
	for (; offp < off_end; offp++) {//walk the binder objects embedded in the IPC data
		struct flat_binder_object *fp;
		fp = (struct flat_binder_object *)(t->buffer->data + *offp);
		switch (fp->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct binder_ref *ref;
			struct binder_node *node = binder_get_node(proc, fp->binder);//look up the service's binder node via the weak-reference-counter address of the service component
			if (node == NULL) {//not found: create a binder node for the service component
				node = binder_new_node(proc, fp->binder, fp->cookie);
				if (node == NULL) {
					return_error = BR_FAILED_REPLY;
					goto err_binder_new_node_failed;
				}
				node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
				node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
			}
			ref = binder_get_ref_for_node(target_proc, node);//find (or create) the reference object for node in the target process
			if (fp->type == BINDER_TYPE_BINDER)
				fp->type = BINDER_TYPE_HANDLE;
			else
				fp->type = BINDER_TYPE_WEAK_HANDLE;
			fp->handle = ref->desc;
			binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE, &thread->todo);
		} break;
	}
	if (reply) {
		...
	} else if (!(t->flags & TF_ONE_WAY)) {//synchronous request: push t onto the source thread's transaction stack
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
	} else {//asynchronous (one-way) request
		if (target_node->has_async_transaction) {
			target_list = &target_node->async_todo;//queue behind the node's pending asynchronous work
			target_wait = NULL;
		} else
			target_node->has_async_transaction = 1;
	}
	t->work.type = BINDER_WORK_TRANSACTION;//mark the transaction's work item as BINDER_WORK_TRANSACTION
	list_add_tail(&t->work.entry, target_list);//add it to the todo queue of the target process or thread
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;//mark the work item tcomplete as BINDER_WORK_TRANSACTION_COMPLETE
	list_add_tail(&tcomplete->entry, &thread->todo);//add it to the source thread's todo queue
	if (target_wait)
		wake_up_interruptible(target_wait);
	return;
	if (in_reply_to) {
		thread->return_error = BR_TRANSACTION_COMPLETE;
		binder_send_failed_reply(in_reply_to, return_error);
	} else
		thread->return_error = return_error;
}
17. binder_transaction

The handle value 0 resolves to ServiceManager's binder node, and the node's proc member identifies the target process. An IPC transaction binder_transaction *t is created, a kernel buffer is allocated for it (with a corresponding address in the target's user space) and stored in t->buffer, and copy_from_user copies the data from the source process into the kernel. This is the single copy performed by the binder mechanism.
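Concretely, for the addService request built in step 7, the copied payload looks roughly like this (a sketch; the leading strict-mode policy integer is the header that writeInterfaceToken emits, and the exact offsets are illustrative):

t->buffer->data: | strict-mode policy (int32) | "android.os.IServiceManager" | "shy.luo.FregService" | flat_binder_object |
offsets array:   | byte offset of the flat_binder_object within t->buffer->data |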

static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
	size_t data_size, size_t offsets_size, int is_async)
{
	struct rb_node *n = proc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size;

	if (proc->vma == NULL) {
		printk(KERN_ERR "binder: %d: binder_alloc_buf, no vma\n",
		       proc->pid);
		return NULL;
	}

	size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));//align the two sizes and add them

	if (size < data_size || size < offsets_size) {//guard against overflow of the sum: an oversized request would wrap around
		binder_user_error("binder: %d: got transaction with invalid "
			"size %zd-%zd\n", proc->pid, data_size, offsets_size);
		return NULL;
	}
	//if the buffer is for an asynchronous transaction, check that the request fits within the target
	//process's remaining asynchronous space; besides the size bytes requested, the driver also needs
	//an extra binder_buffer structure to describe the kernel buffer.
	if (is_async &&
	    proc->free_async_space < size + sizeof(struct binder_buffer)) {
		if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
			printk(KERN_ERR "binder: %d: binder_alloc_buf size %zd f"
			       "ailed, no async space left\n", proc->pid, size);
		return NULL;
	}
//search the target process's red-black tree of free kernel buffers for the best fit
	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_buffer_size(proc, buffer);//the usable payload size, i.e. the size of buffer->data
		/*on the first allocation buffer_size is the whole mapped area (at most 4MB); after size is carved out, (4MB - size) remains for the next allocation*/
		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		printk(KERN_ERR "binder: %d: binder_alloc_buf size %zd failed, "
		       "no address space\n", proc->pid, size);
		return NULL;
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_buffer_size(proc, buffer);
	}
	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	if (n == NULL) {//the free buffer found is larger than requested and may be split; the first part satisfies this allocation
		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)//if the would-be second part has no room beyond a binder_buffer header plus 4 bytes, do not split
			buffer_size = size; /* no room for other buffers *///give everything to the first part; the existing free buffer already carries its binder_buffer header, so buffer_size = size and the original buffer is reused
		else
			buffer_size = size + sizeof(struct binder_buffer);//size needed for the first part: the requested bytes plus a binder_buffer header describing the new buffer split off behind it
	}
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);//end address of the buffer, rounded up to a page boundary
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	if (binder_update_page_range(proc, 1,
	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))//allocate physical pages; the buffer spans buffer->data to buffer->data + buffer_size
		return NULL;

	rb_erase(best_fit, &proc->free_buffers);//remove the chosen buffer from the free-buffer red-black tree
	buffer->free = 0;
	binder_insert_allocated_buffer(proc, buffer);//insert the buffer into the target process's red-black tree of allocated buffers
	if (buffer_size != size) {//if the free buffer was split, wrap the remainder in a new binder_buffer and add it to the buffer list and the free tree
		struct binder_buffer *new_buffer = (void *)buffer->data + size;
		list_add(&new_buffer->entry, &buffer->entry);//insert into the buffer list; the list order matches the buffers' order within the mapped kernel area
		new_buffer->free = 1;
		binder_insert_free_buffer(proc, new_buffer);//insert into the free tree
	}
	}
	if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
		printk(KERN_INFO "binder: %d: binder_alloc_buf size %zd got "
		       "%p\n", proc->pid, size, buffer);
	buffer->data_size = data_size;//initialize the buffer before returning it to the caller
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	if (is_async) {
		proc->free_async_space -= size + sizeof(struct binder_buffer);
		if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC_ASYNC)
			printk(KERN_INFO "binder: %d: binder_alloc_buf size %zd "
			       "async free %zd\n", proc->pid, size,
			       proc->free_async_space);
	}

	return buffer;
}
18. binder_alloc_buf

Allocates a kernel buffer for the IPC data; the physical pages behind it are mapped into both the kernel buffer and the corresponding user-space buffer.
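The free-tree walk in binder_alloc_buf is a classic best-fit search: go left while the request still fits (remembering the candidate), go right when it does not, and stop on an exact match. A minimal user-space sketch of the same policy, assuming free buffers are indexed by size in an ordered map (illustrative only, not driver code):

#include <cstddef>
#include <cstdio>
#include <map>

// free_bufs maps payload size -> buffer id. std::map keeps keys sorted, so
// lower_bound() returns the smallest free buffer with size >= request, which
// is exactly what the rb-tree walk in binder_alloc_buf computes as best_fit.
int best_fit(const std::map<size_t, int>& free_bufs, size_t request) {
    auto it = free_bufs.lower_bound(request);
    return it == free_bufs.end() ? -1 : it->second;  // -1: "no address space"
}

int main() {
    std::map<size_t, int> free_bufs{{128, 1}, {4096, 2}, {65536, 3}};
    std::printf("%d\n", best_fit(free_bufs, 200));   // prints 2: the 4096-byte buffer fits best
}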

static struct binder_node *
binder_get_node(struct binder_proc *proc, void __user *ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else
			return node;
	}
	return NULL;
}
19. binder_get_node

Checks whether the process hosting the service component already has a binder node for it, keyed by the address of the service's weak reference counter.

static struct binder_node * //proc: the current process; ptr: the address of a weak reference counter inside the local object; cookie: the local object itself
binder_new_node(struct binder_proc *proc, void __user *ptr, void __user *cookie)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;

	while (*p) {//the proc->nodes red-black tree stores binder nodes keyed by the weak-reference-counter address inside the local object
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else
			return NULL;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);//create the binder node
	if (node == NULL)
		return NULL;
	binder_stats.obj_created[BINDER_STAT_NODE]++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);//insert it into proc->nodes
	node->debug_id = ++binder_last_id;//initialize the node
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS)
		printk(KERN_INFO "binder: %d:%d node %d u%p c%p created\n",
		       proc->pid, current->pid, node->debug_id,
		       node->ptr, node->cookie);
	return node;
}
20. binder_new_node

Creates a binder node for the service component and inserts it into the node tree of the process hosting the service.

//A service component has exactly one binder node in its server process and at most one proxy object per client
//process; but several client processes may reference the same service node, so one node may have several reference objects.
//Conversely, a client process may use several different services, referencing several different nodes, so one client process may hold several different proxy objects.
static struct binder_ref *
binder_get_ref_for_node(struct binder_proc *proc, struct binder_node *node)
{
	struct rb_node *n;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref, *new_ref;

	while (*p) {//has this process already created a reference object for node?
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;//yes: return the existing reference
	}
	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);//no: create a new one
	if (new_ref == NULL)
		return NULL;
	binder_stats.obj_created[BINDER_STAT_REF]++;
	new_ref->debug_id = ++binder_last_id;
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;//if node is ServiceManager's binder node the descriptor is 0; otherwise start from 1
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {//find the smallest unused handle value for the new reference object
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->desc > new_ref->desc)
			break;
		new_ref->desc = ref->desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {//verify that the chosen handle value is indeed unused
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->desc < ref->desc)
			p = &(*p)->rb_left;
		else if (new_ref->desc > ref->desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);//insert the new reference object into the process's refs_by_desc tree
	if (node) {
		hlist_add_head(&new_ref->node_entry, &node->refs);//and into the node's list of references
	}
	return new_ref;
}
21. binder_get_ref_for_node

Given the target process (the binder_proc of ServiceManager) and the service component's binder node, returns the reference object that represents this node inside the ServiceManager process, creating one if necessary.
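The handle-allocation loop above assigns the smallest unused descriptor, with 0 reserved for ServiceManager's node. A minimal user-space sketch of the same rule, assuming the descriptors already taken are kept in an ordered std::set (illustrative only, not driver code):

#include <cstdint>
#include <cstdio>
#include <set>

// Mirrors the refs_by_desc walk in binder_get_ref_for_node: start at 0 for
// the context manager (ServiceManager), otherwise at 1, then advance past
// every taken descriptor until the first gap.
uint32_t alloc_desc(const std::set<uint32_t>& used, bool is_context_mgr) {
    uint32_t desc = is_context_mgr ? 0 : 1;
    for (uint32_t d : used) {          // std::set iterates in ascending order
        if (d > desc) break;           // gap found: desc is unused
        if (d == desc) desc = d + 1;   // taken: try the next value
    }
    return desc;
}

int main() {
    std::set<uint32_t> used{0, 1, 2, 5};
    std::printf("%u\n", alloc_desc(used, false));  // prints 3, the smallest free handle >= 1
}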

static int
binder_thread_read(struct binder_proc *proc, struct binder_thread *thread,
	void  __user *buffer, int size, signed long *consumed, int non_block)
{
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;
...
	while (1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w;
		struct binder_transaction *t = NULL;

		if (!list_empty(&thread->todo))//first check the thread's own todo queue for new work items
			w = list_first_entry(&thread->todo, struct binder_work, entry);
		else if (!list_empty(&proc->todo) && wait_for_proc_work)//then check the todo queue of the thread's host process
			w = list_first_entry(&proc->todo, struct binder_work, entry);
		else {
			if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
				goto retry;
			break;
		}

		if (end - ptr < sizeof(tr) + 4)
			break;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))//write BR_TRANSACTION_COMPLETE into the buffer supplied by user space
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			if (binder_debug_mask & BINDER_DEBUG_TRANSACTION_COMPLETE)
				printk(KERN_INFO "binder: %d:%d BR_TRANSACTION_COMPLETE\n",
				       proc->pid, thread->pid);

			list_del(&w->entry);//remove the completed work item
			kfree(w);
			binder_stats.obj_deleted[BINDER_STAT_TRANSACTION_COMPLETE]++;
		} break;
		}

		if (!t)
			continue;

done:

	*consumed = ptr - buffer;
	}
	return 0;
}
22. binder_thread_read

Writes a BR_TRANSACTION_COMPLETE return protocol into the buffer supplied from user space. binder_thread_read then returns to binder_ioctl, execution goes back to user space inside IPCThreadState::talkWithDriver, and finally returns to IPCThreadState::waitForResponse.

status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    int32_t cmd;
    int32_t err;

    while (1) {
        if ((err=talkWithDriver()) < NO_ERROR) break;
        err = mIn.errorCheck();
        if (err < NO_ERROR) break;
        if (mIn.dataAvail() == 0) continue;
        
        cmd = mIn.readInt32();//read out the return protocol code
        
        switch (cmd) {
        case BR_TRANSACTION_COMPLETE:
            if (!reply && !acquireResult) goto finish;//otherwise break out of the switch and loop back into talkWithDriver() to interact with the binder driver again
            break;
             }
    }
    return err;
}
23. IPCThreadState::waitForResponse

Reads the BR_TRANSACTION_COMPLETE return protocol out of the buffer mIn, breaks out of the switch, continues the while loop, and calls talkWithDriver again to interact with the binder driver.

static int
binder_thread_read(struct binder_proc *proc, struct binder_thread *thread,
	void  __user *buffer, int size, signed long *consumed, int non_block)
{
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}
	while (1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w;
		struct binder_transaction *t = NULL;
        
		if (!list_empty(&thread->todo))//first check the thread's own todo queue for new work items
			w = list_first_entry(&thread->todo, struct binder_work, entry);
		else if (!list_empty(&proc->todo) && wait_for_proc_work)//then check the todo queue of the thread's host process
			w = list_first_entry(&proc->todo, struct binder_work, entry);
		else {
			if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
				goto retry;
			break;
		}
        
		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			t = container_of(w, struct binder_transaction, work);
		} break;
		
		if (!t)
			continue;
		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {//fill in a binder_transaction_data with the IPC data
			struct binder_node *target_node = t->buffer->target_node;
			tr.target.ptr = target_node->ptr;
			tr.cookie =  target_node->cookie;
			t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {//when a server thread returns an IPC result to a client thread, the payload does not need to name a target binder node
			tr.target.ptr = NULL;
			tr.cookie = NULL;
			cmd = BR_REPLY;
		}
		tr.code = t->code;
		tr.flags = t->flags;
		tr.sender_euid = t->sender_euid;

		if (t->from) {
			struct task_struct *sender = t->from->proc->tsk;
			tr.sender_pid = task_tgid_nr_ns(sender, current->nsproxy->pid_ns);
		} else {
			tr.sender_pid = 0;
		}

		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		tr.data.ptr.buffer = (void *)t->buffer->data + proc->user_buffer_offset;//the buffer's user-space address
		tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *));

		if (put_user(cmd, (uint32_t __user *)ptr))//copy the return protocol code (BR_TRANSACTION here) followed by the binder_transaction_data into the user-space buffer supplied by the target thread
			return -EFAULT;
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, sizeof(tr)))
			return -EFAULT;
		ptr += sizeof(tr);

		binder_stat_br(proc, thread, cmd);

		list_del(&t->work.entry);//remove the binder_work w from the target thread's todo queue: the work it describes is done
		t->buffer->allow_user_free = 1;//the kernel buffer the driver allocated may now be released by the target thread via a BC_FREE_BUFFER command protocol from user space
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {//the driver is sending the target thread a BR_TRANSACTION return protocol and the transaction's TF_ONE_WAY bit is clear,
			t->to_parent = thread->transaction_stack;//i.e. a synchronous IPC request is in flight: push the binder_transaction t onto the target thread's transaction stack
			t->to_thread = thread;
			thread->transaction_stack = t;
		} else {//not a synchronous request: the binder_transaction t is no longer needed, free its kernel memory
			t->buffer->transaction = NULL;
			kfree(t);
			binder_stats.obj_deleted[BINDER_STAT_TRANSACTION]++;
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	//check whether the thread's process should be asked to spawn another binder thread to handle IPC requests
	if (proc->requested_threads + proc->ready_threads == 0 &&//no idle thread is available (ready_threads == 0) and no spawn request is outstanding (requested_threads == 0)
	    proc->requested_threads_started < proc->max_threads &&//the number of threads spawned on the driver's request is still below the configured maximum
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |//and the current thread is a registered binder thread
	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
	     /*spawn a new thread if we leave this out */) {
		proc->requested_threads++;
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))//write the return protocol code BR_SPAWN_LOOPER into the user-space buffer so the process creates a new thread and adds it to its binder thread pool
			return -EFAULT;
	}
	return 0;
}
24. binder_thread_read

Once the BINDER_WORK_TRANSACTION work item lands on the todo queue of one of ServiceManager's binder threads, that thread wakes up, takes the work item off its todo queue, and recovers the binder_transaction structure t that contains it. The driver handles a BINDER_WORK_TRANSACTION work item by sending the target thread a BR_TRANSACTION return protocol; before doing so it fills in a binder_transaction_data structure tr with the IPC data held in t.

servicemanager/binder.c

//waits for and handles service registration requests and requests for service proxy objects
void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    unsigned readbuf[32];//32*4 = 128 bytes: ServiceManager accepts at most 128 bytes of requests per read

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;
    //a thread must register itself as a binder thread with the BC_ENTER_LOOPER or BC_REGISTER_LOOPER protocol
    //so that the binder driver can dispatch IPC requests to it.
    readbuf[0] = BC_ENTER_LOOPER;//ServiceManager's main thread becomes a binder thread on its own initiative, so it registers with the driver using BC_ENTER_LOOPER (binder_write is shown in step 31 below)
    binder_write(bs, readbuf, sizeof(unsigned));

    for (;;) {//loop forever, using the BINDER_WRITE_READ ioctl to check whether the driver has new IPC requests to handle
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (unsigned) readbuf;

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

        if (res < 0) {
            LOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }

        res = binder_parse(bs, 0, readbuf, bwr.read_consumed, func);//if there are requests, hand them to binder_parse; otherwise the thread sleeps inside the driver until a new request arrives
        if (res == 0) {
            LOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            LOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}
25. binder_loop

When ioctl returns, ServiceManager's thread comes back from the binder driver to user space, resuming in binder_loop, which carries on with binder_parse.
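As an aside, the driver side of BC_ENTER_LOOPER is tiny; abridged from binder_thread_write in the same kernel source, it merely marks the calling thread as a looper so the driver may dispatch work to it:

		case BC_ENTER_LOOPER:
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;//BC_ENTER_LOOPER must not follow BC_REGISTER_LOOPER
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;//the thread is now part of the binder thread pool
			break;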

int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uint32_t *ptr, uint32_t size, binder_handler func)
{
    int r = 1;
    uint32_t *end = ptr + (size / 4);

    while (ptr < end) {
        uint32_t cmd = *ptr++;
        switch(cmd) {
        case BR_TRANSACTION: {
            struct binder_txn *txn = (void *) ptr;
            if ((end - ptr) * sizeof(uint32_t) < sizeof(struct binder_txn)) {
                LOGE("parse: txn too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (func) {
                unsigned rdata[256/4];//256 bytes
                struct binder_io msg;//parses the data passed in from the binder driver
                struct binder_io reply;//collects the IPC result into the buffer rdata so it can be returned to the driver afterwards
                int res;

                bio_init(&reply, rdata, sizeof(rdata), 4);//initialize both structures
                bio_init_from_txn(&msg, txn);
                res = func(bs, txn, &msg, &reply);//handle the BR_TRANSACTION return protocol carried in msg
                binder_send_reply(bs, &reply, txn->data, res);//return the registration result to the binder driver
            }
            ptr += sizeof(*txn) / sizeof(uint32_t);
            break;
        }
        }
    }

    return r;
}
struct binder_txn//describes the IPC data; mirrors binder_transaction_data
{
    void *target;
    void *cookie;
    uint32_t code;
    uint32_t flags;

    uint32_t sender_pid;
    uint32_t sender_euid;

    uint32_t data_size;
    uint32_t offs_size;
    void *data;
    void *offs;
};
struct binder_transaction_data {
	/* The first two are only used for bcTRANSACTION and brTRANSACTION,
	 * identifying the target and contents of the transaction.
	 */
	union {
		size_t	handle;	/* target descriptor of command transaction */
		void	*ptr;	/* target descriptor of return transaction */
	} target;
	void		*cookie;	/* target object cookie */
	unsigned int	code;		/* transaction command */

	/* General information about the transaction. */
	unsigned int	flags;
	pid_t		sender_pid;
	uid_t		sender_euid;
	size_t		data_size;	/* number of bytes of data */
	size_t		offsets_size;	/* number of bytes of offsets */

	/* If this transaction is inline, the data immediately
	 * follows here; otherwise, it ends with a pointer to
	 * the data buffer.
	 */
	union {
		struct {
			/* transaction data */
			const void	*buffer;
			/* offsets from buffer to flat_binder_object structs */
			const void	*offsets;
		} ptr;
		uint8_t	buf[8];
	} data;
};

struct binder_io//used to parse IPC data, playing the same role as the Parcel class in the binder library
{
    char *data;            /* pointer to read/write from: current parse position in the data buffer */
    uint32_t *offs;        /* array of offsets: current parse position in the offsets array */
    uint32_t data_avail;   /* bytes available in data buffer, i.e. not yet parsed */
    uint32_t offs_avail;   /* entries available in offsets array, i.e. not yet parsed */

    char *data0;           /* start of data buffer */
    uint32_t *offs0;       /* start of offsets buffer */
    uint32_t flags;        /* properties of the data buffer */
    uint32_t unused;
};
class Parcel
{
    status_t            mError;
    uint8_t*            mData;//the data buffer; holds integers, strings, and binder objects (flat_binder_object structures)
    size_t              mDataSize;
    size_t              mDataCapacity;//capacity of the data buffer mData
    mutable size_t      mDataPos;//next write position within mData
    size_t*             mObjects;//the offsets array: records the position of every binder object inside mData; the binder driver uses it to locate the binder objects in the IPC data so that it can give them special treatment
    size_t              mObjectsSize;//next write position within mObjects
    size_t              mObjectsCapacity;//capacity of the offsets array mObjects
    mutable size_t      mNextObjectHint;

    mutable bool        mFdsKnown;
    mutable bool        mHasFds;
    
    release_func        mOwner;
    void*               mOwnerCookie;
};
struct binder_object//describes one binder object inside the IPC data; equivalent to flat_binder_object
{
    uint32_t type;
    uint32_t flags;
    void *pointer;
    void *cookie;
};
struct flat_binder_object {
	/* 8 bytes for large_flat_header. */
	unsigned long		type;
	unsigned long		flags;

	/* 8 bytes of data. */
	union {
		void		*binder;	/* local object */
		signed long	handle;		/* remote object */
	};

	/* extra data associated with local object */
	void			*cookie;
};
26. binder_parse

Initializes the binder_io structures msg and reply, calls the function func to handle the BR_TRANSACTION return protocol carried in msg, stores the result in reply, and finally calls binder_send_reply to return the IPC result in reply to the binder driver. The handler func is svcmgr_handler.
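bio_init_from_txn (not quoted above) just points the binder_io at the buffers the driver delivered; a sketch consistent with the structures shown above:

void bio_init_from_txn(struct binder_io *bio, struct binder_txn *txn)
{
    bio->data = bio->data0 = txn->data;   /* parse from the start of the data buffer */
    bio->offs = bio->offs0 = txn->offs;   /* and of the offsets array */
    bio->data_avail = txn->data_size;
    bio->offs_avail = txn->offs_size / 4; /* offsets are uint32_t entries */
    bio->flags = BIO_F_SHARED;            /* buffer owned by the kernel: released later via BC_FREE_BUFFER */
}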

service_manager.c

int svcmgr_handler(struct binder_state *bs,
                   struct binder_txn *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
    struct svcinfo *si;
    uint16_t *s;
    unsigned len;
    void *ptr;
    uint32_t strict_policy;

    if (txn->target != svcmgr_handle)//check that the target binder local object handed in by the driver equals ServiceManager's own pseudo local object svcmgr_handle
        return -1;

    strict_policy = bio_get_uint32(msg);//validate the IPC request header
    s = bio_get_string16(msg, &len);
    if ((len != (sizeof(svcmgr_id) / 2)) ||
        memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
        fprintf(stderr,"invalid id %s\n", str8(s));
        return -1;
    }

    switch(txn->code) {
    case SVC_MGR_ADD_SERVICE:
        s = bio_get_string16(msg, &len);//read out the name of the service component being registered
        ptr = bio_get_ref(msg);//read out the handle value of a binder reference object
        if (do_add_service(bs, s, len, ptr, txn->sender_euid))
            return -1;
        break;
    }

    bio_put_uint32(reply, 0);//on success, write return code 0 into the binder_io structure reply
    return 0;
}
27. svcmgr_handler

For the SVC_MGR_ADD_SERVICE operation code it reads the name of the service component being registered out of the binder_io structure msg, then calls bio_get_ref to extract the handle value of a binder reference object from msg. That reference object was created inside the binder driver and refers to the service component being registered.

servicemanager/binder.c

void *bio_get_ref(struct binder_io *bio)
{
    struct binder_object *obj;

    obj = _bio_get_obj(bio);
    if (!obj)
        return 0;

    if (obj->type == BINDER_TYPE_HANDLE)
        return obj->pointer;

    return 0;
}
28. bio_get_ref

Returns the service component's handle value (descriptor).
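_bio_get_obj (not quoted above) only yields an object when the current read position is listed in the offsets array, i.e. when a binder object really was flattened at that spot; a sketch consistent with the same source, where bio_get is the file's bounds-checked reader:

static struct binder_object *_bio_get_obj(struct binder_io *bio)
{
    unsigned n;
    unsigned off = bio->data - bio->data0;   /* current read offset within the data buffer */

    /* the offsets array records where every binder_object lives */
    for (n = 0; n < bio->offs_avail; n++) {
        if (bio->offs[n] == off)
            return bio_get(bio, sizeof(struct binder_object));
    }

    bio->data_avail = 0;
    bio->flags |= BIO_F_OVERFLOW;
    return 0;
}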

//s: the name of the service component to register; uid: the user id of the process asking ServiceManager to register it
int do_add_service(struct binder_state *bs,
                   uint16_t *s, unsigned len,
                   void *ptr, unsigned uid)
{
    struct svcinfo *si;
//    LOGI("add_service('%s',%p) uid=%d\n", str8(s), ptr, uid);

    if (!ptr || (len == 0) || (len > 127))
        return -1;
    
    if (!svc_can_register(uid, s)) {//check whether a process with user id uid is allowed to register service components
        LOGE("add_service('%s',%p) uid=%d - PERMISSION DENIED\n",
             str8(s), ptr, uid);
        return -1;
    }
    
    si = find_svc(s, len);//check whether the service is already registered
    if (si) {//already registered
        if (si->ptr) {//does it already have a live handle value?
            LOGE("add_service('%s',%p) uid=%d - ALREADY REGISTERED\n",
                 str8(s), ptr, uid);
            return -1;
        }
        si->ptr = ptr;
    } else {
        si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
        if (!si) {
            LOGE("add_service('%s',%p) uid=%d - OUT OF MEMORY\n",
                 str8(s), ptr, uid);
            return -1;
        }
        si->ptr = ptr;
        si->len = len;
        memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
        si->name[len] = '\0';
        si->death.func = svcinfo_death;
        si->death.ptr = si;
        si->next = svclist;
        svclist = si;//push onto the front of svclist
    }

    binder_acquire(bs, ptr);//the newly registered service component is now referenced here: bump the count on the corresponding binder reference object
    binder_link_to_death(bs, ptr, &si->death);//the service component may die unexpectedly, so register a death notification for the binder local object with the driver
    return 0;
}

struct svcinfo 
{
    struct svcinfo *next;
    void *ptr;//handle value
    struct binder_death death;//describes a death notification
    unsigned len;//name length
    uint16_t name[0];//service component name
};
29. do_add_service

Note that the name of the component being registered must not be empty or longer than 127 characters. Every registered service component is described by an svcinfo structure and kept on the global svclist.
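find_svc (not quoted above) is a plain linear scan of that list; a sketch consistent with the structures shown above:

struct svcinfo *find_svc(uint16_t *s16, unsigned len)
{
    struct svcinfo *si;

    for (si = svclist; si; si = si->next) {   /* walk the singly linked svclist */
        if ((len == si->len) &&
            !memcmp(s16, si->name, len * sizeof(uint16_t)))
            return si;                        /* UTF-16 name matches */
    }
    return 0;
}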

//reply holds the IPC result data; buffer_to_free is a user-space address pointing at a kernel buffer used for the IPC data;
//status says whether the IPC request was handled successfully
void binder_send_reply(struct binder_state *bs,
                       struct binder_io *reply,
                       void *buffer_to_free,
                       int status)
{
    struct {//describes one BC_FREE_BUFFER and one BC_REPLY command protocol, back to back
        uint32_t cmd_free;
        void *buffer;//user-space address of the kernel buffer
        uint32_t cmd_reply;
        struct binder_txn txn;//corresponds to a binder_transaction_data structure
    } __attribute__((packed)) data;

    data.cmd_free = BC_FREE_BUFFER;//fill in the BC_FREE_BUFFER protocol
    data.buffer = buffer_to_free;
    data.cmd_reply = BC_REPLY;//fill in the BC_REPLY protocol
    data.txn.target = 0;
    data.txn.cookie = 0;
    data.txn.code = 0;
    if (status) {//an error occurred while handling the IPC request
        data.txn.flags = TF_STATUS_CODE;
        data.txn.data_size = sizeof(int);
        data.txn.offs_size = 0;
        data.txn.data = &status;//return the error code as the payload
        data.txn.offs = 0;
    } else {//success
        data.txn.flags = 0;
        data.txn.data_size = reply->data - reply->data0;
        data.txn.offs_size = ((char*) reply->offs) - ((char*) reply->offs0);
        data.txn.data = reply->data0;//data buffer
        data.txn.offs = reply->offs0;//offsets array
    }
    binder_write(bs, &data, sizeof(data));//send both command protocols to the binder driver in one write
}
30. binder_send_reply

With the service component registered, the IPC is complete and the kernel buffer that carried the IPC data must be released. The user-space address of that kernel buffer and the result to return are passed to the binder driver together: because the struct is packed, a single binder_write carries both the BC_FREE_BUFFER and the BC_REPLY command protocols.

servicemanager/binder.c

int binder_write(struct binder_state *bs, void *data, unsigned len)
{
    struct binder_write_read bwr;
    int res;
    bwr.write_size = len;
    bwr.write_consumed = 0;
    bwr.write_buffer = (unsigned) data;//use data as the write buffer of bwr
    bwr.read_size = 0;
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;//the read buffer is left empty, so the calling thread returns to user space as soon as the driver has consumed the commands, instead of staying inside the driver waiting for client IPC requests
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);//the BINDER_WRITE_READ ioctl carries the struct binder_write_read
    if (res < 0) {
        fprintf(stderr,"binder_write: ioctl failed (%s)\n",
                strerror(errno));
    }
    return res;
}
31. binder_write

Fills in a binder_write_read structure bwr with write-side data only and passes it to the binder driver through ioctl.

binder.c

int
binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
		    void __user *buffer, int size, signed long *consumed)//buffer points at the write buffer of the binder_write_read structure the process passed to the driver
{
	uint32_t cmd;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error == BR_OK) {
		if (get_user(cmd, (uint32_t __user *)ptr))//read out the command protocol code
			return -EFAULT;
		switch (cmd) {
		case BC_FREE_BUFFER: {
			void __user *data_ptr;
			struct binder_buffer *buffer;

			if (get_user(data_ptr, (void * __user *)ptr))//get the user-space address of the kernel buffer to free
				return -EFAULT;
			ptr += sizeof(void *);

			buffer = binder_buffer_lookup(proc, data_ptr);//map the buffer's user-space address back to the corresponding kernel buffer
			if (buffer == NULL) {
				binder_user_error("binder: %d:%d "
					"BC_FREE_BUFFER u%p no match\n",
					proc->pid, thread->pid, data_ptr);
				break;
			}
			if (!buffer->allow_user_free) {
				binder_user_error("binder: %d:%d "
					"BC_FREE_BUFFER u%p matched "
					"unreturned buffer\n",
					proc->pid, thread->pid, data_ptr);
				break;
			}
			if (binder_debug_mask & BINDER_DEBUG_FREE_BUFFER)
				printk(KERN_INFO "binder: %d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n",
				       proc->pid, thread->pid, data_ptr, buffer->debug_id,
				       buffer->transaction ? "active" : "finished");

			if (buffer->transaction) {
				buffer->transaction->buffer = NULL;
				buffer->transaction = NULL;
			}
			if (buffer->async_transaction && buffer->target_node) {
				BUG_ON(!buffer->target_node->has_async_transaction);
				if (list_empty(&buffer->target_node->async_todo))
					buffer->target_node->has_async_transaction = 0;
				else
					list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
			}
			binder_transaction_buffer_release(proc, buffer, NULL);//drop the references this buffer holds on binder nodes and reference objects
			binder_free_buf(proc, buffer);//release the kernel buffer
			break;
		}
                
        case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))//copy in the IPC data; where it contains pointers, only the pointer values are copied here, not yet what they point to
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
			break;
		}
		}
		*consumed = ptr - buffer;
	}
	return 0;
}
32. binder_thread_write

For BC_FREE_BUFFER, detaches the kernel buffer from its transaction and releases the kernel buffer used for the IPC; for BC_REPLY, reads out the binder_transaction_data and calls binder_transaction again, this time with reply == 1.
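binder_buffer_lookup (not quoted above) is the mapping used at the BC_FREE_BUFFER step: it converts the user-space address back into the binder_buffer that owns it. A sketch consistent with this kernel's layout, where user and kernel addresses of a buffer's data area differ by proc->user_buffer_offset and the data area follows the binder_buffer header:

static struct binder_buffer *binder_buffer_lookup(
	struct binder_proc *proc, void __user *user_ptr)
{
	struct rb_node *n = proc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	struct binder_buffer *kern_ptr;

	/* user address -> kernel address of the data area -> enclosing header */
	kern_ptr = user_ptr - proc->user_buffer_offset
		- offsetof(struct binder_buffer, data);

	while (n) {/* search the tree of allocated buffers */
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		if (kern_ptr < buffer)
			n = n->rb_left;
		else if (kern_ptr > buffer)
			n = n->rb_right;
		else
			return buffer;
	}
	return NULL;
}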

static void
binder_transaction_buffer_release(struct binder_proc *proc, struct binder_buffer *buffer, size_t *failed_at)
{
	size_t *offp, *off_end;

	if (buffer->target_node)//was this kernel buffer allocated on behalf of a binder node?
		binder_dec_node(buffer->target_node, 1, 0);//drop its reference

	offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)offp + buffer->offsets_size;
	for (; offp < off_end; offp++) {//walk the binder objects inside the buffer being released and drop their references
		struct flat_binder_object *fp;
		if (*offp > buffer->data_size - sizeof(*fp) ||
		    buffer->data_size < sizeof(*fp) ||
		    !IS_ALIGNED(*offp, sizeof(void *))) {
			printk(KERN_ERR "binder: transaction release %d bad"
					"offset %zd, size %zd\n", debug_id, *offp, buffer->data_size);
			continue;
		}
		fp = (struct flat_binder_object *)(buffer->data + *offp);
		switch (fp->type) {
		
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct binder_ref *ref = binder_get_ref(proc, fp->handle);
			if (ref == NULL) {
				printk(KERN_ERR "binder: transaction release %d bad handle %ld\n", debug_id, fp->handle);
				break;
			}
			if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
				printk(KERN_INFO "        ref %d desc %d (node %d)\n",
				       ref->debug_id, ref->desc, ref->node->debug_id);
			binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
		} break;
		}
	}
}
33. binder_transaction_buffer_release

Walks the flat_binder_object entries inside the kernel buffer being released and decrements the reference counts of the binder nodes or reference objects they correspond to.

static void binder_free_buf(
	struct binder_proc *proc, struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_buffer_size(proc, buffer);//compute the size of the buffer being freed

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *));

	if (buffer->async_transaction) {
		proc->free_async_space += size + sizeof(struct binder_buffer);
		if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC_ASYNC)
			printk(KERN_INFO "binder: %d: binder_free_buf size %zd "
			       "async free %zd\n", proc->pid, size,
			       proc->free_async_space);
	}

	binder_update_page_range(proc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
		NULL);//release the physical pages
	rb_erase(&buffer->rb_node, &proc->allocated_buffers);//remove the buffer from the tree of allocated buffers
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &proc->buffers)) {//if buffer is not the last entry in the process's buffer list
		struct binder_buffer *next = list_entry(buffer->entry.next,
						struct binder_buffer, entry);//take the next buffer in the list
		if (next->free) {//if the next buffer is also free, merge it into this one:
			rb_erase(&next->rb_node, &proc->free_buffers);//remove next from the free tree
			binder_delete_free_buffer(proc, next);//and from the kernel buffer list
		}
	}
	//the order of proc->buffers matches the buffers' positions within the mapped kernel area, so a buffer's size
	//need not be stored in it: next - buffer->data gives the payload size and next - buffer the buffer's full size
	if (proc->buffers.next != &buffer->entry) {//if buffer is not the first entry, try merging with the previous buffer too
		struct binder_buffer *prev = list_entry(buffer->entry.prev,
						struct binder_buffer, entry);
		if (prev->free) {
			binder_delete_free_buffer(proc, buffer);//only free buffers are coalesced
			rb_erase(&prev->rb_node, &proc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(proc, buffer);//insert the (possibly merged) buffer into the free tree
}
34. binder_free_buf

Releases the kernel buffer's physical pages, marks the buffer free, merges it with any free neighbours, and inserts it into the free-buffer red-black tree.

static void
binder_transaction(struct binder_proc *proc, struct binder_thread *thread,
	struct binder_transaction_data *tr, int reply)
{
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	size_t *offp, *off_end;
	struct binder_proc *target_proc;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct list_head *target_list;
	wait_queue_head_t *target_wait;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error;

	if (reply) {//are we handling BC_REPLY or BC_TRANSACTION?
		in_reply_to = thread->transaction_stack;//when the driver dispatched the IPC request to this thread, it pushed a binder_transaction onto the thread's transaction stack
		binder_set_nice(in_reply_to->saved_priority);//restore the thread's original priority
		thread->transaction_stack = in_reply_to->to_parent;//the next transaction to handle
		target_thread = in_reply_to->from;//the target thread is the one that sent the request
		target_proc = target_thread->proc;
	}
	if (target_thread) {//a target thread exists: aim the work at the thread
		e->to_thread = target_thread->pid;
		target_list = &target_thread->todo;
		target_wait = &target_thread->wait;
	} else {//otherwise aim it at the process
		target_list = &target_proc->todo;
		target_wait = &target_proc->wait;
	}
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);//allocate a binder_transaction structure
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_t_failed;
	}
	binder_stats.obj_created[BINDER_STAT_TRANSACTION]++;

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);//and a binder_work structure
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats.obj_created[BINDER_STAT_TRANSACTION_COMPLETE]++;

	t->debug_id = ++binder_last_id;
	e->debug_id = t->debug_id;

	if (!reply && !(tr->flags & TF_ONE_WAY))//initialize the binder_transaction
		t->from = thread;//from points back at the source (client) thread so that the target can route the result back; for a BC_REPLY it is left NULL
	else
		t->from = NULL;
	t->sender_euid = proc->tsk->cred->euid;
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);
	t->buffer = binder_alloc_buf(target_proc, tr->data_size,
		tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));//allocate a kernel buffer so the IPC data can be copied into it
	if (t->buffer == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	if (target_node)
		binder_inc_node(target_node, 1, 0, NULL);//take a strong reference on the target binder node

	offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));//start of the offsets array

	if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {//copy the sender's data buffer into the transaction's kernel buffer
		binder_user_error("binder: %d:%d got transaction with invalid "
			"data ptr\n", proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {//copy the sender's offsets array into the transaction's kernel buffer
		binder_user_error("binder: %d:%d got transaction with invalid "
			"offsets ptr\n", proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}

	off_end = (void *)offp + tr->offsets_size;
	for (; offp < off_end; offp++) {//walk the binder objects embedded in the IPC data
		struct flat_binder_object *fp;
		fp = (struct flat_binder_object *)(t->buffer->data + *offp);
		switch (fp->type) {
		//on the way in, the driver already rewrote the object's original BINDER_TYPE_BINDER type to BINDER_TYPE_HANDLE
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct binder_ref *ref = binder_get_ref(proc, fp->handle);
			if (ref->node->proc == target_proc) {
				if (fp->type == BINDER_TYPE_HANDLE)
					fp->type = BINDER_TYPE_BINDER;
				else
					fp->type = BINDER_TYPE_WEAK_BINDER;
				fp->binder = ref->node->ptr;
				fp->cookie = ref->node->cookie;
				binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
			} else {
				struct binder_ref *new_ref;
				new_ref = binder_get_ref_for_node(target_proc, ref->node);
				fp->handle = new_ref->desc;
				binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
			}
		} break;
		}
	}
	if (reply) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction(target_thread, in_reply_to);//remove the binder_transaction from the target thread's transaction stack: the transaction described by in_reply_to has now been fully processed
	} 
	t->work.type = BINDER_WORK_TRANSACTION;//mark the transaction's work item as BINDER_WORK_TRANSACTION
	list_add_tail(&t->work.entry, target_list);//append it to the target process's or target thread's todo queue
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;//mark the work item tcomplete as BINDER_WORK_TRANSACTION_COMPLETE
	list_add_tail(&tcomplete->entry, &thread->todo);//append it to the source thread's todo queue
	if (target_wait)
		wake_up_interruptible(target_wait);
	return;
}
35、binder_transaction

Handles the BC_REPLY command protocol. The thread that earlier requested to add the service component is retrieved from in_reply_to->from and becomes the target thread. A binder_transaction structure t is allocated to describe the reply data returned to the target process and is appended to the target thread's todo queue; a binder_work structure tcomplete is allocated as the work item that tells the source thread this round of IPC is complete, and is appended to the source thread's todo queue. While the data is copied in, each flat_binder_object named in the offsets array is translated: a handle whose binder node belongs to the target process is converted back into a local binder pointer; otherwise a reference to that node is created in the target process and the handle value rewritten.
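
To make the double hand-off concrete, here is a minimal user-space sketch. Transaction, Work and the two queues below are simplified stand-ins for the kernel types, not the real binder structures: the work item is embedded in the transaction, so the reading side can recover the transaction from the work item (the kernel does this with container_of).

#include <list>

struct Work { int type; };                        // BINDER_WORK_* analogue
struct Transaction { Work work; /* payload */ };  // work item embedded in the transaction

enum { WORK_TRANSACTION = 1, WORK_TRANSACTION_COMPLETE = 2 };  // illustrative values

int main() {
    std::list<Work*> target_todo;   // todo queue of the thread that sent the request
    std::list<Work*> source_todo;   // todo queue of the thread sending this reply

    Transaction* t = new Transaction();
    t->work.type = WORK_TRANSACTION;
    target_todo.push_back(&t->work);        // the reply data goes to the requester

    Work* tcomplete = new Work{WORK_TRANSACTION_COMPLETE};
    source_todo.push_back(tcomplete);       // the completion notice stays with the sender
    // wake_up_interruptible(target_wait) would now wake the requester
    return 0;
}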

static void
binder_pop_transaction(
	struct binder_thread *target_thread, struct binder_transaction *t)
{
	if (target_thread) {
		BUG_ON(target_thread->transaction_stack != t);//the transaction being removed must sit at the top of the target thread's stack
		BUG_ON(target_thread->transaction_stack->from != target_thread);//and it must have been initiated by the target thread itself
		target_thread->transaction_stack =
			target_thread->transaction_stack->from_parent;//move the next transaction to the top of the stack
		t->from = NULL;
	}
	t->need_reply = 0;
	if (t->buffer)
		t->buffer->transaction = NULL;
	kfree(t);//free the memory occupied by the transaction
	binder_stats.obj_deleted[BINDER_STAT_TRANSACTION]++;
}
36、binder_pop_transaction

Checks that the transaction being completed sits at the top of the target thread's transaction stack; if so, moves the next transaction to the top, detaches the current one, and frees its memory.
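
The transaction stack itself is just a singly linked list threaded through the transactions. A minimal sketch of the pop, where Transaction and Thread are illustrative stand-ins and from_parent plays the same role as in binder_transaction:

// Each thread keeps the transactions it initiated on a stack linked through
// from_parent, exactly the field binder_pop_transaction() follows above.
struct Transaction {
    Transaction* from_parent;   // next older transaction initiated by this thread
};

struct Thread {
    Transaction* transaction_stack = nullptr;   // top of the stack
};

// Pop the top transaction; the caller frees it, mirroring the kfree() above.
static Transaction* pop_transaction(Thread* thread) {
    Transaction* top = thread->transaction_stack;
    if (top != nullptr)
        thread->transaction_stack = top->from_parent;  // next entry becomes the top
    return top;
}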

static int
binder_thread_read(struct binder_proc *proc, struct binder_thread *thread,
	void  __user *buffer, int size, signed long *consumed, int non_block)
{
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	while (1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w;
		struct binder_transaction *t = NULL;

		if (!list_empty(&thread->todo))//the thread first checks its own todo queue for new work items
			w = list_first_entry(&thread->todo, struct binder_work, entry);
		else if (!list_empty(&proc->todo) && wait_for_proc_work)//then checks whether its host process has any new work items
			w = list_first_entry(&proc->todo, struct binder_work, entry);
		else {
			break;
		}

		if (end - ptr < sizeof(tr) + 4)
			break;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))//write BR_TRANSACTION_COMPLETE into the user-supplied buffer
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			if (binder_debug_mask & BINDER_DEBUG_TRANSACTION_COMPLETE)
				printk(KERN_INFO "binder: %d:%d BR_TRANSACTION_COMPLETE\n",
				       proc->pid, thread->pid);

			list_del(&w->entry);//remove the current work item
			kfree(w);
			binder_stats.obj_deleted[BINDER_STAT_TRANSACTION_COMPLETE]++;
		} break;
		}

		if (!t)
			continue;

		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {//fill in the binder_transaction_data with the IPC data
			struct binder_node *target_node = t->buffer->target_node;
			tr.target.ptr = target_node->ptr;
			tr.cookie =  target_node->cookie;
			t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {//when a server thread returns IPC data to a client thread, no target binder node needs to be specified in the IPC data
			tr.target.ptr = NULL;
			tr.cookie = NULL;
			cmd = BR_REPLY;
		}
		tr.code = t->code;
		tr.flags = t->flags;
		tr.sender_euid = t->sender_euid;

		if (t->from) {
			struct task_struct *sender = t->from->proc->tsk;
			tr.sender_pid = task_tgid_nr_ns(sender, current->nsproxy->pid_ns);
		} else {
			tr.sender_pid = 0;
		}

		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		tr.data.ptr.buffer = (void *)t->buffer->data + proc->user_buffer_offset;//a user-space address: nothing is copied, the kernel address backed by the same physical pages is simply converted to the corresponding user-space address
		tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *));

		if (put_user(cmd, (uint32_t __user *)ptr))//write the return protocol code matching this binder_transaction_data into the user buffer supplied by the target thread
			return -EFAULT;
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, sizeof(tr)))//only the structure, i.e. the address values, is copied, not the payload they point to
			return -EFAULT;
		ptr += sizeof(tr);

		binder_stat_br(proc, thread, cmd);

		list_del(&t->work.entry);//remove binder_work w from the target thread's todo queue: the work item it describes is done
		t->buffer->allow_user_free = 1;//the kernel buffer allocated by the binder driver may now be freed by the target thread issuing a BC_FREE_BUFFER command from user space
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {//the driver sent the target thread a BR_TRANSACTION return protocol and the TF_ONE_WAY bit of binder_transaction.flags is 0,
			t->to_parent = thread->transaction_stack;//i.e. a synchronous IPC request is in progress, so push binder_transaction t onto target thread thread's transaction stack
			t->to_thread = thread;
			thread->transaction_stack = t;
		} else {//not a synchronous IPC request: free the kernel memory of binder_transaction t
			t->buffer->transaction = NULL;
			kfree(t);
			binder_stats.obj_deleted[BINDER_STAT_TRANSACTION]++;
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	//check whether the thread's host process should be asked to spawn a new binder thread to handle IPC requests
	if (proc->requested_threads + proc->ready_threads == 0 &&//the sum is 0 only if both are 0: no idle threads (ready_threads) and no spawn request already outstanding (requested_threads)
	    proc->requested_threads_started < proc->max_threads &&//the number of driver-requested threads already started is below the configured maximum
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |//and the current thread has registered itself as a binder thread
	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
	     /*spawn a new thread if we leave this out */) {
		proc->requested_threads++;
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))//write the BR_SPAWN_LOOPER return protocol code into the user-space buffer so the process can create a new thread and add it to its binder thread pool
			return -EFAULT;
			return -EFAULT;
	}
	return 0;
}
37、binder_thread_read

Recovers the host binder_transaction structure t of the work item, uses it to fill in the binder_transaction_data structure tr, then copies the BR_REPLY return protocol code and tr into a user buffer supplied by the client thread. Unlike the handling of the BR_TRANSACTION return protocol, once the binder driver has sent a BR_REPLY return protocol to the target thread, the associated binder_transaction structure t can be freed immediately, because the driver does not wait for the result of processing a BR_REPLY.
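
User space consumes the buffer that binder_thread_read fills as a simple stream: each entry is a 32-bit BR_* code, and for BR_TRANSACTION/BR_REPLY a binder_transaction_data follows inline. A hedged sketch of that walk; the type and the BR_* values below are simplified stand-ins (the real codes are ioctl-encoded):

#include <cstddef>
#include <cstdint>
#include <cstring>

enum : uint32_t { BR_TRANSACTION_COMPLETE_S = 0, BR_TRANSACTION_S = 1, BR_REPLY_S = 2 };
struct TransactionDataSketch { uint32_t code, flags; uint64_t data_ptr, offsets_ptr; };

void walk_read_buffer(const uint8_t* buf, size_t consumed) {
    const uint8_t* p = buf;
    while (p + sizeof(uint32_t) <= buf + consumed) {
        uint32_t cmd;
        std::memcpy(&cmd, p, sizeof(cmd));
        p += sizeof(cmd);
        if (cmd == BR_TRANSACTION_S || cmd == BR_REPLY_S) {
            TransactionDataSketch tr;
            std::memcpy(&tr, p, sizeof(tr));   // the payload follows the code inline
            p += sizeof(tr);
            // dispatch: BR_TRANSACTION -> executeCommand(), BR_REPLY -> waitForResponse()
        }
        // BR_TRANSACTION_COMPLETE and similar codes carry no inline payload here
    }
}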

IPCThreadState

38、talkWithDriver

Saves the data returned by the binder driver into IPCThreadState's return-protocol buffer mIn.
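
At its core, talkWithDriver packs the command buffer mOut and the return buffer mIn into a single binder_write_read structure and issues one BINDER_WRITE_READ ioctl, so sending commands and reading back return protocols happen in the same system call. A minimal sketch, assuming an already-opened /dev/binder descriptor; the struct layout below matches this era's binder.h, and the partial-write bookkeeping of the real function is omitted:

#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>

// Mirrors this era's kernel struct; later kernels use fixed-width types.
struct binder_write_read {
    signed long   write_size;      // bytes of BC_* commands available in write_buffer
    signed long   write_consumed;  // filled by the driver: bytes it actually read
    unsigned long write_buffer;    // -> IPCThreadState::mOut data
    signed long   read_size;       // capacity of read_buffer
    signed long   read_consumed;   // filled by the driver: bytes of BR_* written back
    unsigned long read_buffer;     // -> IPCThreadState::mIn data
};

#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)

int talk_with_driver(int binder_fd, const void* out, size_t out_size,
                     void* in, size_t in_capacity, size_t* in_consumed) {
    struct binder_write_read bwr = {};
    bwr.write_buffer = (unsigned long)out;
    bwr.write_size   = (signed long)out_size;
    bwr.read_buffer  = (unsigned long)in;
    bwr.read_size    = (signed long)in_capacity;
    if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) < 0)  // one syscall: write, then blocking read
        return -1;
    *in_consumed = (size_t)bwr.read_consumed;           // what ends up in mIn
    return 0;
}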

status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    int32_t cmd;
    int32_t err;

    while (1) {
        if ((err=talkWithDriver()) < NO_ERROR) break;
        err = mIn.errorCheck();
        if (err < NO_ERROR) break;
        if (mIn.dataAvail() == 0) continue;
        
        cmd = mIn.readInt32();//read out the return protocol code
        
        switch (cmd) {
...
        case BR_REPLY:
            {
                binder_transaction_data tr;
                err = mIn.read(&tr, sizeof(tr));
                LOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
                if (err != NO_ERROR) goto finish;

                if (reply) {
                    if ((tr.flags & TF_STATUS_CODE) == 0) {//the IPC request issued by the current thread was handled successfully
                        reply->ipcSetDataReference(
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(size_t),
                            freeBuffer, this);
                    } else {
                        err = *static_cast<const status_t*>(tr.data.ptr.buffer);
                        freeBuffer(NULL,
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(size_t), this);
                    }
                } else {
                    freeBuffer(NULL,
                        reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                        tr.data_size,
                        reinterpret_cast<const size_t*>(tr.data.ptr.offsets),
                        tr.offsets_size/sizeof(size_t), this);
                    continue;
                }
            }
            goto finish;

   ...
        }
    }

finish:
 ...
    
    return err;
}
39、waitForResponse

Reads the BR_REPLY return protocol code out of the return-protocol buffer mIn; the body of the reply is described by a binder_transaction_data structure tr. Calls Parcel::ipcSetDataReference to store the IPC result carried in tr into the Parcel object reply.
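
Back at the start of the call chain, the caller that supplied the reply Parcel can now read the registration result out of it. A hedged fragment in the style of this era's BpServiceManager::addService (the ServiceManager writes back a single status code, so one read is enough):

    // transact() drives the waitForResponse() loop shown above; once it
    // returns, reply references the status code the ServiceManager wrote back.
    status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
    return err == NO_ERROR ? reply.readExceptionCode() : err;  // 0: service registered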

Parcel

void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize,
    const size_t* objects, size_t objectsCount, release_func relFunc, void* relCookie)
{
    freeDataNoInit();
    mError = NO_ERROR;
    mData = const_cast<uint8_t*>(data);
    mDataSize = mDataCapacity = dataSize;
    //LOGI("setDataReference Setting data size of %p to %lu (pid=%d)\n", this, mDataSize, getpid());
    mDataPos = 0;
    LOGV("setDataReference Setting data pos of %p to %d\n", this, mDataPos);
    mObjects = const_cast<size_t*>(objects);
    mObjectsSize = mObjectsCapacity = objectsCount;
    mNextObjectHint = 0;
    mOwner = relFunc;//the IPCThreadState::freeBuffer function
    mOwnerCookie = relCookie;//the IPCThreadState object
    scanForFds();
}
40、ipcSetDataReference

Stores the IPC result. Nothing is copied: the Parcel references the kernel buffer mapped into the process's address space and records relFunc (IPCThreadState::freeBuffer) as the owner responsible for releasing it.
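
Because the reply Parcel only references the driver-owned buffer, releasing it is deferred: when the Parcel is freed, mOwner (IPCThreadState::freeBuffer) runs and queues a BC_FREE_BUFFER command, matching the allow_user_free = 1 set in binder_thread_read above. A minimal sketch of that hand-off; ParcelSketch, ThreadStateSketch, the simplified release_func signature and the command value are illustrative stand-ins:

#include <cstdint>
#include <vector>

using release_func = void (*)(const uint8_t* data, void* cookie);

struct ThreadStateSketch {
    std::vector<uintptr_t> mOut;   // pending BC_* command words
    static void freeBuffer(const uint8_t* data, void* cookie) {
        auto* self = static_cast<ThreadStateSketch*>(cookie);
        const uintptr_t BC_FREE_BUFFER_SKETCH = 0;      // placeholder for the real code
        self->mOut.push_back(BC_FREE_BUFFER_SKETCH);    // command word...
        self->mOut.push_back((uintptr_t)data);          // ...then the buffer address
    }
};

struct ParcelSketch {
    const uint8_t* mData = nullptr;   // points into the driver's kernel buffer
    release_func   mOwner = nullptr;  // set by ipcSetDataReference()
    void*          mOwnerCookie = nullptr;
    ~ParcelSketch() { if (mOwner) mOwner(mData, mOwnerCookie); }  // deferred release
};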

ProcessState

void ProcessState::startThreadPool()
{
    AutoMutex _l(mLock);
    if (!mThreadPoolStarted) {
        mThreadPoolStarted = true;
        spawnPooledThread(true);
    }
}
41、startThreadPool

The member variable mThreadPoolStarted is initialized to false and set to true when the binder thread pool is started; this prevents spawnPooledThread from being called repeatedly to start the pool.

void ProcessState::spawnPooledThread(bool isMain)
{
    if (mThreadPoolStarted) {
        int32_t s = android_atomic_add(1, &mThreadPoolSeq);
        char buf[32];
        sprintf(buf, "Binder Thread #%d", s);
        LOGV("Spawning new pooled thread, name=%s\n", buf);
        sp<Thread> t = new PoolThread(isMain);//isMain == true means thread t is created by the process itself to join its binder thread pool, as opposed to a thread the binder driver asked the process to create
        t->run(buf);//start the new thread
    }
}
42、spawnPooledThread
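
Creates a new pool thread named "Binder Thread #N" and starts it; once running, the thread registers itself with the binder thread pool in threadLoop (below).

spawnPooledThread is also the user-space reaction to the BR_SPAWN_LOOPER return protocol from step 37; a hedged sketch of the corresponding branch in IPCThreadState::executeCommand:

    case BR_SPAWN_LOOPER:
        // The driver asked for one more pool thread; isMain == false marks it
        // as driver-requested rather than created by the process itself.
        mProcess->spawnPooledThread(false);
        break;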

PoolThread

class PoolThread : public Thread
{
public:
    PoolThread(bool isMain)
        : mIsMain(isMain)
    {
    }
    
protected:
    virtual bool threadLoop()//the thread entry function, called once the thread backing PoolThread object t has started
    {
        IPCThreadState::self()->joinThreadPool(mIsMain);//register the current thread with the binder thread pool as a binder thread, so the binder driver can dispatch IPC requests to it
        return false;
    }
    
    const bool mIsMain;
};
43、threadLoop

The thread entry function; it calls joinThreadPool to register the current thread with the binder thread pool. It returns false so that the Thread framework does not invoke threadLoop again, which is fine because joinThreadPool itself only returns when the thread is leaving the pool.
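
The return value matters because of the Thread contract this relies on: run() spawns a thread that keeps calling threadLoop until it returns false. A simplified stand-in using std::thread; the real utils/Thread adds ref-counting, exit requests and status handling:

#include <thread>

struct ThreadSketch {
    virtual ~ThreadSketch() = default;
    virtual bool threadLoop() = 0;            // one unit of work
    void run() {
        mWorker = std::thread([this] {
            while (threadLoop()) {}           // stop once threadLoop() returns false
        });
    }
    void join() { if (mWorker.joinable()) mWorker.join(); }
private:
    std::thread mWorker;
};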

/*The life cycle of a binder thread falls into three phases: 1. register itself with the binder
thread pool; 2. wait for and handle binder IPC requests in an endless loop; 3. leave the binder thread pool*/
void IPCThreadState::joinThreadPool(bool isMain)
{
    mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);//depending on isMain, write BC_ENTER_LOOPER or BC_REGISTER_LOOPER into the command-protocol buffer mOut
    
    // This thread may have been spawned by a thread that was in the background
    // scheduling group, so first we will make sure it is in the default/foreground
    // one to avoid performing an initial transaction in the background.
    androidSetThreadSchedulingGroup(mMyThreadId, ANDROID_TGROUP_DEFAULT);
        
    status_t result;
    do {
        int32_t cmd;
        
        // When we've cleared the incoming command queue, process any pending derefs
        if (mIn.dataPosition() >= mIn.dataSize()) {
            size_t numPending = mPendingWeakDerefs.size();
            if (numPending > 0) {
                for (size_t i = 0; i < numPending; i++) {
                    RefBase::weakref_type* refs = mPendingWeakDerefs[i];
                    refs->decWeak(mProcess.get());
                }
                mPendingWeakDerefs.clear();
            }

            numPending = mPendingStrongDerefs.size();
            if (numPending > 0) {
                for (size_t i = 0; i < numPending; i++) {
                    BBinder* obj = mPendingStrongDerefs[i];
                    obj->decStrong(mProcess.get());
                }
                mPendingStrongDerefs.clear();
            }
        }

        // now get the next command to be processed, waiting if necessary
        result = talkWithDriver();//interact with the binder driver: send it the buffered command protocols
        if (result >= NO_ERROR) {
            size_t IN = mIn.dataAvail();
            if (IN < sizeof(int32_t)) continue;
            cmd = mIn.readInt32();
            IF_LOG_COMMANDS() {
                alog << "Processing top-level Command: "
                    << getReturnString(cmd) << endl;
            }
            result = executeCommand(cmd);//handle the BR_ return protocol from the binder driver
        }
        
        // After executing the command, ensure that the thread is returned to the
        // default cgroup before rejoining the pool.  The driver takes care of
        // restoring the priority, but doesn't do anything with cgroups so we
        // need to take care of that here in userspace.  Note that we do make
        // sure to go in the foreground after executing a transaction, but
        // there are other callbacks into user code that could have changed
        // our group so we want to make absolutely sure it is put back.
        androidSetThreadSchedulingGroup(mMyThreadId, ANDROID_TGROUP_DEFAULT);

        // Let this thread exit the thread pool if it is no longer
        // needed and it is not the main process thread.
        if(result == TIMED_OUT && !isMain) {
            break;
        }
    } while (result != -ECONNREFUSED && result != -EBADF);
    mOut.writeInt32(BC_EXIT_LOOPER);
    talkWithDriver(false);
}
44、joinThreadPool

Calls talkWithDriver to interact with the binder driver and register the current thread with the binder thread pool, then loops: it sends pending commands, reads back return protocols, and hands each one to executeCommand until the thread is told to leave the pool.
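
For completeness: the max_threads ceiling that binder_thread_read checks in step 37 before sending BR_SPAWN_LOOPER is configured from user space when the driver is opened. A hedged sketch of that setup, matching this era's ProcessState::open_driver, where 15 is the default pool limit:

#include <fcntl.h>
#include <stddef.h>
#include <sys/ioctl.h>

#define BINDER_SET_MAX_THREADS _IOW('b', 5, size_t)  // matches this era's binder.h

int open_binder_with_pool_limit(void) {
    int fd = open("/dev/binder", O_RDWR);
    if (fd >= 0) {
        size_t maxThreads = 15;   // becomes proc->max_threads in the driver
        ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);
    }
    return fd;
}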
