Obtaining a Service Proxy Object

This article walks through how a client process on Android obtains a proxy object for a Service component through ServiceManager: starting from getService, going through the interaction with the binder driver, then the handling and reply on the server side, and finally the return of the proxy object through another round of binder communication.


After a Service component has registered itself with ServiceManager, it waits in its server process for client processes to send inter-process communication requests to it. Before a client process can communicate with the Service component, it first has to query ServiceManager for that Service component and obtain a proxy object for it.

To create a FregService proxy object, i.e. a BpFregService object, the client first obtains, through the binder driver, the handle value of a binder reference object that refers to the FregService component running in the FregServer process; it then uses this handle value to create a binder proxy object, i.e. a BpBinder object; finally it wraps this binder proxy object into a FregService proxy object, a BpFregService.

	int main()
	{
		// Ask ServiceManager for the IBinder registered under the name FREG_SERVICE.
		sp<IBinder> binder = defaultServiceManager()->getService(String16(FREG_SERVICE));
		if (binder == NULL) {
			LOGE("Failed to get freg service %s.\n", FREG_SERVICE);
			return -1;
		}
		// Wrap the binder proxy object into a BpFregService proxy.
		sp<IFregService> service = IFregService::asInterface(binder);
		if (service == NULL) {
			LOGE("Failed to get freg service interface.\n");
			return -2;
		}
		// Exercise the proxy: read, increment and write back the value.
		int32_t val = service->getVal();
		printf("val = %d\n", val);
		val += 1;
		service->setVal(val);
		val = service->getVal();
		printf("val = %d\n", val);
		return 0;
	}
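The last step described above, wrapping the BpBinder into a BpFregService, happens inside IFregService::asInterface. The sketch below shows roughly what the IMPLEMENT_META_INTERFACE(FregService, ...) macro expands to for this interface; the names follow the FregService example used in this article, and details may vary slightly between Android versions.

	// Simplified expansion of IMPLEMENT_META_INTERFACE(FregService, "...").
	// queryLocalInterface() returns non-NULL only when the IBinder is a local
	// BnFregService; for a BpBinder it returns NULL, so a BpFregService proxy
	// is created around the binder proxy object.
	android::sp<IFregService> IFregService::asInterface(const android::sp<android::IBinder>& obj)
	{
	    android::sp<IFregService> intr;
	    if (obj != NULL) {
	        intr = static_cast<IFregService*>(
	            obj->queryLocalInterface(IFregService::descriptor).get());
	        if (intr == NULL) {
	            intr = new BpFregService(obj);
	        }
	    }
	    return intr;
	}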

IServiceManager.cpp

    virtual sp<IBinder> getService(const String16& name) const
    {
        unsigned n;
        for (n = 0; n < 5; n++){ // try at most 5 times to obtain a proxy for the Service component named `name`
            sp<IBinder> svc = checkService(name);
            if (svc != NULL) return svc;
            LOGI("Waiting for service %s...\n", String8(name).string());
            sleep(1); // not found yet: sleep for 1 second, then retry
        }
        return NULL;
    }
1. getService

Try to obtain a proxy object for the Service component; if the service has not been registered yet, checkService() is retried up to five times, one second apart.

    /* The checkService member function of the ServiceManager proxy object performs a standard
       binder inter-process communication, which can be divided into five steps:
       1. The FregClient process packs the IPC data, i.e. the name of the Service component
          FregService whose proxy it wants, into a Parcel object.
       2. FregClient sends a BC_TRANSACTION command protocol to the binder driver. Once the driver
          has located the ServiceManager process from the protocol contents, it sends a
          BR_TRANSACTION_COMPLETE return protocol back to FregClient, meaning that the IPC request
          has been accepted by the driver. After FregClient has received and processed
          BR_TRANSACTION_COMPLETE, it re-enters the driver to wait for ServiceManager to return the
          handle value of the binder reference object it asked for.
       3. At the same time as it sends BR_TRANSACTION_COMPLETE to FregClient, the driver sends a
          BR_TRANSACTION return protocol to the ServiceManager process, asking it to perform a
          CHECK_SERVICE_TRANSACTION operation.
       4. After ServiceManager has performed the requested CHECK_SERVICE_TRANSACTION operation, it
          sends a BC_REPLY command protocol to the driver containing the information about the
          Service component FregService. Using that information the driver creates a binder
          reference object for the FregClient process, and then sends a BR_TRANSACTION_COMPLETE
          return protocol to ServiceManager to acknowledge that the FregService information has been
          received. Once ServiceManager has received and processed BR_TRANSACTION_COMPLETE, this
          round of IPC is over for it, and it re-enters the driver to wait for the next IPC request.
       5. At the same time as it sends BR_TRANSACTION_COMPLETE to ServiceManager, the driver sends a
          BR_REPLY return protocol to FregClient containing the handle value of the binder reference
          object created above; FregClient can then use this handle value to create a binder proxy
          object. */
    virtual sp<IBinder> checkService( const String16& name) const
    {
        Parcel data, reply;
        data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
        data.writeString16(name);
        // Talk to ServiceManager through the binder proxy object whose handle value is 0.
        remote()->transact(CHECK_SERVICE_TRANSACTION, data, &reply);
        return reply.readStrongBinder();
    }
2. checkService
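Step 5 of the overview above ends with FregClient turning the returned handle value into a BpBinder. That happens when checkService() calls reply.readStrongBinder(): the flat_binder_object in the reply is unflattened and ProcessState is asked for a proxy for its handle. An abridged sketch of ProcessState::getStrongProxyForHandle, based on the ProcessState.cpp of the same era (details may differ between versions):

	sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
	{
	    sp<IBinder> result;
	    AutoMutex _l(mLock);
	    // One cache entry per handle; lookupHandleLocked() grows the table as needed
	    // and returns the entry for this handle.
	    handle_entry* e = lookupHandleLocked(handle);
	    if (e != NULL) {
	        IBinder* b = e->binder;
	        if (b == NULL || !e->refs->attemptIncWeak(this)) {
	            // No usable proxy for this handle yet: create the BpBinder and cache it.
	            b = new BpBinder(handle);
	            e->binder = b;
	            if (b) e->refs = b->getWeakRefs();
	            result = b;
	        } else {
	            // Reuse the cached proxy.
	            result.force_set(b);
	            e->refs->decWeak(this);
	        }
	    }
	    return result;
	}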

BpBinder.cpp

// transact() sends mHandle, together with the IPC data, to the binder driver; with this handle
// value the driver can find the corresponding binder reference object, from it the binder entity
// object (node), and finally deliver the IPC data to the Service component.
status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) // data: the IPC data; flags defaults to 0, i.e. a synchronous request
{
    // Once a binder has died, it will never come back to life.
    if (mAlive) {
        status_t status = IPCThreadState::self()->transact(
            mHandle, code, data, reply, flags);
        if (status == DEAD_OBJECT) mAlive = 0;
        return status;
    }

    return DEAD_OBJECT;
}
3. BpBinder::transact
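The mHandle that transact() forwards was stored when the BpBinder was created by getStrongProxyForHandle(). A shortened sketch of the constructor, based on the BpBinder.cpp of the same era (details may differ):

	BpBinder::BpBinder(int32_t handle)
	    : mHandle(handle)   // handle value of the binder reference object in the driver
	    , mAlive(1)
	    , mObitsSent(0)
	    , mObituaries(NULL)
	{
	    extendObjectLifetime(OBJECT_LIFETIME_WEAK);
	    // Tell the driver this process now holds a weak reference on that handle.
	    IPCThreadState::self()->incWeakHandle(handle);
	}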
status_t IPCThreadState::transact(int32_t handle,
                                  uint32_t code, const Parcel& data,
                                  Parcel* reply, uint32_t flags)
{
    status_t err = data.errorCheck(); // check that the IPC data in `data` is well-formed

    flags |= TF_ACCEPT_FDS; // allow the server process to include file descriptors in its reply
    if (err == NO_ERROR) { // data is fine
        // Pack `data` into a binder_transaction_data struct and queue it in mOut;
        // nothing has been sent to the binder driver yet.
        err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
    }
    if (err != NO_ERROR) {
        if (reply) reply->setError(err);
        return (mLastError = err);
    }
    if ((flags & TF_ONE_WAY) == 0) { // synchronous request?
        if (reply) { // does the caller want the reply data?
            err = waitForResponse(reply); // send the BC_TRANSACTION command protocol to the driver and wait for the reply
        } else {
            Parcel fakeReply;
            err = waitForResponse(&fakeReply);
        }
    } else {
        err = waitForResponse(NULL, NULL);
    }
    
    return err;
}
4. IPCThreadState::transact
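writeTransactionData(), called above, is what actually packs the Parcel into a binder_transaction_data and queues it in the mOut command buffer. An abridged sketch based on the IPCThreadState.cpp of the same era (the TF_STATUS_CODE error path and a few field initialisations are left out):

	status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
	    int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
	{
	    binder_transaction_data tr;

	    tr.target.handle = handle;   // which binder reference the driver should deliver to
	    tr.code = code;              // e.g. CHECK_SERVICE_TRANSACTION
	    tr.flags = binderFlags;

	    const status_t err = data.errorCheck();
	    if (err == NO_ERROR) {
	        // Point tr at the Parcel's data buffer and offsets array; nothing is copied here,
	        // the copy into kernel space happens later in binder_transaction().
	        tr.data_size = data.ipcDataSize();
	        tr.data.ptr.buffer = data.ipcData();
	        tr.offsets_size = data.ipcObjectsCount() * sizeof(size_t);
	        tr.data.ptr.offsets = data.ipcObjects();
	    } else {
	        return (mLastError = err);
	    }

	    // Queue the command code and the transaction body in mOut;
	    // talkWithDriver() hands them to the driver later.
	    mOut.writeInt32(cmd);
	    mOut.write(&tr, sizeof(tr));

	    return NO_ERROR;
	}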
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    int32_t cmd;
    int32_t err;

    while (1) {
        if ((err=talkWithDriver()) < NO_ERROR) break;
        err = mIn.errorCheck();
        if (err < NO_ERROR) break;
        if (mIn.dataAvail() == 0) continue;
        
        cmd = mIn.readInt32(); // read the next return protocol code

        switch (cmd) {
        case BR_TRANSACTION_COMPLETE:
            // For a synchronous request (a reply is wanted) just break out of the switch and loop
            // back into talkWithDriver() to wait for the BR_REPLY; only a one-way request finishes here.
            if (!reply && !acquireResult) goto finish;
            break;
        
        case BR_REPLY:
            {
                binder_transaction_data tr;
                err = mIn.read(&tr, sizeof(tr));
                LOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
                if (err != NO_ERROR) goto finish;

                if (reply) {
                    if ((tr.flags & TF_STATUS_CODE) == 0) { // the IPC request issued by this thread was handled successfully
                        reply->ipcSetDataReference(
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(size_t),
                            freeBuffer, this);
                    } else {
                        err = *static_cast<const status_t*>(tr.data.ptr.buffer);
                        freeBuffer(NULL,
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(size_t), this);
                    }
                } else {
                    freeBuffer(NULL,
                        reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                        tr.data_size,
                        reinterpret_cast<const size_t*>(tr.data.ptr.offsets),
                        tr.offsets_size/sizeof(size_t), this);
                    continue;
                }
            }
            goto finish;
        }
    }

finish:
    if (err != NO_ERROR) {
        if (acquireResult) *acquireResult = err;
        if (reply) reply->setError(err);
        mLastError = err;
    }
    
    return err;
}
5. waitForResponse
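In the BR_REPLY branch above, reply->ipcSetDataReference() does not copy the reply data: it makes the Parcel point directly at the kernel buffer the driver mapped into this process, and registers freeBuffer as the release function so the buffer is handed back to the driver (via BC_FREE_BUFFER) once the Parcel is done with it. A simplified sketch of what that setter does, based on the Parcel.cpp of the same era (not the exact source):

    void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize,
        const size_t* objects, size_t objectsCount,
        release_func relFunc, void* relCookie)
    {
        freeDataNoInit();                       // drop any buffer the Parcel owned before
        mError = NO_ERROR;
        mData = const_cast<uint8_t*>(data);     // reference the driver-mapped buffer, no copy
        mDataSize = mDataCapacity = dataSize;
        mDataPos = 0;
        mObjects = const_cast<size_t*>(objects);
        mObjectsSize = mObjectsCapacity = objectsCount;
        mOwner = relFunc;                       // freeBuffer: issues BC_FREE_BUFFER when released
        mOwnerCookie = relCookie;
    }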
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    LOG_ASSERT(mProcess->mDriverFD >= 0, "Binder driver is not opened");
    
    binder_write_read bwr; // for the BINDER_WRITE_READ ioctl: describes the write (command) and read (return) buffers

    // Is the read buffer empty?
    const bool needRead = mIn.dataPosition() >= mIn.dataSize(); // true when all return protocols in mIn have been processed

    // We don't want to write anything if we are still reading
    // from data left in the input buffer and the caller
    // has requested to read the next data.
    // doReceive says whether the caller wants to receive return protocols from the driver.
    // If needRead is false, mIn still contains unprocessed return protocols, so writing more
    // command protocols now would be pointless: their replies could not be consumed yet.
    const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;

    // If the write buffer size is 0 while the read buffer size is greater than 0, the driver will
    // not process any command protocols from this process and will only deliver return protocols
    // to it, which is exactly the receive-only behaviour wanted in that case.
    bwr.write_size = outAvail;
    bwr.write_buffer = (long unsigned int)mOut.data();

    // This is what we'll read.
    // doReceive == true means we want return protocols from the driver. They can only be consumed
    // once the return protocols already in mIn have been processed, so the read buffer is set up
    // only when needRead is also true.
    if (doReceive && needRead) {
        bwr.read_size = mIn.dataCapacity();
        bwr.read_buffer = (long unsigned int)mIn.data();
    } else {
        bwr.read_size = 0;
    }

    // Return immediately if there is nothing to do.
    if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR; // nothing to send and nothing to receive, no need to enter the driver
    
    bwr.write_consumed = 0;
    bwr.read_consumed = 0;
    status_t err;
    do {
        if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0) // the do/while loop keeps issuing the BINDER_WRITE_READ ioctl while it is interrupted (-EINTR)
            err = NO_ERROR;
        else
            err = -errno;

    } while (err == -EINTR);

    if (err >= NO_ERROR) {
        if (bwr.write_consumed > 0) { // remove the command protocols the driver has already processed from mOut
            if (bwr.write_consumed < (ssize_t)mOut.dataSize())
                mOut.remove(0, bwr.write_consumed);
            else
                mOut.setDataSize(0);
        }
        if (bwr.read_consumed > 0) { // keep the return protocols read back from the driver in mIn
            mIn.setDataSize(bwr.read_consumed);
            mIn.setDataPosition(0);
        }
        return NO_ERROR;
    }
    
    return err;
}
6. talkWithDriver
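talkWithDriver() drives everything through a single BINDER_WRITE_READ ioctl whose argument is the binder_write_read struct filled in above. For reference, its definition in the binder.h of the same era looks roughly like this (field comments are mine):

	struct binder_write_read {
		signed long	write_size;	/* bytes of command protocols in write_buffer */
		signed long	write_consumed;	/* bytes the driver actually processed */
		unsigned long	write_buffer;	/* points at mOut.data() */
		signed long	read_size;	/* capacity available for return protocols */
		signed long	read_consumed;	/* bytes of return protocols the driver wrote */
		unsigned long	read_buffer;	/* points at mIn.data() */
	};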

binder.c

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data; /* the binder_proc the driver created for this process */
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg; /* user-space address of the ioctl argument, here a binder_write_read */

	/*printk(KERN_INFO "binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		return ret;

	mutex_lock(&binder_lock);
	thread = binder_get_thread(proc); /* look up, or create, the binder_thread for the calling thread */
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ: {
		struct binder_write_read bwr;
		if (size != sizeof(struct binder_write_read)) {
			ret = -EINVAL;
			goto err;
		}
		/* copy the binder_write_read struct passed in from user space */
		if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
			ret = -EFAULT;
			goto err;
		}
		if (bwr.write_size > 0) {
			/* the write end of bwr carries data, i.e. command protocols for the driver to
			   process; proc and thread still belong to the client process at this point */
			ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
			if (ret < 0) {
				bwr.read_consumed = 0;
				if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
					ret = -EFAULT;
				goto err;
			}
		}
		if (bwr.read_size > 0) {
			/* the read end of bwr has buffer space, so return protocols from the driver
			   are written back into it */
			ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
			if (!list_empty(&proc->todo))
				wake_up_interruptible(&proc->wait);
			if (ret < 0) {
				if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
					ret = -EFAULT;
				goto err;
			}
		}
		if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	}
	ret = 0;
err:
	if (thread)
		thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
	mutex_unlock(&binder_lock);
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
	return ret;
}
7. binder_ioctl
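binder_get_thread(), called at the top of binder_ioctl(), either finds the calling thread in the binder_proc's red-black tree of threads or creates a binder_thread for it on first use. An abridged sketch from the driver of the same era (statistics bookkeeping omitted):

	static struct binder_thread *binder_get_thread(struct binder_proc *proc)
	{
		struct binder_thread *thread = NULL;
		struct rb_node *parent = NULL;
		struct rb_node **p = &proc->threads.rb_node;

		/* look the calling thread up by pid in proc->threads */
		while (*p) {
			parent = *p;
			thread = rb_entry(parent, struct binder_thread, rb_node);
			if (current->pid < thread->pid)
				p = &(*p)->rb_left;
			else if (current->pid > thread->pid)
				p = &(*p)->rb_right;
			else
				break;
		}
		if (*p == NULL) {
			/* first ioctl from this thread: create and insert its binder_thread */
			thread = kzalloc(sizeof(*thread), GFP_KERNEL);
			if (thread == NULL)
				return NULL;
			thread->proc = proc;
			thread->pid = current->pid;
			init_waitqueue_head(&thread->wait);
			INIT_LIST_HEAD(&thread->todo);
			rb_link_node(&thread->rb_node, parent, p);
			rb_insert_color(&thread->rb_node, &proc->threads);
			thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
			thread->return_error = BR_OK;
			thread->return_error2 = BR_OK;
		}
		return thread;
	}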
int
binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
		    void __user *buffer, int size, signed long *consumed)
{
	/* buffer points to the write buffer of the binder_write_read struct the process handed to the driver */
	uint32_t cmd;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error == BR_OK) {
		if (get_user(cmd, (uint32_t __user *)ptr)) /* read the next command protocol code */
			return -EFAULT;
		ptr += sizeof(uint32_t);

		switch (cmd) {
		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			/* copy out the binder_transaction_data; where it contains pointers, only the
			   pointer values are copied here, not yet the buffers they point to */
			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
			break;
		}

		default:
			printk(KERN_ERR "binder: %d:%d unknown command %d\n", proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		*consumed = ptr - buffer;
	}
	return 0;
}
8. binder_thread_write
static void
binder_transaction(struct binder_proc *proc, struct binder_thread *thread,
	struct binder_transaction_data *tr, int reply)
{
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	size_t *offp, *off_end;
	struct binder_proc *target_proc;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct list_head *target_list;
	wait_queue_head_t *target_wait;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error;

	if (reply) {
		/* reply selects between handling BC_REPLY and BC_TRANSACTION;
		   the BC_REPLY branch is not shown in this excerpt */
	} else {
		/* BC_TRANSACTION */
		if (tr->target.handle) {
			/* non-zero handle: look up the corresponding binder reference object
			   (not shown in this excerpt) */
		} else {
			/* handle value 0: the target is the ServiceManager entity object (binder node) */
			target_node = binder_context_mgr_node;
			if (target_node == NULL) {
				return_error = BR_DEAD_REPLY;
				goto err_no_context_mgr_node;
			}
		}
		e->to_node = target_node->debug_id;
		target_proc = target_node->proc; /* the target process that owns the node */
		if (target_proc == NULL) {
			return_error = BR_DEAD_REPLY;
			goto err_dead_binder;
		}
		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			/* synchronous IPC: try to pick an optimal target thread from the transaction stack */
			struct binder_transaction *tmp;
			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				return_error = BR_FAILED_REPLY;
				goto err_bad_call_stack;
			}
			while (tmp) {
				/* walk the stack looking for a thread in the target process that is waiting on us */
				if (tmp->from && tmp->from->proc == target_proc)
					target_thread = tmp->from;
				tmp = tmp->from_parent;
			}
		}
	}
	if (target_thread) {
		/* a target thread was found: queue the work on that thread's todo list */
		e->to_thread = target_thread->pid;
		target_list = &target_thread->todo;
		target_wait = &target_thread->wait;
	} else {
		/* otherwise queue the work on the target process's todo list */
		target_list = &target_proc->todo;
		target_wait = &target_proc->wait;
	}
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL); /* allocate a binder_transaction struct describing this transaction */
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_t_failed;
	}
	binder_stats.obj_created[BINDER_STAT_TRANSACTION]++;

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); /* binder_work item used to deliver BR_TRANSACTION_COMPLETE to the source thread */
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats.obj_created[BINDER_STAT_TRANSACTION_COMPLETE]++;

	t->debug_id = ++binder_last_id;
	e->debug_id = t->debug_id;

	/* initialise the binder_transaction */
	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread; /* from points to the source (client) thread, so that after the target thread or process has handled this request it can find its way back to the thread that issued it */
	else
		t->from = NULL;
	t->sender_euid = proc->tsk->cred->euid;
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);
	t->buffer = binder_alloc_buf(target_proc, tr->data_size,
		tr->offsets_size, !reply && (t->flags & TF_ONE_WAY)); /* allocate a kernel buffer in the target process so the IPC data can be copied into it */
	if (t->buffer == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	if (target_node)
		binder_inc_node(target_node, 1, 0, NULL); /* take a strong reference on the target node */

	offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *))); /* start of the offsets array inside the kernel buffer */

	/* copy the client's data buffer into the transaction's kernel buffer */
	if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("binder: %d:%d got transaction with invalid "
			"data ptr\n", proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	/* copy the offsets array, which records where the flat_binder_objects sit inside the data */
	if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("binder: %d:%d got transaction with invalid "
			"offsets ptr\n", proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	/* ... the remainder of binder_transaction (translating each flat_binder_object, queueing t
	   on target_list, queueing tcomplete on the source thread's todo list and waking up
	   target_wait) is not shown in this excerpt ... */

9. binder_transaction
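On the other side of the transaction, the ServiceManager process is woken up with a BR_TRANSACTION whose code is the CHECK_SERVICE_TRANSACTION that FregClient sent (SVC_MGR_CHECK_SERVICE on the servicemanager side). It looks the name up in its service list and replies with a flat binder reference, which the driver then translates into the handle value FregClient finally receives. An abridged fragment of svcmgr_handler() from the service_manager.c of the same era (the surrounding variables s, len, ptr, bs, msg and reply belong to the enclosing handler):

	case SVC_MGR_GET_SERVICE:
	case SVC_MGR_CHECK_SERVICE:
		/* read the requested service name out of the transaction data */
		s = bio_get_string16(msg, &len);
		/* walk svclist and return the handle that addService() registered */
		ptr = do_find_service(bs, s, len);
		if (!ptr)
			break;
		/* write a binder reference into the reply; the driver remaps it to a
		   handle that is valid in the requesting (FregClient) process */
		bio_put_ref(reply, ptr);
		return 0;

When this reply reaches the driver as a BC_REPLY, binder_transaction() runs again for the reply case, creates (or reuses) a binder reference object for FregClient and delivers the BR_REPLY containing its handle value, which is where step 5 of the overview above picks up.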