Let's start with the overall diagram.
When a client wants to call a system service such as AMS (an IBinder), it cannot get hold of that service directly. This is where ServiceManager comes in.
So what is ServiceManager? ServiceManager is itself a service. Services like AMS are registered with ServiceManager when the system boots (from the system_server process). When a client later asks for such a service, it is actually ServiceManager that looks it up and hands it back. It acts like a housekeeper: it keeps track of all registered services and answers the clients' lookup requests.
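Conceptually, ServiceManager is little more than a table mapping service names to Binder handles. The sketch below is purely illustrative (hypothetical names, not the real service_manager.c data structures, which use a linked list of svcinfo records plus SELinux checks), but it captures the "housekeeper" idea: servers register by name, clients look up by name.
#include <stdint.h>
#include <string.h>

#define MAX_SERVICES 64

struct svc_entry {
    char     name[64];   /* e.g. "activity" for AMS */
    uint32_t handle;     /* binder handle used to reach the service's process */
};

static struct svc_entry g_services[MAX_SERVICES];
static int g_count;

/* what addService boils down to: remember the name -> handle mapping */
int svc_register(const char *name, uint32_t handle)
{
    if (g_count >= MAX_SERVICES)
        return -1;
    strncpy(g_services[g_count].name, name, sizeof(g_services[g_count].name) - 1);
    g_services[g_count].handle = handle;
    g_count++;
    return 0;
}

/* what getService boils down to: hand the handle back to the client */
uint32_t svc_lookup(const char *name)
{
    int i;
    for (i = 0; i < g_count; i++) {
        if (strcmp(g_services[i].name, name) == 0)
            return g_services[i].handle;
    }
    return 0; /* 0 = not found; handle 0 is ServiceManager itself */
}
In the real implementation, the handle stored here is a reference created by the binder driver, and returning it to the client is itself a Binder transaction.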
How ServiceManager works
1 ServiceManager startup and registration
Likewise, ServiceManager is created when the init process starts: init parses the init.rc file and launches the servicemanager service. The entry point is the main() function in service_manager.c:
/frameworks/native/cmds/servicemanager/service_manager.c
int main(int argc, char** argv)
{
struct binder_state *bs;
union selinux_callback cb;
char *driver;
……
//step 1: open the binder driver and map 128K of memory
bs = binder_open(driver, 128*1024);
……
//step 2: register as the context manager
if (binder_become_context_manager(bs)) {
ALOGE("cannot become context manager (%s)\n", strerror(errno));
return -1;
}
……
//step 3: enter the loop and wait for incoming requests
binder_loop(bs, svcmgr_handler);
return 0;
}
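For reference, the entry that init parses to launch this daemon looks roughly like the following; this is quoted from memory of an older AOSP init.rc (newer releases keep it in a separate servicemanager.rc, and the exact class/onrestart lines vary by version):
service servicemanager /system/bin/servicemanager
    class core
    user system
    group system
    critical
    onrestart restart zygote
    onrestart restart media
critical marks the daemon as essential (init reboots the device if it keeps dying), and the onrestart lines restart the big daemons whose cached Binder handles would become useless if ServiceManager came back with an empty registry.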
1.1 What does binder_open do?
Step 1 calls binder_open (note: this is not the binder_open inside the binder driver). This function opens the binder driver and then maps ServiceManager's virtual memory to the kernel-space virtual memory managed by the driver; the buffer requested for ServiceManager is only 128K.
# /frameworks/native/cmds/servicemanager/binder.c
struct binder_state *binder_open(const char* driver, size_t mapsize)
{
//open the binder driver
bs->fd = open(driver, O_RDWR | O_CLOEXEC);
……
bs->mapsize = mapsize;
//mmap: map the driver's buffer into this process's address space
bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
if (bs->mapped == MAP_FAILED) {
fprintf(stderr,"binder: cannot map device (%s)\n",
strerror(errno));
goto fail_map;
}
    return bs;
    ……
}
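Besides open and mmap, a Binder user normally also checks that it speaks the same protocol version as the driver (the real binder_open does a check along these lines in the part elided above). A minimal sketch, assuming the uapi header linux/android/binder.h, which provides struct binder_version, BINDER_VERSION and BINDER_CURRENT_PROTOCOL_VERSION:
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/android/binder.h>

/* Sketch: open the binder device and verify the driver's protocol version. */
static int open_binder_checked(const char *driver)
{
    struct binder_version vers;
    int fd = open(driver, O_RDWR | O_CLOEXEC);

    if (fd < 0) {
        fprintf(stderr, "cannot open %s: %s\n", driver, strerror(errno));
        return -1;
    }
    if (ioctl(fd, BINDER_VERSION, &vers) == -1 ||
        vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION) {
        fprintf(stderr, "binder protocol version mismatch\n");
        close(fd);
        return -1;
    }
    return fd;
}
As for the 128K mapping: ServiceManager only shuttles small add-service/check-service transactions, so it needs far less buffer space than a regular process, which maps roughly 1MB in ProcessState.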
1.2 What does binder_become_context_manager do?
Next, binder_become_context_manager is called to make ServiceManager the "housekeeper", i.e. the Binder context manager:
# /frameworks/native/cmds/servicemanager/binder.c
int binder_become_context_manager(struct binder_state *bs)
{
return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}
binder_become_context_manager simply issues an ioctl to the binder driver with the command BINDER_SET_CONTEXT_MGR. In the driver's binder_ioctl this hits the following case:
case BINDER_SET_CONTEXT_MGR:
ret = binder_ioctl_set_ctx_mgr(filp);
if (ret)
goto err;
ret = security_binder_set_context_mgr(proc->tsk);
if (ret < 0)
goto err;
static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
int ret = 0;
struct binder_proc *proc = filp->private_data;
kuid_t curr_euid = current_euid();
……
if (uid_valid(binder_context_mgr_uid)) {
if (!uid_eq(binder_context_mgr_uid, curr_euid)) {
pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
from_kuid(&init_user_ns, curr_euid),
from_kuid(&init_user_ns,
binder_context_mgr_uid));
ret = -EPERM;
goto out;
}
} else {
binder_context_mgr_uid = curr_euid;
}
binder_context_mgr_node = binder_new_node(proc, 0, 0);
if (binder_context_mgr_node == NULL) {
ret = -ENOMEM;
goto out;
}
……
}
So handling the BINDER_SET_CONTEXT_MGR command boils down to creating a binder_node via binder_new_node. The node records the current process (node->proc = proc) along with ptr and cookie (both 0 here), and initializes two list heads, work.entry and async_todo, which serve as queues for pending work, conceptually similar to a MessageQueue:
static struct binder_node *binder_new_node(struct binder_proc *proc,
binder_uintptr_t ptr,
binder_uintptr_t cookie)
{
struct rb_node **p = &proc->nodes.rb_node;
struct rb_node *parent = NULL;
struct binder_node *node;
……
node = kzalloc(sizeof(*node), GFP_KERNEL);
node->debug_id = ++binder_last_id;
node->proc = proc;
node->ptr = ptr;
node->cookie = cookie;
node->work.type = BINDER_WORK_NODE;
INIT_LIST_HEAD(&node->work.entry);
INIT_LIST_HEAD(&node->async_todo);
……
}
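Why does this node matter? It is the node behind handle 0: whenever any process starts a transaction with target handle 0, the driver routes it to binder_context_mgr_node, i.e. to ServiceManager. Below is a simplified paraphrase of the target-resolution logic in the driver's binder_transaction (3.x-era binder.c; locking, security checks and error handling stripped, and helper names vary across kernel versions):
if (tr->target.handle) {
    /* non-zero handle: look up the binder_ref this caller holds
       for that handle and use its node as the target */
    struct binder_ref *ref = binder_get_ref(proc, tr->target.handle);
    target_node = ref->node;
} else {
    /* handle 0 always means the context manager, i.e. ServiceManager */
    target_node = binder_context_mgr_node;
}
target_proc = target_node->proc;
This is why clients never need to "look up" ServiceManager itself: handle 0 is hard-wired to the node created above.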
1.3 What does binder_loop do?
Finally, binder_loop is called to process incoming data. As you can see, binder_loop is also an infinite loop:
void binder_loop(struct binder_state *bs, binder_handler func)
{
int res;
struct binder_write_read bwr;
uint32_t readbuf[32];
……
readbuf[0] = BC_ENTER_LOOPER;
//tell the driver this thread has entered the looper
binder_write(bs, readbuf, sizeof(uint32_t));
//infinite loop: keep reading from the driver
for (;;) {
bwr.read_size = sizeof(readbuf);
bwr.read_consumed = 0;
bwr.read_buffer = (uintptr_t) readbuf;
//this lands in the driver's binder_ioctl again; it blocks until there is data to read
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
……
res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
……
    }
}
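The data exchanged with the driver here is described entirely by struct binder_write_read. Its definition in the binder uapi header (binder_size_t and binder_uintptr_t are fixed-size integer typedefs from the same header; comments added) makes the protocol clear: the driver first consumes write_buffer if write_size is non-zero, then fills read_buffer if read_size is non-zero:
struct binder_write_read {
    binder_size_t    write_size;     /* bytes of commands to send to the driver */
    binder_size_t    write_consumed; /* filled by the driver: bytes it processed */
    binder_uintptr_t write_buffer;   /* user-space address of the command buffer */
    binder_size_t    read_size;      /* capacity of the return buffer */
    binder_size_t    read_consumed;  /* filled by the driver: bytes it returned */
    binder_uintptr_t read_buffer;    /* user-space address of the return buffer */
};
In binder_loop only read_size is set (the write fields are zeroed in the elided setup), so each BINDER_WRITE_READ ioctl is effectively a blocking read that waits for incoming work; binder_write below does the opposite.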
The first command executed is BC_ENTER_LOOPER, which marks this thread as having entered the loop. It is written to the driver via binder_write (again, this is not the binder_write inside the binder driver):
int binder_write(struct binder_state *bs, void *data, size_t len)
{
struct binder_write_read bwr;
int res;
//write_size is set to len (greater than 0), so the driver knows there is data to write
bwr