A First Look at the Binder Driver
I. Basic data structures used by the Binder driver
1.binder_buffer
The basic node used to manage the memory allocated via mmap.
struct binder_buffer {
struct list_head entry; /* free and allocated entries by address */
struct rb_node rb_node; /* free entry by size or allocated entry */
/* by address */
unsigned free:1;
unsigned allow_user_free:1;
unsigned async_transaction:1;
unsigned debug_id:29;
struct binder_transaction *transaction;
struct binder_node *target_node;
size_t data_size;
size_t offsets_size;
uint8_t data[0];
};
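The data[0] member at the end is the classic flexible-array trick: the header and the payload it describes live in one contiguous block, and the payload starts right after the header. Below is a minimal user-space sketch of that pattern (hypothetical struct and field names, not the driver's code), only to show how data_size and data relate:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical header with a flexible array member, mirroring how
 * binder_buffer's data[0] marks where the transaction payload begins. */
struct buf_hdr {
    size_t data_size;
    unsigned char data[];   /* payload follows the header in the same block */
};

int main(void)
{
    size_t payload = 32;
    struct buf_hdr *b = malloc(sizeof(*b) + payload);  /* one allocation for both */
    if (!b)
        return 1;
    b->data_size = payload;
    memset(b->data, 0xAB, b->data_size);
    printf("header at %p, payload at %p, size %zu\n",
           (void *)b, (void *)b->data, b->data_size);
    free(b);
    return 0;
}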
2.flat_binder_object
The binder structure that is passed between processes.
struct flat_binder_object {
/* 8 bytes for large_flat_header. */
unsigned long type;
unsigned long flags;
/* 8 bytes of data. */
union {
void *binder; /* local object */
signed long handle; /* remote object */
};
/* extra data associated with local object */
void *cookie;
};
3.binder_ref
binder_ref represents a reference to a binder_node.
struct binder_ref {
/* Lookups needed: */
/* node + proc => ref (transaction) */
/* desc + proc => ref (transaction, inc/dec ref) */
/* node => refs + procs (proc exit) */
int debug_id;
struct rb_node rb_node_desc;
struct rb_node rb_node_node;
struct hlist_node node_entry;
struct binder_proc *proc;
struct binder_node *node;
uint32_t desc;
int strong;
int weak;
struct binder_ref_death *death;
};
4.binder_node
binder_node represents the server side of a service hosted by a process.
struct binder_node {
int debug_id;
struct binder_work work;
union {
struct rb_node rb_node;
struct hlist_node dead_node;
};
struct binder_proc *proc;
struct hlist_head refs;
int internal_strong_refs;
int local_weak_refs;
int local_strong_refs;
void __user *ptr;
void __user *cookie;
unsigned has_strong_ref:1;
unsigned pending_strong_ref:1;
unsigned has_weak_ref:1;
unsigned pending_weak_ref:1;
unsigned has_async_transaction:1;
unsigned accept_fds:1;
unsigned min_priority:8;
struct list_head async_todo;
#ifdef BINDER_MONITOR
char name[MAX_SERVICE_NAME_LEN];
#endif
};
5.binder_proc
binder_proc holds the information about one process.
struct binder_proc {
struct hlist_node proc_node;
struct rb_root threads;
struct rb_root nodes;
struct rb_root refs_by_desc;
struct rb_root refs_by_node;
int pid;
struct vm_area_struct *vma;
struct task_struct *tsk;
struct files_struct *files;
struct hlist_node deferred_work_node;
int deferred_work;
void *buffer;
ptrdiff_t user_buffer_offset;
struct list_head buffers;
struct rb_root free_buffers;
struct rb_root allocated_buffers;
size_t free_async_space;
struct page **pages;
size_t buffer_size;
uint32_t buffer_free;
struct list_head todo;
wait_queue_head_t wait;
struct binder_stats stats;
struct list_head delivered_death;
int max_threads;
int requested_threads;
int requested_threads_started;
int ready_threads;
long default_priority;
struct dentry *debugfs_entry;
#ifdef RT_PRIO_INHERIT
unsigned long default_rt_prio : 16;
unsigned long default_policy : 16;
#endif
};
node, ref, and proc are the three most important structures; they represent the server, a reference, and the process information respectively. Inside the kernel they reference one another: the proc pointer in a node points to the proc of the process the node lives in, while a ref represents a reference to a node. The same node may be used from different procs, so each of those procs needs its own ref to refer to that node.
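As a rough illustration of that relationship (a toy model with made-up names, not the real kernel structures), one node owned by a server proc can be referenced from several client procs, each through its own ref and its own desc:
#include <stdio.h>

struct toy_proc { const char *name; };
struct toy_node { struct toy_proc *proc; };                      /* the service, owned by one proc */
struct toy_ref  { struct toy_proc *proc; struct toy_node *node; unsigned desc; };

int main(void)
{
    struct toy_proc server = { "mediaserver" };
    struct toy_proc client_a = { "app_a" }, client_b = { "app_b" };
    struct toy_node service = { &server };                       /* node->proc points at its owner */
    struct toy_ref ref_a = { &client_a, &service, 1 };           /* two different procs hold refs */
    struct toy_ref ref_b = { &client_b, &service, 3 };           /* to the very same node */

    printf("%s (desc %u) and %s (desc %u) both reference a node owned by %s\n",
           ref_a.proc->name, ref_a.desc, ref_b.proc->name, ref_b.desc,
           ref_a.node->proc->name);
    return 0;
}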
II. Key functions of the Binder driver
The Binder mechanism uses BpBinder as the client-side proxy and BBinder on the server side; both communicate through the /dev/binder device. Below we look at some implementation details of the Binder mechanism from the driver's point of view.
1.binder_ioctl
From the analysis above we can see that every operation involving the Binder driver goes through ioctl, a system-level multiplexing entry point, and eventually reaches binder_ioctl in the kernel's binder.c; that single file implements the entire Binder driver.
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
//The fd passed to ioctl corresponds to filp here; filp->private_data is a binder_proc ("proc" being short for process)
int ret;
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread;
unsigned int size = _IOC_SIZE(cmd);
。。。。
//This ioctl runs on some thread of the user process, and proc keeps track of the threads in the current process. The current thread is looked up via proc here, so essentially Binder is a mechanism for communication between threads of different processes
thread = binder_get_thread(proc);
if (thread == NULL) {
ret = -ENOMEM;
goto err;
}
switch (cmd) {
case BINDER_WRITE_READ: {
struct binder_write_read bwr;
if (size != sizeof(struct binder_write_read)) {
ret = -EINVAL;
goto err;
}
if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
ret = -EFAULT;
goto err;
}
binder_debug(BINDER_DEBUG_READ_WRITE,
"binder: %d:%d write %ld at %08lx, read %ld at %08lx\n",
proc->pid, thread->pid, bwr.write_size, bwr.write_buffer,
bwr.read_size, bwr.read_buffer);
if (bwr.write_size > 0) {
//Send the data the user process wants to transmit
ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
if (ret < 0) {
bwr.read_consumed = 0;
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto err;
}
}
if (bwr.read_size > 0) {
//Read data back to the user process
ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
if (!list_empty(&proc->todo))
wake_up_interruptible(&proc->wait);
if (ret < 0) {
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto err;
}
}
binder_debug(BINDER_DEBUG_READ_WRITE,
"binder: %d:%d wrote %ld of %ld, read return %ld of %ld\n",
proc->pid, thread->pid, bwr.write_consumed, bwr.write_size,
bwr.read_consumed, bwr.read_size);
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
ret = -EFAULT;
goto err;
}
break;
}
case BINDER_SET_MAX_THREADS:
//Set the maximum number of threads the user process may use
if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
ret = -EINVAL;
goto err;
}
break;
case BINDER_SET_CONTEXT_MGR:
//This case is invoked from servicemanager. It creates the kernel variable binder_context_mgr_node, which holds the proc of the calling process; since this case is reached from the servicemanager process, that node is thereby tied to servicemanager. Because all user processes share the same kernel address space, every process that enters the kernel sees this same variable, which is how any user process can establish a connection with servicemanager
if (binder_context_mgr_node != NULL) {
printk(KERN_ERR "binder: BINDER_SET_CONTEXT_MGR already set\n");
ret = -EBUSY;
goto err;
}
if (binder_context_mgr_uid != -1) {
if (binder_context_mgr_uid != current->cred->euid) {
printk(KERN_ERR "binder: BINDER_SET_"
"CONTEXT_MGR bad uid %d != %d\n",
current->cred->euid,
binder_context_mgr_uid);
ret = -EPERM;
goto err;
}
} else
binder_context_mgr_uid = current->cred->euid;
binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
if (binder_context_mgr_node == NULL) {
ret = -ENOMEM;
goto err;
}
#ifdef BINDER_MONITOR
strcpy(binder_context_mgr_node->name, "servicemanager");
#endif
binder_context_mgr_node->local_weak_refs++;
binder_context_mgr_node->local_strong_refs++;
binder_context_mgr_node->has_strong_ref = 1;
binder_context_mgr_node->has_weak_ref = 1;
break;
。。。。
default:
ret = -EINVAL;
goto err;
}
。。。
return ret;
}
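Seen from user space, everything above boils down to filling in a struct binder_write_read and issuing one ioctl. Here is a hedged sketch of that call (the header path and the exact field types of struct binder_write_read vary across kernel versions; the framework normally does this inside IPCThreadState::talkWithDriver, so this is only an illustration, not Android's actual code):
#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>   /* assumed modern UAPI header; older trees use a local binder.h */

/* Send whatever BC_* commands are queued in write_buf and collect BR_* work
 * into read_buf, mirroring the write-then-read order in binder_ioctl() above. */
int binder_write_read_once(int fd, void *write_buf, size_t write_len,
                           void *read_buf, size_t read_len)
{
    struct binder_write_read bwr;

    bwr.write_buffer   = (binder_uintptr_t)(uintptr_t)write_buf;
    bwr.write_size     = write_len;
    bwr.write_consumed = 0;
    bwr.read_buffer    = (binder_uintptr_t)(uintptr_t)read_buf;
    bwr.read_size      = read_len;
    bwr.read_consumed  = 0;

    return ioctl(fd, BINDER_WRITE_READ, &bwr);
}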
2.mmap
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
int ret;
struct vm_struct *area;
struct binder_proc *proc = filp->private_data;
const char *failure_string;
struct binder_buffer *buffer;
。。。。
area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
if (area == NULL) {
ret = -ENOMEM;
failure_string = "get_vm_area";
goto err_get_vm_area_failed;
}
//proc->buffer is a void*; here it receives the address of the allocated area
proc->buffer = area->addr;
proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
。。。
buffer = proc->buffer;
INIT_LIST_HEAD(&proc->buffers);
//buffer is a binder_buffer; add it to the list here
list_add(&buffer->entry, &proc->buffers);
buffer->free = 1;
binder_insert_free_buffer(proc, buffer);
proc->free_async_space = proc->buffer_size / 2;
barrier();
proc->files = get_files_struct(current);
proc->vma = vma;
。。。。
return ret;
}
From this code we can see how mmap handles the allocated memory block. Looking back at the binder_buffer structure, its data field can be treated as a variable-length array, and the whole memory block is initialized here as a single binder_buffer node; later allocation and freeing of memory are based on this binder_buffer structure and this block. It must also be pointed out that this block is shared memory: it is mapped both in kernel space and in the user process, so data the kernel writes into it can be read directly by user space, which removes one copy from kernel space to user space.
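For reference, a hedged sketch of the user-space side of this mapping, roughly what ProcessState does when a process starts using Binder (the 1 MB size is only an illustrative value, not the framework's exact constant):
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    int fd = open("/dev/binder", O_RDWR);
    if (fd < 0) {
        perror("open /dev/binder");
        return 1;
    }

    size_t map_size = 1024 * 1024;      /* illustrative size only */
    /* PROT_READ only: user space never writes this area directly; the driver
     * copies transaction data into it, the single copy described above. */
    void *base = mmap(NULL, map_size, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, fd, 0);
    if (base == MAP_FAILED) {
        perror("mmap");
        close(fd);
        return 1;
    }

    printf("binder buffer mapped at %p (%zu bytes)\n", base, map_size);
    munmap(base, map_size);
    close(fd);
    return 0;
}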
3.binder_transaction
binder_thread_write, called from binder_ioctl, contains the following fragment:
case BC_TRANSACTION:
case BC_REPLY: {
struct binder_transaction_data tr;
if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
break;
}
When a user process sends a BC_TRANSACTION or BC_REPLY command, execution reaches here and then enters binder_transaction. binder_transaction is a complex function, so we only analyze a few fragments here; the complete code is listed in reference 2. Before reading the code below, a few key structures need explaining: binder_node represents a service and is always associated with a binder_proc, the structure representing the process that provides the service. There is also binder_ref, which represents a reference to a binder_node and is likewise associated with a binder_proc, meaning that process holds a reference to the service; at the same time, the binder_proc assigns that binder_ref a desc (also called a handle), which is handed to user space to identify the reference to this service.
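As a hedged sketch of what binder_thread_write actually parses at this point: each entry in the write buffer is a 32-bit BC_* code immediately followed by its payload, for BC_TRANSACTION a struct binder_transaction_data. This mirrors, in simplified form, what IPCThreadState::writeTransactionData assembles on the user side; the real code builds it inside a Parcel, field types below follow the modern UAPI header, and the offsets array of flat_binder_objects is omitted:
#include <stdint.h>
#include <string.h>
#include <linux/android/binder.h>   /* assumed modern UAPI header */

/* Build one BC_TRANSACTION entry into 'out' and return its length. */
size_t build_bc_transaction(void *out, uint32_t handle, uint32_t code,
                            const void *data, size_t data_size)
{
    uint32_t cmd = BC_TRANSACTION;
    struct binder_transaction_data tr;

    memset(&tr, 0, sizeof(tr));
    tr.target.handle = handle;                          /* 0 targets servicemanager */
    tr.code = code;                                     /* e.g. ADD_SERVICE_TRANSACTION */
    tr.data_size = data_size;
    tr.data.ptr.buffer = (binder_uintptr_t)(uintptr_t)data;
    tr.offsets_size = 0;                                /* no flat_binder_objects in this sketch */

    memcpy(out, &cmd, sizeof(cmd));
    memcpy((char *)out + sizeof(cmd), &tr, sizeof(tr));
    return sizeof(cmd) + sizeof(tr);
}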
<Code fragment 0>
if (reply) {
in_reply_to = thread->transaction_stack;
if (in_reply_to == NULL) {
binder_user_error("binder: %d:%d got reply transaction "
"with no transaction stack\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_empty_call_stack;
}
binder_set_nice(in_reply_to->saved_priority);
if (in_reply_to->to_thread != thread) {
binder_user_error("binder: %d:%d got reply transaction "
"with bad transaction stack,"
" transaction %d has target %d:%d\n",
proc->pid, thread->pid, in_reply_to->debug_id,
in_reply_to->to_proc ?
in_reply_to->to_proc->pid : 0,
in_reply_to->to_thread ?
in_reply_to->to_thread->pid : 0);
return_error = BR_FAILED_REPLY;
in_reply_to = NULL;
goto err_bad_call_stack;
}
thread->transaction_stack = in_reply_to->to_parent;
target_thread = in_reply_to->from;
if (target_thread == NULL) {
return_error = BR_DEAD_REPLY;
goto err_dead_binder;
}
if (target_thread->transaction_stack != in_reply_to) {
binder_user_error("binder: %d:%d got reply transaction "
"with bad target transaction stack %d, "
"expected %d\n",
proc->pid, thread->pid,
target_thread->transaction_stack ?
target_thread->transaction_stack->debug_id : 0,
in_reply_to->debug_id);
return_error = BR_FAILED_REPLY;
in_reply_to = NULL;
target_thread = NULL;
goto err_dead_binder;
}
target_proc = target_thread->proc;
}
This code clearly runs in the BC_REPLY case, i.e. when responding to a BC_TRANSACTION. The thing to pay special attention to here is target_proc: it is found from the thread recorded when the original request was sent, unlike the BC_TRANSACTION case below, where target_proc is derived from the node.
<Code fragment 1>
if (tr->target.handle) {
struct binder_ref *ref;
ref = binder_get_ref(proc, tr->target.handle);
if (ref == NULL) {
binder_user_error("binder: %d:%d got "
"transaction to invalid handle\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_invalid_target_handle;
}
target_node = ref->node;
} else {
target_node = binder_context_mgr_node;
if (target_node == NULL) {
#ifdef BINDER_MONITOR
binder_user_error("binder: %d:%d binder_context_mgr_node is NULL\n",
proc->pid, thread->pid);
#endif
return_error = BR_DEAD_REPLY;
goto err_no_context_mgr_node;
}
}
This code uses the handle passed in to look up the corresponding target_node. We can see clearly that when handle is 0, target_node is binder_context_mgr_node, the binder_node created in the ioctl above, which of course corresponds to the process that called BINDER_SET_CONTEXT_MGR, i.e. servicemanager. When handle is not 0, binder_get_ref looks up the binder_ref for that handle, and from it the target_node.
static struct binder_ref *binder_get_ref(struct binder_proc *proc,
uint32_t desc)
{
struct rb_node *n = proc->refs_by_desc.rb_node;
struct binder_ref *ref;
while (n) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
if (desc < ref->desc)
n = n->rb_left;
else if (desc > ref->desc)
n = n->rb_right;
else
return ref;
}
return NULL;
}
From this function we learn that what user space calls a handle is called desc inside the kernel, and it is stored in the binder_ref (see the binder_ref structure above).
The line ref = rb_entry(n, struct binder_ref, rb_node_desc); is an interesting macro: given the tree node n it recovers the containing structure. Here n is the rb_node_desc field embedded in a binder_ref, and knowing that field's address yields the address of the binder_ref itself.
proc->refs_by_desc is the root of a red-black tree, so all of a process's binder_refs are organized in a red-black tree ordered by desc.
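rb_entry is essentially the kernel's container_of under another name. A minimal user-space re-implementation of the idea (an illustration only, not the kernel's exact macro) looks like this:
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_ref {
    int desc;
    int rb_node_desc;   /* stand-in for the embedded struct rb_node */
};

int main(void)
{
    struct fake_ref ref = { .desc = 7 };
    int *member = &ref.rb_node_desc;                 /* what the tree hands back */
    struct fake_ref *owner = container_of(member, struct fake_ref, rb_node_desc);
    printf("recovered desc = %d\n", owner->desc);    /* prints 7 */
    return 0;
}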
Code fragment 1 above calls binder_get_ref like this:
ref = binder_get_ref(proc, tr->target.handle);
Here proc is the process that called ioctl, so this line looks up, within the current process, the ref that corresponds to the handle. If it returns NULL, something has definitely gone wrong: the handle can only exist because the kernel created a ref in this process and returned the handle to the application through a BpBinder, and it is through that BpBinder that the application communicates and ends up back here, so the handle must be found in the current process's ref red-black tree.
The purpose of that code is to find a target_node, i.e. the target server.
Now look at the following code, where t is a binder_transaction *:
//target_proc is obtained from target_node and identifies the target process; here a chunk of memory, a binder_buffer, is allocated from the target process's memory block
t->buffer = binder_alloc_buf(target_proc, tr->data_size,
tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
。。。
//the data field of the binder_buffer is the data area; offp points into the usable region
offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
//copy the data from user space into t->buffer->data, the target process's shared memory area; this crosses processes: the data the source process asked to send via ioctl is copied into the target process right here and never copied again afterwards, which is the single copy that makes Binder efficient
if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
binder_user_error("binder: %d:%d got transaction with invalid "
"data ptr\n", proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
。。。
//off_end points to the end of the region; between offp and off_end lie the offsets of the struct flat_binder_object entries embedded in the data (see the structure section for the definition)
Before looking at the next piece of code, let us go back to BpServiceManager::addService:
virtual status_t addService(const String16& name, const sp<IBinder>& service)
{
Parcel data, reply;
data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
data.writeString16(name);
data.writeStrongBinder(service);
LOGI("Remote_transact add service");
status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
return err == NO_ERROR ? reply.readExceptionCode() : err;
}
What exactly does the line data.writeStrongBinder(service); write into the Parcel?
status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
{
return flatten_binder(ProcessState::self(), val, this);
}
status_t flatten_binder(const sp<ProcessState>& proc,
const sp<IBinder>& binder, Parcel* out)
{
flat_binder_object obj;
obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
if (binder != NULL) {
IBinder *local = binder->localBinder();
if (!local) {
BpBinder *proxy = binder->remoteBinder();
if (proxy == NULL) {
LOGE("null proxy");
}
const int32_t handle = proxy ? proxy->handle() : 0;
obj.type = BINDER_TYPE_HANDLE;
obj.handle = handle;
obj.cookie = NULL;
} else {
obj.type = BINDER_TYPE_BINDER;
obj.binder = local->getWeakRefs();
obj.cookie = local;
}
} else {
obj.type = BINDER_TYPE_BINDER;
obj.binder = NULL;
obj.cookie = NULL;
}
return finish_flatten_binder(binder, obj, out);
}
Look at flatten_binder first. IBinder defines two virtual functions, localBinder and remoteBinder, which both return NULL by default; their purpose is to distinguish whether an IBinder is a BpBinder or a BBinder, because BpBinder overrides remoteBinder and BBinder overrides localBinder, each simply returning this.
So for a BpBinder we get:
obj.type = BINDER_TYPE_HANDLE;
obj.handle = handle;
obj.cookie = NULL;
and for a BBinder:
obj.type = BINDER_TYPE_BINDER;
obj.binder = local->getWeakRefs();
obj.cookie = local;
In other words, via copy_from_user these transaction arguments have already been copied into the target process as flat_binder_object entries.
Now look at the code that follows: it is essentially one big loop that walks the flat_binder_object entries in the buffer and handles each according to its type. We will analyze this loop along two flows:
1.addService
2.checkService

(Figure) The addService call flow: green marks user-space processes, blue marks kernel space.
Following the addService flow, the argument is a MediaPlayerService object, i.e. a BBinder, so fp->type is BINDER_TYPE_BINDER, which corresponds to the following code:
<Code fragment 2>
switch (fp->type) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
struct binder_ref *ref;
//look up, in the current process, the node corresponding to fp->binder, i.e. the server behind fp->binder
struct binder_node *node = binder_get_node(proc, fp->binder);
if (node == NULL) {
//if none is found, create a new binder_node
node = binder_new_node(proc, fp->binder, fp->cookie);
if (node == NULL) {
return_error = BR_FAILED_REPLY;
goto err_binder_new_node_failed;
}
node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
}
if (fp->cookie != node->cookie) {
goto err_binder_get_ref_for_node_failed;
}
ref = binder_get_ref_for_node(target_proc, node);
if (ref == NULL) {
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_for_node_failed;
}
if (fp->type == BINDER_TYPE_BINDER)
fp->type = BINDER_TYPE_HANDLE;
else
fp->type = BINDER_TYPE_WEAK_HANDLE;
fp->handle = ref->desc;
binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
&thread->todo);
} break;
Two functions need explaining here, binder_get_node and binder_get_ref_for_node; their prototypes are:
static struct binder_node *binder_get_node(struct binder_proc *proc,
void __user *ptr)
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
struct binder_node *node)
binder_get_node looks up a node in proc by ptr. binder_get_ref_for_node looks up, in proc, a reference to the given node; if no reference exists yet, it creates one.
Back to the case code above: it first looks up a node for fp->binder in the current proc and creates a new node if none exists. This matches our expectation, because the process that calls addService, i.e. the process hosting the Service, indeed has no node representing the service before registration, so a new node is created, and fp->binder, the MediaPlayerService instance, is stored in that newly created node.
Next, binder_get_ref_for_node checks whether the target process, here servicemanager, already holds a reference to the node; at this point it does not, so the call creates one in its proc. Finally the desc of that reference is assigned to fp->handle, and remember that fp now lives in target_proc's memory, i.e. servicemanager's buffer. Later the user-space servicemanager process reads this data and adds the entry to its service list; note that what it reads at that point is the handle. This is how a Service process registers its service with servicemanager.
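The essence of binder_get_ref_for_node, find an existing reference or create one and hand out the next descriptor, can be shown with a deliberately simplified toy version (a plain array instead of the driver's two red-black trees, and made-up names):
#include <stdio.h>

struct toy_ref { const void *node; unsigned desc; };

static struct toy_ref refs[16];
static unsigned nrefs;

/* Return the desc (handle) for 'node' in this toy proc, creating a new
 * ref if the node has not been referenced before. */
unsigned get_ref_for_node(const void *node)
{
    for (unsigned i = 0; i < nrefs; i++)
        if (refs[i].node == node)
            return refs[i].desc;          /* already referenced: reuse its desc */
    refs[nrefs].node = node;
    refs[nrefs].desc = nrefs + 1;         /* desc 0 stays reserved for the context manager */
    return refs[nrefs++].desc;
}

int main(void)
{
    int node_a, node_b;                   /* stand-ins for two binder_node addresses */
    printf("%u %u %u\n", get_ref_for_node(&node_a),
           get_ref_for_node(&node_b), get_ref_for_node(&node_a));   /* prints: 1 2 1 */
    return 0;
}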

(Figure) The checkService call flow: green marks user-space processes, blue marks kernel space.
checkService returns an IBinder used to communicate with the server process. This happens in two stages: in the first, the client sends a BC_TRANSACTION command; in the second, the server sends BC_REPLY to return the IBinder. The second stage is the interesting one: when servicemanager handles CHECK_SERVICE it looks up its service list and then calls bio_put_ref to build the object to return:
void bio_put_ref(struct binder_io *bio, void *ptr)
{
struct binder_object *obj;
if (ptr)
obj = bio_alloc_obj(bio);
else
obj = bio_alloc(bio, sizeof(*obj));
if (!obj)
return;
obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
obj->type = BINDER_TYPE_HANDLE;
obj->pointer = ptr;
obj->cookie = 0;
}
In this function we can see that obj->type is BINDER_TYPE_HANDLE, and obj->pointer here is exactly the fp->handle from the earlier analysis.
Now back to binder_transaction in the kernel.
<Code fragment 3>
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
struct binder_ref *ref = binder_get_ref(proc, fp->handle);
if (ref == NULL) {
binder_user_error("binder: %d:%d got "
"transaction with invalid "
"handle, %ld\n", proc->pid,
thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_failed;
}
if (ref->node->proc == target_proc) {
if (fp->type == BINDER_TYPE_HANDLE)
fp->type = BINDER_TYPE_BINDER;
else
fp->type = BINDER_TYPE_WEAK_BINDER;
fp->binder = ref->node->ptr;
fp->cookie = ref->node->cookie;
binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d -> node %d u%p\n",
ref->debug_id, ref->desc, ref->node->debug_id,
ref->node->ptr);
} else {
struct binder_ref *new_ref;
new_ref = binder_get_ref_for_node(target_proc, ref->node);
if (new_ref == NULL) {
#ifdef BINDER_MONITOR
binder_user_error("binder: %d:%d get new binder ref failed\n",
proc->pid, thread->pid);
#endif
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_for_node_failed;
}
fp->handle = new_ref->desc;
binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d -> ref %d desc %d (node %d)\n",
ref->debug_id, ref->desc, new_ref->debug_id,
new_ref->desc, ref->node->debug_id);
}
} break;
binder_get_ref is called to look up the ref for this handle in the current process. The ref is guaranteed to exist: during addService a ref node referencing the service's node was already added to servicemanager's proc, and the current process right now is servicemanager, so the ref will be found. If ref->node->proc == target_proc holds, the requesting process is the very process that provides the service, i.e. the server side, so what must be returned is a BBinder; per the flat_binder_object convention, type is set to BINDER_TYPE_BINDER and binder to ref->node->ptr. Otherwise the request came from a client and a BpBinder must be returned: binder_get_ref_for_node first checks whether target_proc already has a reference to ref->node, creating one if not, which yields a new desc. Since the fp pointer was allocated in the target process and already contains the data copied from the source process, i.e. servicemanager, fp->type is already BINDER_TYPE_HANDLE, so only fp->handle needs to be assigned.
The client process, usually via IPCThreadState, receives a BR_REPLY command when its ioctl read returns, and then calls
reply->ipcSetDataReference(
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(size_t),
freeBuffer, this);
to pick up the data; the flat_binder_object it reads is exactly what was set up above. reply is a Parcel instance, and ipcSetDataReference lets the flat_binder_object be deserialized later. Taking BpServiceManager as an example, its checkService function is:
virtual sp<IBinder> checkService( const String16& name) const
{
Parcel data, reply;
data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
data.writeString16(name);
remote()->transact(CHECK_SERVICE_TRANSACTION, data, &reply);
return reply.readStrongBinder();
}
reply.readStrongBinder() then yields a BpBinder, and that is how an IBinder is returned to the client process.
There are three typical use cases for Binder:
1.addService
2.checkService
3.Data transfer between a client and a server
1.addService. In code fragment 1, the current process is determined to be the one calling addService, i.e. the process providing the service, and the target process is servicemanager. Because the service is passed in as an argument, execution enters code fragment 2: the kernel adds a node representing the service to the Service process, adds to servicemanager's proc a ref node that references this node, and has servicemanager's proc assign that ref a handle. This handle is then returned to the servicemanager process.
2.checkService. When checkService returns an IBinder, the servicemanager process finds the service and its handle by name. After entering the kernel, code fragment 0 first determines that target_proc is the process that sent the request; since the Binder being returned travels in the arguments, execution enters code fragment 3, where the type is BINDER_TYPE_HANDLE. The kernel uses this handle to find the ref node in servicemanager's proc, follows it to the service node it references, then adds to the requesting process's proc a ref node referencing that same node; this ref also gets a handle, and finally a BpBinder containing that handle is returned to the user process. There are clearly two handles here, one in servicemanager and one in the process requesting the service; their values are not necessarily equal, but the node referenced by their refs is necessarily the same.
3.Data transfer between client and server. addService is how a service registers itself with servicemanager, and checkService is how a user process asks servicemanager for a service and gets back a BpBinder; after that, client and server exchange data directly. When data is sent, the kernel enters code fragment 1: from the handle it finds the current process's ref to the node, from that the node, and from the node the proc it lives in, i.e. the target proc, and copies the data across through the shared memory; the data is then processed, and if it contains Binder objects, code fragment 2 or 3 handles them. Likewise, when the service returns data the kernel enters code fragment 0, copies the argument data, and processes it, again going through code fragment 2 or 3 if Binder objects are included.
In fact, all of the switch/case code above operates on the arguments being sent or received. In other words, whenever a Binder instance, whether a BpBinder or a BBinder, is transferred between client and server, the kernel rewrites it, and understanding this is important. Real services use Binder quite flexibly: MediaPlayerService, for example, creates Service objects, i.e. BBinders, and returns them to the client side, yet by the time the client receives them they have become BpBinders. The reason is precisely the transformation the kernel performs on transferred Binder instances; the switch/case above shows how it happens.
III. References
1. Files and links
kernel/drivers/staging/android/binder.c
http://www.linuxidc.com/Linux/2011-07/39620.htm
http://www.linuxidc.com/Linux/2011-07/39269.htm
http://www.linuxidc.com/Linux/2011-07/39270p9.htm
http://www.linuxidc.com/Linux/2011-07/39271.htm
http://www.2cto.com/kf/201202/118538.html
2.binder_transaction
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply)
{
struct binder_transaction *t;
struct binder_work *tcomplete;
size_t *offp, *off_end;
struct binder_proc *target_proc;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
struct list_head *target_list;
wait_queue_head_t *target_wait;
struct binder_transaction *in_reply_to = NULL;
struct binder_transaction_log_entry *e;
uint32_t return_error;
e = binder_transaction_log_add(&binder_transaction_log);
e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
e->from_proc = proc->pid;
e->from_thread = thread->pid;
e->target_handle = tr->target.handle;
e->data_size = tr->data_size;
e->offsets_size = tr->offsets_size;
#ifdef BINDER_MONITOR
e->code = tr->code;
do_posix_clock_monotonic_gettime(&e->timestamp);
monotonic_to_bootbased(&e->timestamp);
#endif
if (reply) {
in_reply_to = thread->transaction_stack;
if (in_reply_to == NULL) {
binder_user_error("binder: %d:%d got reply transaction "
"with no transaction stack\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_empty_call_stack;
}
#ifdef BINDER_MONITOR
binder_cancel_bwdog(in_reply_to);
#endif
binder_set_nice(in_reply_to->saved_priority);
#ifdef RT_PRIO_INHERIT
if (rt_task(current) &&
!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))) {
struct sched_param param = {
.sched_priority = in_reply_to->saved_rt_prio,
};
mt_sched_setscheduler_nocheck(current,
in_reply_to->saved_policy, &param);
}
#endif
if (in_reply_to->to_thread != thread) {
binder_user_error("binder: %d:%d got reply transaction "
"with bad transaction stack,"
" transaction %d has target %d:%d\n",
proc->pid, thread->pid, in_reply_to->debug_id,
in_reply_to->to_proc ?
in_reply_to->to_proc->pid : 0,
in_reply_to->to_thread ?
in_reply_to->to_thread->pid : 0);
return_error = BR_FAILED_REPLY;
in_reply_to = NULL;
goto err_bad_call_stack;
}
thread->transaction_stack = in_reply_to->to_parent;
target_thread = in_reply_to->from;
if (target_thread == NULL) {
#ifdef BINDER_MONITOR
binder_user_error("binder: %d:%d got reply transaction "
"with bad transaction reply_from,"
" transaction %d has target %d:%d\n",
proc->pid, thread->pid, in_reply_to->debug_id,
in_reply_to->to_proc ?
in_reply_to->to_proc->pid : 0,
in_reply_to->to_thread ?
in_reply_to->to_thread->pid : 0);
#endif
return_error = BR_DEAD_REPLY;
goto err_dead_binder;
}
if (target_thread->transaction_stack != in_reply_to) {
binder_user_error("binder: %d:%d got reply transaction "
"with bad target transaction stack %d, "
"expected %d\n",
proc->pid, thread->pid,
target_thread->transaction_stack ?
target_thread->transaction_stack->debug_id : 0,
in_reply_to->debug_id);
return_error = BR_FAILED_REPLY;
in_reply_to = NULL;
target_thread = NULL;
goto err_dead_binder;
}
target_proc = target_thread->proc;
#ifdef BINDER_MONITOR
e->service[0] = '\0';
#endif
} else {
if (tr->target.handle) {
struct binder_ref *ref;
ref = binder_get_ref(proc, tr->target.handle);
if (ref == NULL) {
binder_user_error("binder: %d:%d got "
"transaction to invalid handle\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_invalid_target_handle;
}
target_node = ref->node;
} else {
target_node = binder_context_mgr_node;
if (target_node == NULL) {
#ifdef BINDER_MONITOR
binder_user_error("binder: %d:%d binder_context_mgr_node is NULL\n",
proc->pid, thread->pid);
#endif
return_error = BR_DEAD_REPLY;
goto err_no_context_mgr_node;
}
}
e->to_node = target_node->debug_id;
#ifdef BINDER_MONITOR
strcpy(e->service, target_node->name);
#endif
target_proc = target_node->proc;
if (target_proc == NULL) {
#ifdef BINDER_MONITOR
binder_user_error("binder: %d:%d target_proc is NULL\n",
proc->pid, thread->pid);
#endif
return_error = BR_DEAD_REPLY;
goto err_dead_binder;
}
if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
struct binder_transaction *tmp;
tmp = thread->transaction_stack;
if (tmp->to_thread != thread) {
binder_user_error("binder: %d:%d got new "
"transaction with bad transaction stack"
", transaction %d has target %d:%d\n",
proc->pid, thread->pid, tmp->debug_id,
tmp->to_proc ? tmp->to_proc->pid : 0,
tmp->to_thread ?
tmp->to_thread->pid : 0);
return_error = BR_FAILED_REPLY;
goto err_bad_call_stack;
}
while (tmp) {
if (tmp->from && tmp->from->proc == target_proc)
target_thread = tmp->from;
tmp = tmp->from_parent;
}
}
}
if (target_thread) {
e->to_thread = target_thread->pid;
target_list = &target_thread->todo;
target_wait = &target_thread->wait;
} else {
target_list = &target_proc->todo;
target_wait = &target_proc->wait;
}
e->to_proc = target_proc->pid;
/* TODO: reuse incoming transaction for reply */
t = kzalloc(sizeof(*t), GFP_KERNEL);
if (t == NULL) {
#ifdef BINDER_MONITOR
binder_user_error("binder: %d:%d transaction allocation failed\n",
proc->pid, thread->pid);
#endif
return_error = BR_FAILED_REPLY;
goto err_alloc_t_failed;
}
#ifdef BINDER_MONITOR
memcpy(&t->timestamp, &e->timestamp, sizeof(struct timespec));
t->checkpoint = BMONITOR_CHECKPOINT_TRANS_CREATE_SUCCESS;
/* a little differnt from real starting time */
do_gettimeofday(&t->tv);
if (!reply)
strcpy(t->service, target_node->name);
#endif
binder_stats_created(BINDER_STAT_TRANSACTION);
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (tcomplete == NULL) {
#ifdef BINDER_MONITOR
binder_user_error("binder: %d:%d tcomplete allocation failed\n",
proc->pid, thread->pid);
#endif
return_error = BR_FAILED_REPLY;
goto err_alloc_tcomplete_failed;
}
#ifdef BINDER_MONITOR
t->checkpoint = BMONITOR_CHECKPOINT_BWORK_CREATE_SUCCESS;
#endif
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
t->debug_id = ++binder_last_id;
e->debug_id = t->debug_id;
if (reply)
binder_debug(BINDER_DEBUG_TRANSACTION,
"binder: %d:%d BC_REPLY %d -> %d:%d, "
"data %p-%p size %zd-%zd\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_thread->pid,
tr->data.ptr.buffer, tr->data.ptr.offsets,
tr->data_size, tr->offsets_size);
else
binder_debug(BINDER_DEBUG_TRANSACTION,
"binder: %d:%d BC_TRANSACTION %d -> "
"%d - node %d, data %p-%p size %zd-%zd\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_node->debug_id,
tr->data.ptr.buffer, tr->data.ptr.offsets,
tr->data_size, tr->offsets_size);
#ifdef BINDER_MONITOR
t->fproc = proc->pid;
t->fthrd = thread->pid;
t->tproc = target_proc->pid;
t->tthrd = target_thread ? target_thread->pid : 0;
#endif
if (!reply && !(tr->flags & TF_ONE_WAY))
t->from = thread;
else
t->from = NULL;
t->sender_euid = proc->tsk->cred->euid;
t->to_proc = target_proc;
t->to_thread = target_thread;
t->code = tr->code;
t->flags = tr->flags;
t->priority = task_nice(current);
#ifdef RT_PRIO_INHERIT
t->rt_prio = current->rt_priority;
t->policy = current->policy;
t->saved_rt_prio = MAX_RT_PRIO;
#endif
t->buffer = binder_alloc_buf(target_proc, tr->data_size,
tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
if (t->buffer == NULL) {
#ifdef BINDER_MONITOR
binder_user_error("binder: %d:%d buffer allocation failed\n",
proc->pid, thread->pid);
#endif
return_error = BR_FAILED_REPLY;
goto err_binder_alloc_buf_failed;
}
#ifdef BINDER_MONITOR
t->checkpoint = BMONITOR_CHECKPOINT_BBUFF_CREATE_SUCCESS;
#endif
t->buffer->allow_user_free = 0;
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
if (target_node)
binder_inc_node(target_node, 1, 0, NULL);
offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
binder_user_error("binder: %d:%d got transaction with invalid "
"data ptr\n", proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
#ifdef BINDER_MONITOR
t->checkpoint = BMONITOR_CHECKPOINT_BBUFF_COPY_SUCCESS_1;
#endif
if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
binder_user_error("binder: %d:%d got transaction with invalid "
"offsets ptr\n", proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
#ifdef BINDER_MONITOR
t->checkpoint = BMONITOR_CHECKPOINT_BBUFF_COPY_SUCCESS_2;
#endif
if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) {
binder_user_error("binder: %d:%d got transaction with "
"invalid offsets size, %zd\n",
proc->pid, thread->pid, tr->offsets_size);
return_error = BR_FAILED_REPLY;
goto err_bad_offset;
}
#ifdef BINDER_MONITOR
t->checkpoint = BMONITOR_CHECKPOINT_BBUFF_COPY_SUCCESS_3;
#endif
off_end = (void *)offp + tr->offsets_size;
for (; offp < off_end; offp++) {
struct flat_binder_object *fp;
if (*offp > t->buffer->data_size - sizeof(*fp) ||
t->buffer->data_size < sizeof(*fp) ||
!IS_ALIGNED(*offp, sizeof(void *))) {
binder_user_error("binder: %d:%d got transaction with "
"invalid offset, %zd\n",
proc->pid, thread->pid, *offp);
return_error = BR_FAILED_REPLY;
goto err_bad_offset;
}
fp = (struct flat_binder_object *)(t->buffer->data + *offp);
switch (fp->type) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
struct binder_ref *ref;
struct binder_node *node = binder_get_node(proc, fp->binder);
if (node == NULL) {
node = binder_new_node(proc, fp->binder, fp->cookie);
if (node == NULL) {
#ifdef BINDER_MONITOR
binder_user_error("binder: %d:%d create new node failed\n",
proc->pid, thread->pid);
#endif
return_error = BR_FAILED_REPLY;
goto err_binder_new_node_failed;
}
node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
#ifdef BINDER_MONITOR
{
int i, len = 0;
//This is an addService() transaction identified by:
//fp->type == BINDER_TYPE_BINDER && tr->target.handle == 0
if (tr->target.handle == 0) {
//Hack into addService() payload:
//Service name string is located at MAGIC_SERVICE_NAME_OFFSET,
//and interleaved with character '\0'.
//For example, 'p', '\0', 'h', '\0', 'o', '\0', 'n', '\0', 'e'
for (i = 0 ; 2*i < tr->data_size ; i++) {
if (2*i < MAGIC_SERVICE_NAME_OFFSET) {
continue;
}
if (len >= (MAX_SERVICE_NAME_LEN-1)) {//Prevent array index overflow.
break;
}
len += sprintf((node->name)+len, "%c", *((char *)tr->data.ptr.buffer+2*i));
}
node->name[len] = '\0';
//printk("binder: %d:%d node->name=%s(len=%d)\n", proc->pid, thread->pid, node->name, len);
} else {
node->name[0] = '\0';
//printk("binder: %d:%d not to Service Manager\n", proc->pid, thread->pid);
}
}
#endif
}
if (fp->cookie != node->cookie) {
binder_user_error("binder: %d:%d sending u%p "
"node %d, cookie mismatch %p != %p\n",
proc->pid, thread->pid,
fp->binder, node->debug_id,
fp->cookie, node->cookie);
goto err_binder_get_ref_for_node_failed;
}
ref = binder_get_ref_for_node(target_proc, node);
if (ref == NULL) {
#ifdef BINDER_MONITOR
binder_user_error("binder: %d:%d get binder ref failed\n",
proc->pid, thread->pid);
#endif
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_for_node_failed;
}
if (fp->type == BINDER_TYPE_BINDER)
fp->type = BINDER_TYPE_HANDLE;
else
fp->type = BINDER_TYPE_WEAK_HANDLE;
fp->handle = ref->desc;
binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
&thread->todo);
binder_debug(BINDER_DEBUG_TRANSACTION,
" node %d u%p -> ref %d desc %d\n",
node->debug_id, node->ptr, ref->debug_id,
ref->desc);
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
struct binder_ref *ref = binder_get_ref(proc, fp->handle);
if (ref == NULL) {
binder_user_error("binder: %d:%d got "
"transaction with invalid "
"handle, %ld\n", proc->pid,
thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_failed;
}
if (ref->node->proc == target_proc) {
if (fp->type == BINDER_TYPE_HANDLE)
fp->type = BINDER_TYPE_BINDER;
else
fp->type = BINDER_TYPE_WEAK_BINDER;
fp->binder = ref->node->ptr;
fp->cookie = ref->node->cookie;
binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d -> node %d u%p\n",
ref->debug_id, ref->desc, ref->node->debug_id,
ref->node->ptr);
} else {
struct binder_ref *new_ref;
new_ref = binder_get_ref_for_node(target_proc, ref->node);
if (new_ref == NULL) {
#ifdef BINDER_MONITOR
binder_user_error("binder: %d:%d get new binder ref failed\n",
proc->pid, thread->pid);
#endif
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_for_node_failed;
}
fp->handle = new_ref->desc;
binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d -> ref %d desc %d (node %d)\n",
ref->debug_id, ref->desc, new_ref->debug_id,
new_ref->desc, ref->node->debug_id);
}
} break;
case BINDER_TYPE_FD: {
int target_fd;
struct file *file;
if (reply) {
if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
binder_user_error("binder: %d:%d got reply with fd, %ld, but target does not allow fds\n",
proc->pid, thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
goto err_fd_not_allowed;
}
} else if (!target_node->accept_fds) {
binder_user_error("binder: %d:%d got transaction with fd, %ld, but target does not allow fds\n",
proc->pid, thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
goto err_fd_not_allowed;
}
file = fget(fp->handle);
if (file == NULL) {
binder_user_error("binder: %d:%d got transaction with invalid fd, %ld\n",
proc->pid, thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
goto err_fget_failed;
}
target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
if (target_fd < 0) {
fput(file);
#ifdef BINDER_MONITOR
binder_user_error("binder: %d:%d no unused fd available, %d\n",
proc->pid, thread->pid, target_fd);
#endif
return_error = BR_FAILED_REPLY;
goto err_get_unused_fd_failed;
}
task_fd_install(target_proc, target_fd, file);
binder_debug(BINDER_DEBUG_TRANSACTION,
" fd %ld -> %d\n", fp->handle, target_fd);
/* TODO: fput? */
fp->handle = target_fd;
} break;
default:
binder_user_error("binder: %d:%d got transactio"
"n with invalid object type, %lx\n",
proc->pid, thread->pid, fp->type);
return_error = BR_FAILED_REPLY;
goto err_bad_object_type;
}
}
#ifdef BINDER_MONITOR
t->checkpoint = BMONITOR_CHECKPOINT_FLAT_BINDER_REBUILD_SUCCESS;
#endif
if (reply) {
BUG_ON(t->buffer->async_transaction != 0);
binder_pop_transaction(target_thread, in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) {
BUG_ON(t->buffer->async_transaction != 0);
t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
} else {
BUG_ON(target_node == NULL);
BUG_ON(t->buffer->async_transaction != 1);
if (target_node->has_async_transaction) {
target_list = &target_node->async_todo;
target_wait = NULL;
} else
target_node->has_async_transaction = 1;
}
t->work.type = BINDER_WORK_TRANSACTION;
list_add_tail(&t->work.entry, target_list);
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo);
#ifdef BINDER_MONITOR
t->checkpoint = BMONITOR_CHECKPOINT_BWORK_ENQUEUE_SUCCESS;
#endif
if (target_wait) {
#ifdef RT_PRIO_INHERIT
unsigned long flag;
wait_queue_t *curr, *next;
spin_lock_irqsave(&target_wait->lock, flag);
list_for_each_entry_safe(curr, next, &target_wait->task_list, task_list) {
unsigned flags = curr->flags;
struct task_struct *tsk = curr->private;
#ifdef BINDER_MONITOR
if (tsk->state == TASK_UNINTERRUPTIBLE) {
printk(KERN_ERR "binder: from %d:%d to %d:%d target thread state: %ld\n",
proc->pid, thread->pid,
tsk->tgid, tsk->pid, tsk->state);
show_stack(tsk, NULL);
}
#endif
if (!reply &&
t->policy == SCHED_RR && t->rt_prio > tsk->rt_priority &&
!(t->flags & TF_ONE_WAY)) {
struct sched_param param = {
.sched_priority = t->rt_prio,
};
t->saved_rt_prio = tsk->rt_priority;
t->saved_policy = tsk->policy;
mt_sched_setscheduler_nocheck(tsk, t->policy, &param);
}
if (curr->func(curr, TASK_INTERRUPTIBLE, 0, NULL) &&
(flags & WQ_FLAG_EXCLUSIVE)) {
break;
}
}
spin_unlock_irqrestore(&target_wait->lock, flag);
#else
wake_up_interruptible(target_wait);
#endif
}
#ifdef BINDER_MONITOR
t->checkpoint = BMONITOR_CHECKPOINT_ALREADY_WAKEUP_RECEIVER;
#ifndef USER_BUILD_KERNEL
if (!reply && e->service[0] != '\0' && e->target_handle != 0) {
printk(KERN_DEBUG
"binder: %d: %s from %d:%d (%s) to %d:%d (%s) node %d "
"handle %d (%s) size %d:%d code(decimal) %d start %lu.%02lu\n",
e->debug_id, (e->call_type == 2) ? "reply" :
((e->call_type == 1) ? "async" : "call "),
e->from_proc, e->from_thread, proc->tsk->comm,
e->to_proc, e->to_thread, target_proc->tsk->comm, e->to_node,
e->target_handle, e->service,
e->data_size, e->offsets_size, e->code,
(unsigned long)e->timestamp.tv_sec,
(e->timestamp.tv_nsec / (NSEC_PER_SEC / 1000000)));
}
#endif
t->wait_on = reply ? WAIT_ON_REPLY_READ : WAIT_ON_READ;
binder_queue_bwdog(t, (time_t)WAIT_BUDGET_READ);
#endif
return;
err_get_unused_fd_failed:
err_fget_failed:
err_fd_not_allowed:
err_binder_get_ref_for_node_failed:
err_binder_get_ref_failed:
err_binder_new_node_failed:
err_bad_object_type:
err_bad_offset:
err_copy_data_failed:
binder_transaction_buffer_release(target_proc, t->buffer, offp);
t->buffer->transaction = NULL;
binder_free_buf(target_proc, t->buffer);
err_binder_alloc_buf_failed:
kfree(tcomplete);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
#ifdef BINDER_MONITOR
binder_cancel_bwdog(t);
#endif
kfree(t);
binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
"binder: %d:%d transaction failed %d, size %zd-%zd\n",
proc->pid, thread->pid, return_error,
tr->data_size, tr->offsets_size);
{
struct binder_transaction_log_entry *fe;
fe = binder_transaction_log_add(&binder_transaction_log_failed);
*fe = *e;
}
BUG_ON(thread->return_error != BR_OK);
if (in_reply_to) {
thread->return_error = BR_TRANSACTION_COMPLETE;
binder_send_failed_reply(in_reply_to, return_error);
} else
thread->return_error = return_error;
}
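The tail of binder_transaction() above makes two queuing decisions that are easy to miss: a synchronous call is pushed onto the calling thread's transaction_stack (and popped again when the reply comes back), while a one-way call is parked on the node's async_todo if another async transaction is already in flight, in which case no one is woken. The following stand-alone user-space sketch mimics just that logic; every name in it (fake_txn, fake_node, send_sync, send_oneway, the fixed-size arrays) is invented for illustration and is not driver code.

/* sketch.c - an illustrative model of binder_transaction()'s queuing logic.
 * NOT driver code: all types and helpers here are made up to visualize the
 * branches above.
 */
#include <stdio.h>
#include <stddef.h>

struct fake_txn {
	int debug_id;
	struct fake_txn *from_parent;	/* models t->from_parent */
};

struct fake_node {			/* models the target binder_node */
	int has_async_transaction;
	struct fake_txn *async_todo[8];	/* models node->async_todo */
	int async_count;
};

struct fake_thread {
	struct fake_txn *transaction_stack;	/* models thread->transaction_stack */
	struct fake_txn *todo[8];		/* models the target todo list */
	int todo_count;
};

/* Synchronous call (not a reply, not TF_ONE_WAY): push the transaction on
 * the caller's stack so the eventual reply can find its way back. */
static void send_sync(struct fake_thread *caller, struct fake_thread *server,
		      struct fake_txn *t)
{
	t->from_parent = caller->transaction_stack;
	caller->transaction_stack = t;
	server->todo[server->todo_count++] = t;
}

/* Reply received: pop the matching entry, mirroring binder_pop_transaction(). */
static void recv_reply(struct fake_thread *caller)
{
	caller->transaction_stack = caller->transaction_stack->from_parent;
}

/* One-way call: only one async transaction per node goes on the main todo
 * list; later ones are parked on async_todo and nobody is woken for them. */
static void send_oneway(struct fake_node *node, struct fake_thread *server,
			struct fake_txn *t)
{
	if (node->has_async_transaction) {
		node->async_todo[node->async_count++] = t;	/* target_wait = NULL */
		return;
	}
	node->has_async_transaction = 1;
	server->todo[server->todo_count++] = t;
}

int main(void)
{
	struct fake_thread caller = {0}, server = {0};
	struct fake_node node = {0};
	struct fake_txn sync1 = {1, NULL}, async1 = {2, NULL}, async2 = {3, NULL};

	send_sync(&caller, &server, &sync1);
	printf("sync call sent, caller stack top = txn %d\n",
	       caller.transaction_stack->debug_id);
	recv_reply(&caller);
	printf("reply received, caller stack empty = %d\n",
	       caller.transaction_stack == NULL);

	send_oneway(&node, &server, &async1);
	send_oneway(&node, &server, &async2);
	printf("server todo: %d entries, async_todo: %d parked\n",
	       server.todo_count, node.async_count);
	return 0;
}

In the real driver, entries parked on async_todo are only moved onto a thread's todo list when the receiver frees the previous one-way buffer with BC_FREE_BUFFER; this is how one-way transactions to the same binder_node are serialized.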