android(5)
2011-7-16 17:22:52
BC_FREE_BUFFER
binder_buffer_lookup
buffer的结构体
/* Kernel-side descriptor for one buffer inside a process's binder mmap area.
 * A buffer lives on the per-process `entry` list (ordered by address) and on
 * one of two red-black trees via `rb_node`, depending on its `free` state. */
struct binder_buffer {
struct list_head entry; /* free and allocated entries by address */
struct rb_node rb_node; /* free entry by size or allocated entry */
/* by address */
unsigned free:1; /* 1 = buffer is currently unallocated */
unsigned allow_user_free:1; /* userspace may release it (BC_FREE_BUFFER) */
unsigned async_transaction:1; /* buffer carries an async (one-way) transaction */
unsigned debug_id:29; /* id for debug logging; packed into the same word */
struct binder_transaction *transaction; /* owning transaction, if any */
struct binder_node *target_node; /* node the transaction was sent to, if any */
size_t data_size; /* bytes of payload in data[] */
size_t offsets_size; /* bytes of object-offset array following the payload */
uint8_t data[0]; /* payload follows in-line (GNU zero-length array) */
};
将buffer从树上找出来
判断是否是所有用户都已经释放了
如果没有就返回失败
/* BC_FREE_BUFFER: if the freed buffer carried an async transaction to a
 * target node, either clear the node's in-flight-async flag (queue empty)
 * or promote the next queued async work item onto this thread's todo list,
 * so async transactions to a node are delivered one at a time. */
if (buffer->async_transaction && buffer->target_node) {
BUG_ON(!buffer->target_node->has_async_transaction);
if (list_empty(&buffer->target_node->async_todo))
buffer->target_node->has_async_transaction = 0;
else
/* move the next pending async work to the current thread */
list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
}
如果这个buffer有异步传输,并且有target_node
就检查目标的async_todo队列:如果为空就清除has_async_transaction标志,否则将async_todo.next移动到thread的todo中
binder_transaction_buffer_release
释放传输buffer
如果buffer有目标节点,就将目标节点的强引用计数减一
/* binder_transaction_buffer_release: the buffer held a strong reference
 * on its target node for the duration of the transaction — drop it now. */
if (buffer->target_node)
binder_dec_node(buffer->target_node, 1, 0);
/* Flattened representation of a binder object as it is embedded in a
 * transaction buffer; the union holds either a local pointer or a
 * driver-assigned handle depending on `type`. */
struct flat_binder_object {
/* 8 bytes for large_flat_header. */
unsigned long type; /* selects which union member is valid */
unsigned long flags;
/* 8 bytes of data. */
union {
void *binder; /* local object */
signed long handle; /* remote object */
};
/* extra data associated with local object */
void *cookie;
};
透出来吧
再看一下服务管理
service manager 如何向服务注册?
关注其main方法
struct binder_state
{
int fd;
void *mapped;
unsigned mapsize;
};
这个结构挺简单的
binder_open
/*
 * Open /dev/binder and map `mapsize` bytes of it read-only into this
 * process, returning a freshly allocated binder_state on success.
 *
 * Returns 0 (NULL) on any failure; errno is set to ENOMEM when the
 * allocation itself fails, otherwise reflects the failing syscall.
 * Ownership of the returned state transfers to the caller.
 */
struct binder_state *binder_open(unsigned mapsize)
{
    struct binder_state *bs = malloc(sizeof(*bs));

    if (!bs) {
        errno = ENOMEM;
        return 0;
    }

    bs->fd = open("/dev/binder", O_RDWR);
    if (bs->fd < 0) {
        fprintf(stderr,"binder: cannot open device (%s)\n",
                strerror(errno));
        free(bs);
        return 0;
    }

    bs->mapsize = mapsize;
    /* Read-only, private mapping: the kernel driver fills it with
     * incoming transaction data. */
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
    if (bs->mapped == MAP_FAILED) {
        fprintf(stderr,"binder: cannot map device (%s)\n",
                strerror(errno));
        close(bs->fd);
        free(bs);
        return 0;
    }

    /* TODO: check version */
    return bs;
}
映射了128K的空间
现在看一下该杂项设备的map方法
binder_mmap
/* binder_mmap: clamp the requested mapping so no process maps more than
 * 4 MB of binder buffer space. */
if ((vma->vm_end - vma->vm_start) > SZ_4M)
vma->vm_end = vma->vm_start + SZ_4M;
首先映射的地址空间不能超过4M
检查设置映射标志
如果binder_proc的buffer字段已经被设置
使用 get_vm_area 从内核中临时映射一块线性地址
user_buffer_offset记录了用户线性地址和内核线性地址之间的差值
到现在为止,只是保留了线性地址空间
vma->vm_ops = &binder_vm_ops; 绑定
调用 binder_update_page_range 为地址空间先分配一个page吧
/* binder_update_page_range: splice one newly allocated page into the
 * kernel vm area reserved earlier by get_vm_area. The extra PAGE_SIZE
 * in tmp_area.size is for a trailing guard page (per the original
 * comment); only one real page is mapped per iteration. */
tmp_area.addr = page_addr;
tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
page_array_ptr = page;
ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
map_vm_area 关联到了一个物理块
注意 get_vm_area 在内核保留一个内核地址空间
map_vm_area 为一块内核地址空间来map内存
vm_insert_page 让用户保留的地址空间也映射过来
注意到外层的for循环
一个个page的进行映射,将128K都映射完
到此 用户层的binder_open 操作完毕
接下来调用
/*
 * Ask the binder driver to make this process the context manager
 * (the service manager, reachable as handle 0).
 * Returns the ioctl result directly: 0 on success, -1 on failure
 * with errno set by the driver.
 */
int binder_become_context_manager(struct binder_state *bs)
{
    int res;

    res = ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
    return res;
}
向底层发送 BINDER_SET_CONTEXT_MGR 消息