Lab3: page tables

Overview

  • 本 lab 关于页表(page table),涉及到 xv6 的虚拟内存、物理内存、三级页表、内存访问权限等内容
    • Speed up system calls:要在内核和用户态之间创建一个共享的只读页,使用户态能够直接读取内核态写入的数据,从而加速系统调用
    • Print a page table:要在 exec 中插入一个打印函数,使得 xv6 启动时会打印首个进程的页表信息。我们要实现这个打印函数,将三级页表的所有可用 PTE 均打印出来
    • Detect which pages have been accessed:利用 PTE_A 位,监控有哪些 PTE 被访问(read/write)过,将结果返回给用户态
  • 用xv6-book上的一张图来展示xv6的页表设计
    • image.png|475
    • 虚拟地址只用了39位
    • 页表是一个含 $2^{27}$ 个页表项(PTE)的数组,分三级来排布,每一级有 512 个 PTE
    • 每一个 PTE 包含 44 位的物理页号(PPN)和 10 位的 Flags
    • 第一、二级的PPN指向下一级页表的 PTE,而第三级的PPN指向物理内存
    • 映射时,xv6 先通过 39 位虚拟地址的高 27 位来寻找第三级 PTE(通过三级页表映射),然后将其中的 44 位 PPN 和虚拟地址剩下的 12 位组合成 56 位的物理地址

Speed up system calls (easy)

  • 为了保护数据安全,用户态是不能直接读取内核态的数据,而是要通过系统调用。如果创建一个可读 PTE 指向一块内存,该 PTE 是用户态和内核态共享的,那么用户态就可以直接读取这块内核数据,而无需经过复杂的系统调用
  • 以 getpid 为例,proc结构是内核态数据,用户态无法直接读取,因此需要通过系统调用 getpid 来读取 pid。
    • 所以需要我们创建一个共享 PTE,将虚拟地址 USYSCALL 映射到 pid 的物理地址,这样用户态直接读取 USYSCALL 就可以获取到 pid 了
// user/ulib.c
// Return the current process's pid by reading it straight from the
// read-only page the kernel maps at USYSCALL — no system call needed.
int
ugetpid(void)
{
  return ((struct usyscall *)USYSCALL)->pid;
}
  • 首先,我们 proc 新增一个字段,存放 usyscall 的地址
// kernel/proc.h 
struct proc 
{ 
   // ... existing fields ... 
   struct usyscall *usyscall;   // kernel VA of the per-process page shared read-only with user space (holds pid); allocated in allocproc, mapped at USYSCALL, freed in freeproc
}
  • 然后,在 allocproc 时,为其分配一块物理内存,参考已给出的Allocate a trapframe page(进程的陷阱帧的分配)代码
// kernel/proc.c
static struct proc*
allocproc(void)
{
  // ...
  // Allocate a trapframe page.
  if((p->trapframe = (struct trapframe *)kalloc()) == 0)
  {
    freeproc(p);
    release(&p->lock);
    return 0;
  }

  // Allocate the page that will be shared read-only with user space
  // (mapped at USYSCALL in proc_pagetable, freed in freeproc).
  #ifdef LAB_PGTBL 
  if ((p->usyscall = (struct usyscall*)kalloc()) == 0) 
  {
    freeproc(p);
    release(&p->lock);
    return 0;
  }
  // Publish the pid once; user code (ugetpid) reads it without a syscall.
  p->usyscall->pid = p->pid;
  #endif
  // ...
}
  • proc 有一个字段名为 pagetable,用于存放页表的地址。页表的初始化位于函数 proc_pagetable 中,通过 mappages 在页表中注册新的 PTE,参考 TRAPFRAME 的方式,将 USYSCALL 映射到 p->usyscall 中
    • 注意:这里的flag 位不仅仅要置位 PTE_R,还要置位 PTE_U,否则用户态无权访问
// kernel/proc.c
// Create a user page table for process p: trampoline and trapframe
// mappings plus (LAB_PGTBL) the shared usyscall page.
// Returns 0 on failure with all partial mappings rolled back.
pagetable_t
proc_pagetable(struct proc *p)
{
  pagetable_t pagetable;
  // ...
  // map the trapframe page just below the trampoline page, for
  // trampoline.S.
  if(mappages(pagetable, TRAPFRAME, PGSIZE,
              (uint64)(p->trapframe), PTE_R | PTE_W) < 0){
    uvmunmap(pagetable, TRAMPOLINE, 1, 0);
    uvmfree(pagetable, 0);
    return 0;
  }

  #ifdef LAB_PGTBL
  // Map the per-process usyscall page at USYSCALL.  PTE_U is required
  // in addition to PTE_R, otherwise user code cannot access the page.
  if(mappages(pagetable, USYSCALL, PGSIZE, (uint64)p->usyscall, PTE_R | PTE_U) < 0)
  {
    // Roll back the mappings that DID succeed.  Do not unmap USYSCALL
    // itself: that mapping was never created, and uvmunmap() panics on
    // a missing mapping.
    uvmunmap(pagetable, TRAMPOLINE, 1, 0);
    uvmunmap(pagetable, TRAPFRAME, 1, 0);
    uvmfree(pagetable, 0);
    return 0;   // was missing: caller must see the failure
  }
  #endif

  return pagetable;
}
  • 新增完映射后,我们需要在进程 free 时对其解映射,同样参考 TRAPFRAME 的代码
// kernel/proc.c
// Free a process's page table: first tear down the kernel-installed
// mappings (their physical frames are owned elsewhere — trampoline is
// shared, trapframe/usyscall are freed in freeproc), then free the
// user pages and the page-table pages themselves.
void
proc_freepagetable(pagetable_t pagetable, uint64 sz)
{
  uint64 kernel_vas[] = {
    TRAMPOLINE,
    TRAPFRAME,
#ifdef LAB_PGTBL
    USYSCALL,
#endif
  };
  int n = sizeof(kernel_vas) / sizeof(kernel_vas[0]);
  for (int k = 0; k < n; k++)
    uvmunmap(pagetable, kernel_vas[k], 1, 0);
  uvmfree(pagetable, sz);
}
  • 前面分配了空间,用完以后肯定要释放,因此需要在 freeproc 处将其释放,同样参考trapframe的实现
// kernel/proc.c
// Free the per-process kernel pages hanging off a proc structure.
static void
freeproc(struct proc *p)
{
  if(p->trapframe)
    kfree((void*)p->trapframe);
  p->trapframe = 0;
  
  #ifdef LAB_PGTBL
  // Release the page shared with user space (allocated in allocproc);
  // guard against NULL because kfree(0) panics in xv6.
  if (p->usyscall)
      kfree((void*)p->usyscall);
  p->usyscall = 0;
  #endif
  // ...
}

Print a page table (easy)

  • 实现一个内核函数 vmprint,其接收一个 pagetable,能够将其中所有的可用 PTE 的信息全部打印出来
  • 利用DFS实现(因为就是三级页表,所以直接用三层for循环嵌套也可以实现)
    • 在多级页表中,中间层的页表条目通常只设置存在位,而最底层的页表条目会设置存在位和权限位,由此可以作为递归终止判断条件(这在下面的结果中也可以得到印证)
// kernel/vm.c
// Recursively print every valid PTE reachable from this (sub-)page
// table.  depth is 1 at the root and selects the ".." indent prefix.
void
vmprint_dfs(pagetable_t pagetable, uint depth)
{
  static char* prefix[] = {"","..",".. ..",".. .. .."};

  for (int idx = 0; idx < 512; idx++)
  {
    pte_t entry = pagetable[idx];
    if ((entry & PTE_V) == 0)
      continue;
    uint64 pa = PTE2PA(entry);
    printf("%s%d: pte %p pa %p\n", prefix[depth], idx, entry, pa);
    // Leaf PTEs carry at least one of R/W/X; an interior entry has
    // none of them, so only then does pa point at a next-level table.
    if ((entry & (PTE_R | PTE_W | PTE_X)) == 0 && pa)
      vmprint_dfs((pagetable_t)pa, depth + 1);
  }
}

// Print an entire page table: a header line with the root address,
// then one line per valid PTE, indented by level (via vmprint_dfs).
void
vmprint(pagetable_t pt)
{
  printf("page table %p\n", pt);
  vmprint_dfs(pt, 1);
}
  • 在 exec 中当 pid 为1时调用 vmprint
// kernel/exec.c
int
exec(char *path, char **argv)
{
  // ...
  // Lab hook: dump the page table of the first user process (init,
  // pid 1) right after its image has been loaded.
  if(p->pid == 1)
    vmprint(p->pagetable);
  // ...
}
  • make qemu 时即可打印出首进程的页表信息。
xv6 kernel is booting

hart 1 starting
hart 2 starting
page table 0x0000000087f6b000
..0: pte 0x0000000021fd9c01 pa 0x0000000087f67000
.. ..0: pte 0x0000000021fd9801 pa 0x0000000087f66000
.. .. ..0: pte 0x0000000021fda01b pa 0x0000000087f68000
.. .. ..1: pte 0x0000000021fd9417 pa 0x0000000087f65000
.. .. ..2: pte 0x0000000021fd9007 pa 0x0000000087f64000
.. .. ..3: pte 0x0000000021fd8c17 pa 0x0000000087f63000
..255: pte 0x0000000021fda801 pa 0x0000000087f6a000
.. ..511: pte 0x0000000021fda401 pa 0x0000000087f69000
.. .. ..509: pte 0x0000000021fdcc13 pa 0x0000000087f73000
.. .. ..510: pte 0x0000000021fdd007 pa 0x0000000087f74000
.. .. ..511: pte 0x0000000020001c0b pa 0x0000000080007000
init: starting sh
$ 

Detect which pages have been accessed (hard)

  • 实现一个系统调用 sys_pgaccess,从一个虚拟地址对应的 PTE 开始,往下搜索一定数量的被访问过的页表,并把结果通过 mask 的方式返回给用户。每当 sys_pgaccess 调用一次,页表被访问标志就要清 0
  • 怎么知道哪些页表被访问了?
    • 每个 PTE 有个 PTE_A 位,该位被置 1 则说明被访问过。置位操作由硬件完成,无需我们考虑。但是,硬件只能做到置位,无法做到复位。因此每次 sys_pgaccess 时要手动将 PTE_A 复位为 0
  • 怎么通过虚拟地址依次遍历后续 PTE?
    • 虚拟页是连续的,每一页大小为 PGSIZE,因此只要将虚拟地址按 PGSIZE 累加,再用 walk 查询,即可依次得到后续各页对应的 PTE
walk(p->pagetable, va + i * PGSIZE, 0); // 后续第i个PTE
  • PTE_A 是第 6 位
    • image.png|475
  • sys_pgaccess,接收三个参数,分别为:起始虚拟地址、 遍历页数目、 用户存储返回结果的地址。因为其是系统调用,故参数的传递需要通过 argaddr、argint 来完成
    • 通过不断的 walk 来获取连续的 PTE,然后检查其 PTE_A 位,如果为 1 则记录在 mask 中,随后将 PTE_A 手动清 0。最后,通过 copyout 将结果拷贝给用户即可
// kernel/sysproc.c
int
sys_pgaccess(void)
{
  struct proc* p = myproc();

  uint64 va;              // 待检测页表起始地址
  int num_pages;          // 待检测页表的页数
  uint64 access_result;   // 记录检测结果掩码的地址
  uint64 result = 0;      // 临时记录结果

  // 从用户栈中获取参数
  argaddr(0, &va);  
  argint(1, &num_pages);
  argaddr(2, &access_result);

  if (num_pages <= 0 || num_pages > 512)
    return -1;

  // 遍历页表
  for (int i = 0; i < num_pages; i++)
  {
    pte_t* pte = walk(p->pagetable, va + i * PGSIZE, 0);
    if (pte && (*pte & PTE_V) && (*pte & PTE_A))
    {
      // 清除访问位
      *pte &= ~PTE_A;  
      result |= (1 << i);
    }
  }

  // 将检测结果写入用户栈
  copyout(p->pagetable, access_result, (char*)&result, sizeof(result));
  return 0;
}
#include <default_pmm.h> #include <defs.h> #include <error.h> #include <memlayout.h> #include <mmu.h> #include <pmm.h> #include <stdio.h> #include <string.h> #include <sync.h> #include <vmm.h> #include <sw.h> // virtual address of physical page array struct Page *pages; // amount of physical memory (in pages) size_t npage = 0; // The kernel image is mapped at VA=KERNBASE and PA=info.base uint_t va_pa_offset; // memory starts at 0x0 in SW const size_t nbase = DRAM_BASE / PGSIZE; // virtual address of boot-time page directory pde_t *boot_pgdir = NULL; // physical address of boot-time page directory uintptr_t boot_cr3; // physical memory management const struct pmm_manager *pmm_manager; static void check_alloc_page(void); static void check_pgdir(void); static void check_boot_pgdir(void); // init_pmm_manager - initialize a pmm_manager instance static void init_pmm_manager(void) { pmm_manager = &default_pmm_manager; cprintf("memory management: %s\n", pmm_manager->name); pmm_manager->init(); } // init_memmap - call pmm->init_memmap to build Page struct for free memory static void init_memmap(struct Page *base, size_t n) { pmm_manager->init_memmap(base, n); } // alloc_pages - call pmm->alloc_pages to allocate a continuous n*PAGESIZE // memory struct Page *alloc_pages(size_t n) { struct Page *page = NULL; bool intr_flag; local_intr_save(intr_flag); { page = pmm_manager->alloc_pages(n); } local_intr_restore(intr_flag); return page; } // free_pages - call pmm->free_pages to free a continuous n*PAGESIZE memory void free_pages(struct Page *base, size_t n) { bool intr_flag; local_intr_save(intr_flag); { pmm_manager->free_pages(base, n); } local_intr_restore(intr_flag); } // nr_free_pages - call pmm->nr_free_pages to get the size (nr*PAGESIZE) // of current free memory size_t nr_free_pages(void) { size_t ret; bool intr_flag; local_intr_save(intr_flag); { ret = pmm_manager->nr_free_pages(); } local_intr_restore(intr_flag); return ret; } /* page_init - initialize the physical 
memory management */ static void page_init(void) { va_pa_offset = PHYSICAL_MEMORY_OFFSET; uint64_t mem_begin = KERNEL_BEGIN_PADDR; uint64_t mem_size = PHYSICAL_MEMORY_END - KERNEL_BEGIN_PADDR; uint64_t mem_end = PHYSICAL_MEMORY_END; //硬编码取代 sbi_query_memory()接口 cprintf("membegin %llx memend %llx mem_size %llx\n",mem_begin, mem_end, mem_size); cprintf("physcial memory map:\n"); cprintf(" memory: 0x%08lx, [0x%08lx, 0x%08lx].\n", mem_size, mem_begin, mem_end - 1); uint64_t maxpa = mem_end; if (maxpa > KERNTOP) { maxpa = KERNTOP; } extern char end[]; npage = maxpa / PGSIZE; // BBL has put the initial page table at the first available page after the // kernel // so stay away from it by adding extra offset to end pages = (struct Page *)ROUNDUP((void *)end, PGSIZE); pages = KADDR(PADDR(pages)); for (size_t i = 0; i < npage - nbase; i++) { SetPageReserved(pages + i); } uintptr_t freemem = PADDR((uintptr_t)pages + sizeof(struct Page) * (npage - nbase)); mem_begin = ROUNDUP(freemem, PGSIZE); mem_end = ROUNDDOWN(mem_end-1, PGSIZE); if (freemem < mem_end) { init_memmap(pa2page(mem_begin), (mem_end - mem_begin) / PGSIZE); } } static void enable_paging(void) { lcr3(boot_cr3); } // boot_alloc_page - allocate one page using pmm->alloc_pages(1) // return value: the kernel virtual address of this allocated page // note: this function is used to get the memory for PDT(Page Directory // Table)&PT(Page Table) static void *boot_alloc_page(void) { struct Page *p = alloc_page(); if (p == NULL) { panic("boot_alloc_page failed.\n"); } return page2kva(p); } // pmm_init - setup a pmm to manage physical memory, build PDT&PT to setup // paging mechanism // - check the correctness of pmm & paging mechanism, print PDT&PT void pmm_init(void) { // We need to alloc/free the physical memory (granularity is 4KB or other // size). // So a framework of physical memory manager (struct pmm_manager)is defined // in pmm.h // First we should init a physical memory manager(pmm) based on the // framework. 
// Then pmm can alloc/free the physical memory. // Now the first_fit/best_fit/worst_fit/buddy_system pmm are available. init_pmm_manager(); // detect physical memory space, reserve already used memory, // then use pmm->init_memmap to create free page list page_init(); // use pmm->check to verify the correctness of the alloc/free function in a // pmm check_alloc_page(); // create boot_pgdir, an initial page directory(Page Directory Table, PDT) extern char boot_page_table[]; boot_pgdir = (pte_t*)boot_page_table; boot_cr3 = PADDR(boot_pgdir); cprintf("nr_free_pages = %d\n",nr_free_pages()); check_pgdir(); static_assert(KERNBASE % PTSIZE == 0 && KERNTOP % PTSIZE == 0); enable_paging(); // now the basic virtual memory map(see memalyout.h) is established. // check the correctness of the basic virtual memory map. check_boot_pgdir(); } // get_pte - get pte and return the kernel virtual address of this pte for la // - if the PT contians this pte didn&#39;t exist, alloc a page for PT // parameter: // pgdir: the kernel virtual base address of PDT // la: the linear address need to map // create: a logical value to decide if alloc a page for PT // return vaule: the kernel virtual address of this pte pte_t *get_pte(pde_t *pgdir, uintptr_t la, bool create) { /* * If you need to visit a physical address, please use KADDR() * please read pmm.h for useful macros * * Maybe you want help comment, BELOW comments can help you finish the code * * Some Useful MACROs and DEFINEs, you can use them in below implementation. * MACROs or Functions: * PDX(la) = the index of page directory entry of VIRTUAL ADDRESS la. * KADDR(pa) : takes a physical address and returns the corresponding * kernel virtual address. 
* set_page_ref(page,1) : means the page be referenced by one time * page2pa(page): get the physical address of memory which this (struct * Page *) page manages * struct Page * alloc_page() : allocation a page * memset(void *s, char c, size_t n) : sets the first n bytes of the * memory area pointed by s * to the specified value c. * DEFINEs: * PTE_P 0x001 // page table/directory entry * flags bit : Present * PTE_W 0xc00 // page table/directory entry * flags bit : Writeable * PTE_U 0x8800 // page table/directory entry * flags bit : User can access */ pde_t *pdep2 = &pgdir[PDX2(la)]; if (!(*pdep2 & PTE_V)) { struct Page *page; if (!create || (page = alloc_page()) == NULL) { return NULL; } page_ref_inc(page); uintptr_t pa = page2pa(page); memset(KADDR(pa), 0, PGSIZE); *pdep2 = pte_create(page2ppn(page), PTE_U | PTE_V); } pde_t *pdep1 = &((pde_t *)KADDR(PDE_ADDR(*pdep2)))[PDX1(la)]; if (!(*pdep1 & PTE_V)) { struct Page *page; if (!create || (page = alloc_page()) == NULL) { return NULL; } page_ref_inc(page); uintptr_t pa = page2pa(page); memset(KADDR(pa), 0, PGSIZE); *pdep1 = pte_create(page2ppn(page), PTE_U | PTE_V); } pde_t *pdep0 = &((pde_t *)KADDR(PDE_ADDR(*pdep1)))[PDX0(la)]; if (!(*pdep0 & PTE_V)) { struct Page *page; if (!create || (page = alloc_page()) == NULL) { return NULL; } // page_ref_inc(page); uintptr_t pa = page2pa(page); memset(KADDR(pa), 0, PGSIZE); *pdep0 = pte_create(page2ppn(page), PTE_U | PTE_V); } return &((pte_t *)KADDR(PDE_ADDR(*pdep0)))[PTX(la)]; } // get_page - get related Page struct for linear address la using PDT pgdir struct Page *get_page(pde_t *pgdir, uintptr_t la, pte_t **ptep_store) { pte_t *ptep = get_pte(pgdir, la, 0); if (ptep_store != NULL) { *ptep_store = ptep; } if (ptep != NULL && *ptep & PTE_V) { return pte2page(*ptep); } return NULL; } // page_remove_pte - free an Page sturct which is related linear address la // - and clean(invalidate) pte which is related linear address la // note: PT is changed, so the TLB need to be 
invalidate static inline void page_remove_pte(pde_t *pgdir, uintptr_t la, pte_t *ptep) { /* * * Please check if ptep is valid, and tlb must be manually updated if * mapping is updated * * Maybe you want help comment, BELOW comments can help you finish the code * * Some Useful MACROs and DEFINEs, you can use them in below implementation. * MACROs or Functions: * struct Page *page pte2page(*ptep): get the according page from the * value of a ptep * free_page : free a page * page_ref_dec(page) : decrease page->ref. NOTICE: ff page->ref == 0 , * then this page should be free. * tlb_invalidate(pde_t *pgdir, uintptr_t la) : Invalidate a TLB entry, * but only if the page tables being * edited are the ones currently in use by the * processor. * DEFINEs: * PTE_P 0x001 // page table/directory entry * flags bit : Present */ if (*ptep & PTE_V) { //(1) check if this page table entry is struct Page *page = pte2page(*ptep); //(2) find corresponding page to pte page_ref_dec(page); //(3) decrease page reference if (page_ref(page) == 0) { //(4) and free this page when page reference reachs 0 free_page(page); } *ptep = 0; //(5) clear second page table entry tlb_invalidate(pgdir, la); //(6) flush tlb } } void page_remove_ptx(pde_t *pgdir, uintptr_t la) { pte_t *pte = &((pte_t *)KADDR(PDE_ADDR(*pgdir)))[PTX(la)]; if (!(*pte & PTE_V)){ return; } page_remove_pte(NULL, la, pte); //todo panic("Not Implemented!\n"); } void page_remove_pdx0(pde_t *pgdir, uintptr_t la) { pde_t *pdep0 = &((pde_t *)KADDR(PDE_ADDR(*pgdir)))[PDX0(la)]; if (!(*pdep0 & PTE_V)){ return; } page_remove_ptx(pdep0, la); //lab6 todo panic("Not Implemented!\n"); } void page_remove_pdx1(pde_t *pgdir, uintptr_t la) { pde_t *pdep1 = &((pde_t *)KADDR(PDE_ADDR(*pgdir)))[PDX1(la)]; if (!(*pdep1 & PTE_V)){ return; } page_remove_pdx0(pdep1, la); //lab6 todo panic("Not Implemented!\n"); } void page_remove_pdx2(pde_t *pgdir, uintptr_t la) { pde_t *pdep2 = &pgdir[PDX2(la)]; if (!(*pdep2 & PTE_V)){ return; } page_remove_pdx1(pdep2, 
la); //lab6 todo panic("Not Implemented!\n"); } // page_remove - free an Page which is related linear address la and has an // validated pte void page_remove(pde_t *pgdir, uintptr_t la) { page_remove_pdx2(pgdir,la); } // page_insert - build the map of phy addr of an Page with the linear addr la // paramemters: // pgdir: the kernel virtual base address of PDT // page: the Page which need to map // la: the linear address need to map // perm: the permission of this Page which is setted in related pte // return value: always 0 // note: PT is changed, so the TLB need to be invalidate int page_insert(pde_t *pgdir, struct Page *page, uintptr_t la, uint32_t perm) { pte_t *ptep = get_pte(pgdir, la, 1); if (ptep == NULL) { return -E_NO_MEM; } page_ref_inc(page); if (*ptep & PTE_V) { struct Page *p = pte2page(*ptep); if (p == page) { page_ref_dec(page); } else { page_remove_pte(pgdir, la, ptep); } } else{ page_ref_inc(kva2page(ptep)); } *ptep = pte_create(page2ppn(page), PTE_V | perm); tlb_invalidate(pgdir, la); return 0; } // invalidate a TLB entry, but only if the page tables being // edited are the ones currently in use by the processor. 
void tlb_invalidate(pde_t *pgdir, uintptr_t la) { flush_tlb(); } // pgdir_alloc_page - call alloc_page & page_insert functions to // - allocate a page size memory & setup an addr map // - pa<->la with linear address la and the PDT pgdir struct Page *pgdir_alloc_page(pde_t *pgdir, uintptr_t la, uint32_t perm) { struct Page *page = alloc_page(); if (page != NULL) { if (page_insert(pgdir, page, la, perm) != 0) { free_page(page); return NULL; } } return page; } static void check_alloc_page(void) { pmm_manager->check(); cprintf("check_alloc_page() succeeded!\n"); } static void check_pgdir(void) { // assert(npage <= KMEMSIZE / PGSIZE); size_t nr_free_pages_store = nr_free_pages(); assert(npage <= KERNTOP / PGSIZE); assert(boot_pgdir != NULL && (uint32_t)PGOFF(boot_pgdir) == 0); assert(get_page(boot_pgdir, 0x0, NULL) == NULL); struct Page *p1, *p2; p1 = alloc_page(); assert(page_insert(boot_pgdir, p1, 0x0, 0) == 0); pte_t *ptep; assert((ptep = get_pte(boot_pgdir, 0x0, 0)) != NULL); assert(pte2page(*ptep) == p1); assert(page_ref(p1) == 1); ptep = (pte_t *)KADDR(PDE_ADDR(boot_pgdir[0])); ptep = (pte_t *)KADDR(PDE_ADDR(ptep[0])); ptep = (pte_t *)KADDR(PDE_ADDR(ptep[0])) + 1; assert(get_pte(boot_pgdir, PGSIZE, 0) == ptep); p2 = alloc_page(); assert(page_insert(boot_pgdir, p2, PGSIZE, PTE_U | PTE_W) == 0); assert((ptep = get_pte(boot_pgdir, PGSIZE, 0)) != NULL); assert(*ptep & PTE_U); assert(*ptep & PTE_W); assert(boot_pgdir[0] & PTE_U); assert(page_ref(p2) == 1); assert(page_insert(boot_pgdir, p1, PGSIZE, 0) == 0); assert(page_ref(p1) == 2); assert(page_ref(p2) == 0); assert((ptep = get_pte(boot_pgdir, PGSIZE, 0)) != NULL); assert(pte2page(*ptep) == p1); assert((*ptep & PTE_U) == 0); cprintf(" page_remove 0 \n"); page_remove(boot_pgdir, 0x0); assert(page_ref(p1) == 1); assert(page_ref(p2) == 0); cprintf(" page_remove PGSIZE \n"); page_remove(boot_pgdir, PGSIZE); assert(page_ref(p1) == 0); assert(page_ref(p2) == 0); //assert(page_ref(pde2page(boot_pgdir[0])) == 1); 
assert(page_ref(pde2page(boot_pgdir[0])) == 0); #if 0 //free pgdir pte_t *ptep2 = (pte_t *)KADDR(PDE_ADDR(boot_pgdir[0])); pte_t *ptep1 = (pte_t *)KADDR(PDE_ADDR(ptep2[0])); pte_t *ptep0 = (pte_t *)KADDR(PDE_ADDR(ptep1[0])); free_page(kva2page((void *)ptep0)); free_page(kva2page((void *)ptep1)); free_page(kva2page((void *)ptep2)); boot_pgdir[0] = 0; #endif assert(nr_free_pages_store == nr_free_pages()); cprintf("check_pgdir() succeeded!\n"); } static void check_boot_pgdir(void) { pte_t *ptep; int i; assert(boot_pgdir[0] == 0); size_t nr_free_pages_store = nr_free_pages(); struct Page *p; p = alloc_page(); assert(page_insert(boot_pgdir, p, 0x100, PTE_W | PTE_R) == 0); assert(page_ref(p) == 1); assert(page_insert(boot_pgdir, p, 0x100 + PGSIZE, PTE_W | PTE_R) == 0); assert(page_ref(p) == 2); const char *str = "ucore: Hello world!!"; strcpy((void *)0x100, str); assert(strcmp((void *)0x100, (void *)(0x100 + PGSIZE)) == 0); *(char *)(page2kva(p) + 0x100) = &#39;\0&#39;; assert(strlen((const char *)0x100) == 0); page_remove(boot_pgdir, 0x100); page_remove(boot_pgdir, 0x100 + PGSIZE); #if 0 //free pgdir pte_t *ptep2 = (pte_t *)KADDR(PDE_ADDR(boot_pgdir[0])); pte_t *ptep1 = (pte_t *)KADDR(PDE_ADDR(ptep2[0])); pte_t *ptep0 = (pte_t *)KADDR(PDE_ADDR(ptep1[0])); free_page(kva2page((void *)ptep0)); free_page(kva2page((void *)ptep1)); free_page(kva2page((void *)ptep2)); boot_pgdir[0] = 0; #endif assert(nr_free_pages_store == nr_free_pages()); cprintf("check_boot_pgdir() succeeded!\n"); } void *kmalloc(size_t n) { void *ptr = NULL; struct Page *base = NULL; assert(n > 0 && n < 1024 * 0124); int num_pages = (n + PGSIZE - 1) / PGSIZE; base = alloc_pages(num_pages); assert(base != NULL); ptr = page2kva(base); return ptr; } void kfree(void *ptr, size_t n) { assert(n > 0 && n < 1024 * 0124); assert(ptr != NULL); struct Page *base = NULL; int num_pages = (n + PGSIZE - 1) / PGSIZE; base = kva2page(ptr); free_pages(base, num_pages); }完成实验并给我要修改的代码部分
最新发布
11-02
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值