【xv6】Lab: locks

Memory allocator

My Solution:

  • Turn kmem.freelist into kmem.freelist[NCPU] and kmem.lock into kmem.lock[NCPU], so each CPU owns its own freelist and lock
  • kinit() must initialize every CPU's kmem.freelist[NCPU] and kmem.lock[NCPU], so kfree is split into two functions, _kfree(int core, void *pa) and kfree(void *pa); freerange() can then pass a core id down to _kfree()
  • kalloc is likewise split into _kalloc(int core) and kalloc(void), so that when the current CPU's freelist is empty we can steal a page from another CPU's freelist (see the caveat after the patch)

Patch

diff --git a/kernel/kalloc.c b/kernel/kalloc.c
old mode 100644
new mode 100755
index fa6a0ac..484bf9b
--- a/kernel/kalloc.c
+++ b/kernel/kalloc.c
@@ -9,7 +9,8 @@
 #include "riscv.h"
 #include "defs.h"
 
-void freerange(void *pa_start, void *pa_end);
+void freerange(int core, void *pa_start, void *pa_end);
+void _kfree(int core, void *pa);
 
 extern char end[]; // first address after kernel.
                    // defined by kernel.ld.
@@ -19,24 +20,44 @@ struct run {
 };
 
 struct {
-  struct spinlock lock;
-  struct run *freelist;
+  struct spinlock lock[NCPU];
+  struct run *freelist[NCPU];
 } kmem;
 
 void
 kinit()
 {
-  initlock(&kmem.lock, "kmem");
-  freerange(end, (void*)PHYSTOP);
+  uint64 nmem, membeg, memend;
+
+  nmem = PHYSTOP - (uint64)end;
+  if (nmem % NCPU != 0)
+    panic("nmem % NCPU != 0");
+
+  nmem /= NCPU;
+  membeg = (uint64) end;
+
+  for(int i = 0; i < NCPU; i++){
+    initlock(&kmem.lock[i], "kmem");
+    memend = membeg + nmem;
+    freerange(i, (void*) membeg, (void*) memend);
+    membeg = memend;
+  }
 }
 
 void
-freerange(void *pa_start, void *pa_end)
+freerange(int core, void *pa_start, void *pa_end)
 {
   char *p;
   p = (char*)PGROUNDUP((uint64)pa_start);
   for(; p + PGSIZE <= (char*)pa_end; p += PGSIZE)
-    kfree(p);
+    _kfree(core, p);
 }
 
 // Free the page of physical memory pointed at by v,
@@ -44,7 +67,7 @@ freerange(void *pa_start, void *pa_end)
 // call to kalloc().  (The exception is when
 // initializing the allocator; see kinit above.)
 void
-kfree(void *pa)
+_kfree(int core, void *pa)
 {
   struct run *r;
 
@@ -56,27 +79,53 @@ kfree(void *pa)
 
   r = (struct run*)pa;
 
-  acquire(&kmem.lock);
-  r->next = kmem.freelist;
-  kmem.freelist = r;
-  release(&kmem.lock);
+  acquire(&kmem.lock[core]);
+  r->next = kmem.freelist[core];
+  kmem.freelist[core] = r;
+  release(&kmem.lock[core]);
+}
+
+void
+kfree(void *pa)
+{
+  int core = cpuid();
+  _kfree(core, pa);
 }
 
 // Allocate one 4096-byte page of physical memory.
 // Returns a pointer that the kernel can use.
 // Returns 0 if the memory cannot be allocated.
-void *
-kalloc(void)
+struct run *
+_kalloc(int core)
 {
   struct run *r;
 
-  acquire(&kmem.lock);
-  r = kmem.freelist;
+  acquire(&kmem.lock[core]);
+  r = kmem.freelist[core];
   if(r)
-    kmem.freelist = r->next;
-  release(&kmem.lock);
+    kmem.freelist[core] = r->next;
+  release(&kmem.lock[core]);
 
   if(r)
     memset((char*)r, 5, PGSIZE); // fill with junk
-  return (void*)r;
+  return r;
 }
+
+void *
+kalloc(void)
+{
+  int core = cpuid();
+  struct run *r = _kalloc(core);
+  if (!r) {
+    for (int i = 0; i < NCPU; i++) {
+      if (i == core)
+        continue;
+
+      r = _kalloc(i);
+      if (r)
+        break;
+    }
+  }
+
+  return (void*) r;
+}
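
One caveat about the patch above: in xv6, cpuid() is only meaningful while interrupts are disabled, because a timer interrupt could migrate the process to another CPU between reading the id and using it. The code passes the grade as-is, but the lab hints recommend bracketing the call with push_off()/pop_off(); here is a sketch of kalloc() written that way (reusing _kalloc() from the patch):

void *
kalloc(void)
{
  struct run *r;
  int core;

  // cpuid() is only stable while interrupts are off.
  push_off();
  core = cpuid();
  pop_off();

  r = _kalloc(core);
  if (!r) {
    // Our freelist is empty: steal a page from another CPU's list.
    for (int i = 0; i < NCPU; i++) {
      if (i == core)
        continue;
      r = _kalloc(i);
      if (r)
        break;
    }
  }
  return (void*) r;
}

Even if the process migrates right after pop_off(), the worst case is allocating from the "wrong" CPU's freelist, which is still correct: every freelist access takes that list's lock.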

Buffer cache

My Solution:

  • Add a new struct "bucket" containing two fields: a dummy head for that bucket's linked list of buffers, and a spinlock protecting the list
  • Add a fixed-size array of NBUCKET hash buckets
  • Remove the global bcache lock, bcache.head, and buf.prev, which are no longer used
  • Add a new field "ts" (a timestamp) to struct buf and initialize it to 0xffffffff (UINT_MAX)
  • Modify binit():
    • Distribute the NBUF buffers evenly across the hash buckets
  • Modify brelse():
    • Remove the code that returns the entry to the global bcache linked list
    • Just set buf->ts to the global "ticks" counter when refcnt drops to 0
  • Modify bget():
    • Use a least-recently-used policy, based on the timestamps, when looking for a free buffer (see the sketch after this list)
    • When the block's own bucket has no free buffer, steal one from another bucket
    • Add a new function bsteal() that steals an unused buffer from another bucket
  • Modify bpin() and bunpin():
    • It is acceptable here to update refcnt without taking bucket->lock (a stricter locked variant is sketched after the patch)
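
Since the buckets are no longer kept in recency order, LRU comes entirely from the timestamps. Below is a minimal sketch of the selection loop as a helper function (pick_lru() is a hypothetical name; bget() in the patch inlines this loop, and the field names ts, refcnt, dummy, and next match the patch):

// Sketch: pick the LRU unused buffer in one bucket, or 0 if none.
// Assumes the caller holds bucket->lock.
static struct buf *
pick_lru(struct bucket *bucket)
{
  struct buf *b, *lru = 0;
  uint best = 0xffffffff;   // sentinel: no candidate seen yet

  // Among this bucket's unused buffers, remember the one with the
  // smallest ts, i.e. the one released longest ago.
  for (b = bucket->dummy.next; b != &bucket->dummy; b = b->next) {
    if (b->refcnt == 0 && b->ts < best) {
      best = b->ts;
      lru = b;
    }
  }
  return lru;
}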

Patch

---
 kernel/bio.c | 164 ++++++++++++++++++++++++++++++++++-----------------
 kernel/buf.h |   2 +-
 2 files changed, 112 insertions(+), 54 deletions(-)

diff --git a/kernel/bio.c b/kernel/bio.c
index 60d91a6..ef9301c 100755
--- a/kernel/bio.c
+++ b/kernel/bio.c
@@ -23,33 +23,88 @@
 #include "fs.h"
 #include "buf.h"
 
+#define NBUCKET 13
+#define initbuf(b)              \
+  do {                          \
+    b->dev = dev;               \
+    b->blockno = blockno;       \
+    b->valid = 0;               \
+    b->refcnt = 1;              \
+    b->ts = ticks;              \
+  } while(0)
+
 struct {
-  struct spinlock lock;
   struct buf buf[NBUF];
-
-  // Linked list of all buffers, through prev/next.
-  // Sorted by how recently the buffer was used.
-  // head.next is most recent, head.prev is least.
-  struct buf head;
+  struct bucket{
+    struct buf dummy;
+    struct spinlock lock;
+  } buckets[NBUCKET];
 } bcache;
 
+static inline struct bucket *
+getbucket(uint blockno)
+{
+  return bcache.buckets + blockno % NBUCKET;
+}
+
+static inline void
+bucket_addbuf(struct bucket *bucket, struct buf *b)
+{
+  b->next = bucket->dummy.next;
+  bucket->dummy.next = b;
+}
+
 void
 binit(void)
 {
   struct buf *b;
+  struct bucket *bucket;
+  uint i, j, end, nr = NBUF / NBUCKET;
+  
+  for (i = 0; i < NBUCKET; ++i){
+    bucket = bcache.buckets + i;
+    bucket->dummy.next = &bucket->dummy;
+
+    end = (i != NBUCKET-1) ? (i+1)*nr : NBUF;
+    for (j = i * nr; j < end; ++j){
+      if (j >= NBUF)
+        panic("j >= NBUF");
+      b = &bcache.buf[j];
+      bucket_addbuf(bucket, b);
+      b->ts = 0xffffffff;
+      initsleeplock(&b->lock, "buffer");
+    }
+  }
+}
 
-  initlock(&bcache.lock, "bcache");
-
-  // Create linked list of buffers
-  bcache.head.prev = &bcache.head;
-  bcache.head.next = &bcache.head;
-  for(b = bcache.buf; b < bcache.buf+NBUF; b++){
-    b->next = bcache.head.next;
-    b->prev = &bcache.head;
-    initsleeplock(&b->lock, "buffer");
-    bcache.head.next->prev = b;
-    bcache.head.next = b;
+static struct buf*
+bsteal(struct bucket *notthisone)
+{
+  struct bucket *cur;
+  struct buf *b, *pre;
+
+  for (cur = bcache.buckets; cur != bcache.buckets+NBUCKET; ++cur) {
+    if (cur == notthisone)
+      continue;
+
+    acquire(&cur->lock);
+    b = cur->dummy.next, pre = &cur->dummy;
+    if (b == pre) {
+      release(&cur->lock);
+      continue;
+    }
+
+    for (; b != &cur->dummy; b = b->next, pre = pre->next) {
+      if (b->refcnt == 0) {
+        pre->next = b->next;
+        release(&cur->lock);
+        b->next = 0;
+        return b;
+      }
+    }
+    release(&cur->lock);
   }
+  return 0;
 }
 
 // Look through buffer cache for block on device dev.
@@ -58,34 +113,50 @@ binit(void)
 static struct buf*
 bget(uint dev, uint blockno)
 {
-  struct buf *b;
-
-  acquire(&bcache.lock);
+  struct buf *b, *bfree = 0;
+  struct bucket *bucket = getbucket(blockno);
+  uint ts = 0xffffffff;
 
   // Is the block already cached?
-  for(b = bcache.head.next; b != &bcache.head; b = b->next){
+  acquire(&bucket->lock);
+  for(b = bucket->dummy.next; b != &bucket->dummy; b = b->next){
     if(b->dev == dev && b->blockno == blockno){
       b->refcnt++;
-      release(&bcache.lock);
+      b->ts = ticks;
+      release(&bucket->lock);
       acquiresleep(&b->lock);
       return b;
+    } else if (b->refcnt == 0 && b->ts < ts) {
+      ts = b->ts;
+      bfree = b;
     }
   }
 
   // Not cached.
   // Recycle the least recently used (LRU) unused buffer.
-  for(b = bcache.head.prev; b != &bcache.head; b = b->prev){
-    if(b->refcnt == 0) {
-      b->dev = dev;
-      b->blockno = blockno;
-      b->valid = 0;
-      b->refcnt = 1;
-      release(&bcache.lock);
-      acquiresleep(&b->lock);
-      return b;
+  if (bfree) {
+    initbuf(bfree);
+  } else {
+    release(&bucket->lock); 
+    bfree = bsteal(bucket);
+    if (!bfree || bfree->refcnt != 0)
+      panic("bget: no free buffer || refcnt != 0");
+    initbuf(bfree);
+    acquire(&bucket->lock);
+    for(b = bucket->dummy.next; b != &bucket->dummy; b = b->next){
+      // another thread may have cached the same blockno while we
+      // dropped the lock, so check again; if it is already cached,
+      // mark bfree unused by setting bfree->refcnt = 0
+      if(b->dev == dev && b->blockno == blockno)
+        bfree->refcnt = 0;
     }
+
+    // adding bfree to the bucket is fine whether or not the blockno is cached
+    bucket_addbuf(bucket, bfree);
   }
-  panic("bget: no buffers");
+  release(&bucket->lock);
+  acquiresleep(&bfree->lock);
+  return bfree;
 }
 
 // Return a locked buf with the contents of the indicated block.
@@ -121,33 +192,20 @@ brelse(struct buf *b)
 
   releasesleep(&b->lock);
 
-  acquire(&bcache.lock);
+  struct bucket *bucket = getbucket(b->blockno);
+  acquire(&bucket->lock);
   b->refcnt--;
-  if (b->refcnt == 0) {
-    // no one is waiting for it.
-    b->next->prev = b->prev;
-    b->prev->next = b->next;
-    b->next = bcache.head.next;
-    b->prev = &bcache.head;
-    bcache.head.next->prev = b;
-    bcache.head.next = b;
-  }
-  
-  release(&bcache.lock);
+  if (b->refcnt == 0)
+    b->ts = ticks;
+  release(&bucket->lock);
 }
 
-void
+inline void
 bpin(struct buf *b) {
-  acquire(&bcache.lock);
   b->refcnt++;
-  release(&bcache.lock);
 }
 
-void
+inline void
 bunpin(struct buf *b) {
-  acquire(&bcache.lock);
   b->refcnt--;
-  release(&bcache.lock);
-}
-
-
+}
\ No newline at end of file
diff --git a/kernel/buf.h b/kernel/buf.h
index 4616e9e..8283e86 100755
--- a/kernel/buf.h
+++ b/kernel/buf.h
@@ -5,8 +5,8 @@ struct buf {
   uint blockno;
   struct sleeplock lock;
   uint refcnt;
-  struct buf *prev; // LRU cache list
   struct buf *next;
   uchar data[BSIZE];
+  uint ts; // timestamp
 };
 
-- 
2.25.1
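
On the last bullet of the solution above (updating refcnt in bpin()/bunpin() without bucket->lock): a stricter variant is easy if you prefer it, keeping the per-bucket lock around the update just as the original bcache.lock code did. A sketch, using getbucket() from the patch:

void
bpin(struct buf *b) {
  // Stricter variant: protect the refcnt update with the owning
  // bucket's lock, as the original global-lock version did.
  struct bucket *bucket = getbucket(b->blockno);
  acquire(&bucket->lock);
  b->refcnt++;
  release(&bucket->lock);
}

void
bunpin(struct buf *b) {
  struct bucket *bucket = getbucket(b->blockno);
  acquire(&bucket->lock);
  b->refcnt--;
  release(&bucket->lock);
}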

Verify

# make grade
== Test running kalloctest == 
$ make qemu-gdb
(41.1s) 
== Test   kalloctest: test1 == 
  kalloctest: test1: OK 
== Test   kalloctest: test2 == 
  kalloctest: test2: OK 
== Test kalloctest: sbrkmuch == 
$ make qemu-gdb
kalloctest: sbrkmuch: OK (3.9s) 
== Test running bcachetest == 
$ make qemu-gdb
(2.7s) 
== Test   bcachetest: test0 == 
  bcachetest: test0: OK 
== Test   bcachetest: test1 == 
  bcachetest: test1: OK 
== Test usertests == 
$ make qemu-gdb
usertests: OK (48.0s) 
== Test time == 
time: OK 
Score: 70/70

Note

Run make clean before every run of this lab, so that fs.img and any stale block data are rebuilt from scratch.
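
For example, with the standard xv6-riscv Makefile targets:

$ make clean     # remove fs.img and all build artifacts
$ make qemu      # rebuild the kernel plus a fresh fs.img, then boot
$ make grade     # or run the autograder directly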

bio.c

// Buffer cache.
//
// The buffer cache is a linked list of buf structures holding
// cached copies of disk block contents.  Caching disk blocks
// in memory reduces the number of disk reads and also provides
// a synchronization point for disk blocks used by multiple processes.
//
// Interface:
// * To get a buffer for a particular disk block, call bread.
// * After changing buffer data, call bwrite to write it to disk.
// * When done with the buffer, call brelse.
// * Do not use the buffer after calling brelse.
// * Only one process at a time can use a buffer,
//     so do not keep them longer than necessary.


#include "types.h"
#include "param.h"
#include "spinlock.h"
#include "sleeplock.h"
#include "riscv.h"
#include "defs.h"
#include "fs.h"
#include "buf.h"

#define NBUCKET 13
#define initbuf(b)              \
  do {                          \
    b->dev = dev;               \
    b->blockno = blockno;       \
    b->valid = 0;               \
    b->refcnt = 1;              \
    b->ts = ticks;              \
  } while(0)

struct {
  struct buf buf[NBUF];
  struct bucket{
    struct buf dummy;
    struct spinlock lock;
  } buckets[NBUCKET];
} bcache;

static inline struct bucket *
getbucket(uint blockno)
{
  return bcache.buckets + blockno % NBUCKET;
}

static inline void
bucket_addbuf(struct bucket *bucket, struct buf *b)
{
  b->next = bucket->dummy.next;
  bucket->dummy.next = b;
}

void
binit(void)
{
  struct buf *b;
  struct bucket *bucket;
  uint i, j, end, nr = NBUF / NBUCKET;
  
  for (i = 0; i < NBUCKET; ++i){
    bucket = bcache.buckets + i;
    bucket->dummy.next = &bucket->dummy;

    end = (i != NBUCKET-1) ? (i+1)*nr : NBUF;
    for (j = i * nr; j < end; ++j){
      if (j >= NBUF)
        panic("j >= NBUF");
      b = &bcache.buf[j];
      bucket_addbuf(bucket, b);
      b->ts = 0xffffffff;
      initsleeplock(&b->lock, "buffer");
    }
  }
}

static struct buf*
bsteal(struct bucket *notthisone)
{
  struct bucket *cur;
  struct buf *b, *pre;

  for (cur = bcache.buckets; cur != bcache.buckets+NBUCKET; ++cur) {
    if (cur == notthisone)
      continue;

    acquire(&cur->lock);
    b = cur->dummy.next, pre = &cur->dummy;
    if (b == pre) {
      release(&cur->lock);
      continue;
    }

    for (; b != &cur->dummy; b = b->next, pre = pre->next) {
      if (b->refcnt == 0) {
        pre->next = b->next;
        release(&cur->lock);
        b->next = 0;
        return b;
      }
    }
    release(&cur->lock);
  }
  return 0;
}

// Look through buffer cache for block on device dev.
// If not found, allocate a buffer.
// In either case, return locked buffer.
static struct buf*
bget(uint dev, uint blockno)
{
  struct buf *b, *bfree = 0;
  struct bucket *bucket = getbucket(blockno);
  uint ts = 0xffffffff;

  // Is the block already cached?
  acquire(&bucket->lock);
  for(b = bucket->dummy.next; b != &bucket->dummy; b = b->next){
    if(b->dev == dev && b->blockno == blockno){
      b->refcnt++;
      b->ts = ticks;
      release(&bucket->lock);
      acquiresleep(&b->lock);
      return b;
    } else if (b->refcnt == 0 && b->ts < ts) {
      ts = b->ts;
      bfree = b;
    }
  }

  // Not cached.
  // Recycle the least recently used (LRU) unused buffer.
  if (bfree) {
    initbuf(bfree);
  } else {
    release(&bucket->lock); 
    bfree = bsteal(bucket);
    if (!bfree || bfree->refcnt != 0)
      panic("bget: no free buffer || refcnt != 0");
    initbuf(bfree);
    acquire(&bucket->lock);
    for(b = bucket->dummy.next; b != &bucket->dummy; b = b->next){
      // another thread may have cached the same blockno while we
      // dropped the lock, so check again; if it is already cached,
      // mark bfree unused by setting bfree->refcnt = 0
      if(b->dev == dev && b->blockno == blockno)
        bfree->refcnt = 0;
    }

    // adding bfree to the bucket is fine whether or not the blockno is cached
    bucket_addbuf(bucket, bfree);
  }
  release(&bucket->lock);
  acquiresleep(&bfree->lock);
  return bfree;
}

// Return a locked buf with the contents of the indicated block.
struct buf*
bread(uint dev, uint blockno)
{
  struct buf *b;

  b = bget(dev, blockno);
  if(!b->valid) {
    virtio_disk_rw(b, 0);
    b->valid = 1;
  }
  return b;
}

// Write b's contents to disk.  Must be locked.
void
bwrite(struct buf *b)
{
  if(!holdingsleep(&b->lock))
    panic("bwrite");
  virtio_disk_rw(b, 1);
}

// Release a locked buffer.
// Move to the head of the most-recently-used list.
void
brelse(struct buf *b)
{
  if(!holdingsleep(&b->lock))
    panic("brelse");

  releasesleep(&b->lock);

  struct bucket *bucket = getbucket(b->blockno);
  acquire(&bucket->lock);
  b->refcnt--;
  if (b->refcnt == 0)
    b->ts = ticks;
  release(&bucket->lock);
}

inline void
bpin(struct buf *b) {
  b->refcnt++;
}

inline void
bunpin(struct buf *b) {
  b->refcnt--;
}