MIT6.828_HW10_Bigger file for xv6

Extending the xv6 file system
This post walks through enlarging the maximum file size in xv6 by adding a doubly-indirect block to the inode, raising the limit from 140 sectors to 16523 sectors, and covers the key steps and details of the change.


xv6 currently limits a file to 140 sectors: an inode has 12 direct block pointers plus one singly-indirect block pointer, and the indirect block holds 512/4 = 128 block addresses, for a total of 12 + 128 = 140 sectors. We will add a doubly-indirect block to the inode: it holds the addresses of 128 singly-indirect blocks, and each of those in turn holds the addresses of 128 data sectors. With one direct pointer given up to make room (as explained below), the maximum file size grows to 11 + 128 + 128 × 128 = 16523 sectors, roughly 8.5 MB.
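For reference, the capacity arithmetic expressed in terms of the fs.h constants (a sketch assuming BSIZE = 512 and the NDIRECT = 11 layout adopted below):

// NINDIRECT  = BSIZE / sizeof(uint)        = 512 / 4   = 128
// NDINDIRECT = NINDIRECT * NINDIRECT       = 128 * 128 = 16384
// MAXFILE    = NDIRECT + NINDIRECT + NDINDIRECT
//            = 11 + 128 + 16384            = 16523 blocks
// 16523 * 512 bytes ≈ 8.46 MB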

The format of an on-disk inode is defined by struct dinode in fs.h. You’re particularly interested in NDIRECT, NINDIRECT, MAXFILE, and the addrs[] element of struct dinode. The standard xv6 inode layout is shown in the figure below.
[Figure: on-disk inode layout, with NDIRECT direct block addresses followed by one singly-indirect block address in addrs[]]
bmap() (in fs.c) is called when reading and writing a file. When writing, bmap() allocates the blocks needed to hold the file's contents, and also allocates an indirect block if one is needed to hold block addresses.

bmap() deals with two kinds of block numbers. The bn argument is a “logical block” – a block number relative to the start of the file. The block numbers in ip->addrs[], and the argument to bread(), are disk block numbers. You can view bmap() as mapping a file’s logical block numbers into disk block numbers.
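As a concrete example (a sketch assuming the NDIRECT = 11 layout introduced below), here is how a logical block number well past the singly-indirect range is decomposed:

// Decomposing logical block bn = 5000, with NDIRECT = 11 and NINDIRECT = 128:
//   bn >= NDIRECT, and bn - NDIRECT = 4989 >= NINDIRECT, so it is doubly indirect.
//   bn -= NDIRECT + NINDIRECT            ->  bn = 4861
//   first-level slot:  4861 / 128 = 37   (entry in the doubly-indirect block)
//   second-level slot: 4861 % 128 = 125  (entry in that singly-indirect block)
// The uint stored in that final slot is the disk block number handed to bread().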

Implementation

Modify param.h, changing #define FSSIZE 1000 to the line below, so that mkfs builds a file system image large enough to hold a ~16523-sector file in addition to the metadata and log:

#define FSSIZE       20000

Download big.c into your xv6 directory, add it to the UPROGS list, start up xv6, and run big. It creates as big a file as xv6 will let it, and reports the resulting size. It should say 140 sectors.
With that preliminary setup done, running big should produce the following output:

$ big
.
wrote 140 sectors
done; ok

You’ll have to have only 11 direct blocks, rather than 12, to make room for your new doubly-indirect block. Using only 11 direct blocks means the addrs[] array does not have to grow: it keeps the same 13 entries as before. It turned out that the clearest way to express this is to lower NDIRECT itself to 11 and declare the array as addrs[NDIRECT + 2], which means changing three places:

// 1
#define NDIRECT 11
#define NINDIRECT (BSIZE / sizeof(uint))
// DOUBLE INDIRECT
#define NDINDIRECT (NINDIRECT * NINDIRECT)
#define MAXFILE (NDIRECT + NINDIRECT + NDINDIRECT)

// 2
// in-memory copy of an inode
struct inode {
  ...
  uint addrs[NDIRECT + 2];
};

// 3
// On-disk inode structure
struct dinode {
  short type;           // File type
  ...
  uint size;            // Size of file (bytes)
  uint addrs[NDIRECT + 2];   // Data block addresses
};
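A sanity check worth keeping in mind: fs.h computes inodes-per-block as IPB = BSIZE / sizeof(struct dinode), so struct dinode must keep packing evenly into a block. Since one direct entry is traded for the doubly-indirect entry, addrs[] stays at 13 uints and the struct stays 64 bytes, so nothing else changes. If you want the compiler to verify this, a minimal sketch (not part of stock xv6, and assuming the toolchain accepts C11 _Static_assert) is:

// 4 shorts (8 bytes) + uint size (4 bytes) + uint addrs[13] (52 bytes) = 64 bytes
_Static_assert(sizeof(struct dinode) == 64, "dinode size changed");
_Static_assert(BSIZE % sizeof(struct dinode) == 0, "dinodes must pack evenly into a block");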

For reference, here is the buf structure (buf.h). The only field bmap() cares about is data[BSIZE]: the 512 bytes returned by bread() are reinterpreted as an array of NINDIRECT block addresses.

struct buf {
  int flags;
  uint dev;
  uint blockno;
  struct sleeplock lock;
  uint refcnt;
  struct buf *prev; // LRU cache list
  struct buf *next;
  struct buf *qnext; // disk queue
  uchar data[BSIZE];
};

The final implementation is not hard to write: the doubly-indirect case simply repeats the singly-indirect pattern one level deeper, using the same calls. [Note] Pay attention to the timing of brelse(bp): every buffer returned by bread() must be released, so both bp and bp2 are released once the final entry has been resolved. The details are in the comments.

static uint
bmap(struct inode *ip, uint bn)
{
  uint addr, *a;
  struct buf *bp;
  struct buf *bp2;

  // Direct blocks: bn = 0 .. NDIRECT-1 (0..10)
  if(bn < NDIRECT){
    // Allocate the direct block if it does not exist yet.
    if((addr = ip->addrs[bn]) == 0)
      ip->addrs[bn] = addr = balloc(ip->dev);
    return addr;
  }
  bn -= NDIRECT;

  // Singly-indirect range: NINDIRECT = BSIZE / sizeof(uint) = 128
  if(bn < NINDIRECT){
    // Load the indirect block, allocating it if necessary.
    if((addr = ip->addrs[NDIRECT]) == 0)
      ip->addrs[NDIRECT] = addr = balloc(ip->dev);

    bp = bread(ip->dev, addr);
    a = (uint*)bp->data;
    if((addr = a[bn]) == 0){
      a[bn] = addr = balloc(ip->dev);
      log_write(bp);
    }
    brelse(bp);
    return addr;
  }
  bn -= NINDIRECT;

  // Doubly-indirect range.
  if(bn < NDINDIRECT){
    // Load the doubly-indirect (top-level) block, allocating it if necessary.
    if((addr = ip->addrs[NDIRECT+1]) == 0)
      ip->addrs[NDIRECT+1] = addr = balloc(ip->dev);

    bp = bread(ip->dev, addr);
    // Each entry of the top-level block points to a singly-indirect block.
    a = (uint*)bp->data;
    if((addr = a[bn/NINDIRECT]) == 0){
      a[bn/NINDIRECT] = addr = balloc(ip->dev);
      log_write(bp);
    }

    bp2 = bread(ip->dev, addr);
    // Entries of the second-level block are data block addresses.
    a = (uint*)bp2->data;
    if((addr = a[bn%NINDIRECT]) == 0){
      a[bn%NINDIRECT] = addr = balloc(ip->dev);
      log_write(bp2);
    }

    brelse(bp2);
    brelse(bp);
    return addr;
  }

  panic("bmap: out of range");
}
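After these changes, rebuild the file system image so mkfs picks up the new FSSIZE and on-disk layout (for example, make clean before make qemu), then run big again. It now takes noticeably longer, and the exact number of progress dots depends on big.c's reporting interval, but the final report should look roughly like this:

$ big
.......
wrote 16523 sectors
done; ok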
