Linux Kernel RAID5 Source Code Explained: Anatomy of the Write Path (Part 2)
Continuing from Part 1 of this series, we have already added the bio to the corresponding stripe_head, which we will call sh. Since the stripe_head is the basic unit RAID5 operates on, the next question is how this stripe_head containing the bio gets processed.
In the previous article, make_request() ended by calling release_stripe_plug() to hand sh off. How control flows from release_stripe_plug() to handle_stripe() is described in detail in my two earlier posts, "Managing stripe_head in the Linux Kernel RAID5 Source" and "The raid5d Daemon in the Linux Kernel RAID5 Source". With that out of the way, let's open up the main battlefield where RAID5 processes a stripe_head: handle_stripe()!
A disclaimer first: the RAID5 code was designed with robustness in mind, and a large share of it exists purely for fault tolerance. We will skip those error-handling passages and concentrate on the code that services the request, which keeps things simpler. There will be many state transitions along the way; if you are not familiar with the states of a stripe_head and its dev buffers, see my earlier post "The Basic Architecture and Data Structures of RAID5 in the Linux Kernel".
Background
Here we follow a single 4KB write request and watch how the stripe_head's state and the dev flags evolve, to understand how RAID5 services a write.
Assume a 4KB write request has already been added to disk 0 of stripe_head 0; the concrete state is shown below.
Following the experimental setup from Part 1 and using the formal notation introduced there, after make_request() we have:
1.after make_request()
sh:STRIPE_HANDLE,STRIPE_PREREAD_ACTIVE
d0:R5_OVERWRITE
d1,d2,d3:NULL
pd_idx=3 // disk 3 is the parity disk
Handle_stripe
With the experimental background in place, the next step is processing sh. This is where handle_stripe() comes in; let's step into its source:
static void handle_stripe(struct stripe_head *sh)
{
struct stripe_head_state s;
struct r5conf *conf = sh->raid_conf;
int i;
int prexor;
int disks = sh->disks;
struct r5dev *pdev, *qdev;
printk("+++handle_stripe : stripe %llu\n",(unsigned long long)sh->sector);
clear_bit(STRIPE_HANDLE, &sh->state);// clear the needs-handling flag: we are handling it now
if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) {// mark the stripe as being handled
/* already being handled, ensure it gets handled
* again when current action finishes */
set_bit(STRIPE_HANDLE, &sh->state);
return;
}
if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {// taken when a sync has been requested
spin_lock(&sh->stripe_lock);
/* Cannot process 'sync' concurrently with 'discard' */
if (!test_bit(STRIPE_DISCARD, &sh->state) &&
test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
set_bit(STRIPE_SYNCING, &sh->state);
clear_bit(STRIPE_INSYNC, &sh->state);
clear_bit(STRIPE_REPLACED, &sh->state);
}
spin_unlock(&sh->stripe_lock);
}
clear_bit(STRIPE_DELAYED, &sh->state);// clear the delayed-handling flag
pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
"pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n",
(unsigned long long)sh->sector, sh->state,
atomic_read(&sh->count), sh->pd_idx, sh->qd_idx,
sh->check_state, sh->reconstruct_state);
analyse_stripe(sh, &s);// analyse the stripe
This is the opening of handle_stripe(): it marks sh as being handled and then analyses it. The analysis is done by analyse_stripe(sh, &s); note the argument s, defined as follows:
struct stripe_head_state {
/* 'syncing' means that we need to read all devices, either
* to check/correct parity, or to reconstruct a missing device.
* 'replacing' means we are replacing one or more drives and
* the source is valid at this point so we don't need to
* read all devices, just the replacement targets.
*/
int syncing, expanding, expanded, replacing;
int locked, uptodate, to_read, to_write, failed, written;/* counts, over the devs of this stripe_head: locked buffers, up-to-date buffers, pending reads, pending writes, failed devices, and writes already drained into the cache */
int to_fill, compute, req_compute, non_overwrite;
int failed_num[2];
int p_failed, q_failed;
int dec_preread_active;
unsigned long ops_request;
struct bio *return_bi;
struct md_rdev *blocked_rdev;
int handle_bad_blocks;
};
stripe_head_state simply bundles the analysis results together to drive the decisions that follow; the fields we care about are annotated in the code above.
analyse_stripe
Next, let's see how sh is analysed. Stepping into analyse_stripe():
static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
{
struct r5conf *conf = sh->raid_conf;
int disks = sh->disks;
struct r5dev *dev;
int i;
int do_recovery = 0;
memset(s, 0, sizeof(*s));// zero every field of s
s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
s->failed_num[0] = -1;
s->failed_num[1] = -1;
/* Now to look around and see what can be done */
rcu_read_lock();
for (i=disks; i--; ) {// examine each disk's buffer in turn
struct md_rdev *rdev;
sector_t first_bad;
int bad_sectors;
int is_bad = 0;
dev = &sh->dev[i];
pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
i, dev->flags,
dev->toread, dev->towrite, dev->written);
/* maybe we can reply to a read
*
* new wantfill requests are only permitted while
* ops_complete_biofill is guaranteed to be inactive
*/
if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
!test_bit(STRIPE_BIOFILL_RUN, &sh->state))/* if this buffer has a pending read and its data is already up to date, the read can be served straight from the buffer with no disk access, saving I/O time */
set_bit(R5_Wantfill, &dev->flags);// mark the buffer: copy the dev page into the bio's pages
/* now count some things */
if (test_bit(R5_LOCKED, &dev->flags))// count locked buffers
s->locked++;
if (test_bit(R5_UPTODATE, &dev->flags))// count up-to-date buffers
s->uptodate++;
if (test_bit(R5_Wantcompute, &dev->flags)) {// count buffers that need to be computed
s->compute++;
BUG_ON(s->compute > 2);
}
if (test_bit(R5_Wantfill, &dev->flags))// count buffers waiting to be filled
s->to_fill++;
else if (dev->toread)// count pending read requests
s->to_read++;
if (dev->towrite) {// count pending write requests
s->to_write++;
if (!test_bit(R5_OVERWRITE, &dev->flags))// count partial writes, i.e. writes that do not cover the whole buffer
s->non_overwrite++;
}
if (dev->written)// count writes already drained into the cache
s->written++;
...// the fault-tolerance bookkeeping in between is omitted
rcu_read_unlock();
}
With the comments above, the function's job is clear: it tallies the pending reads and writes in sh, the locked buffers, the completed writes and so on, and stores the results in s. Not that complicated.
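To make the bookkeeping concrete, here is a minimal user-space sketch (not kernel code; the flag bit numbers, the fake_dev/fake_state types and the 4-disk layout are invented for illustration) that tallies counters from per-device flags the same way analyse_stripe() fills stripe_head_state for our example:
#include <stdio.h>

#define R5_UPTODATE  0
#define R5_LOCKED    1
#define R5_OVERWRITE 2

struct fake_dev {
    unsigned long flags;              /* bitmask of R5_* bits */
    int has_toread, has_towrite;      /* stand-ins for dev->toread / dev->towrite */
};

struct fake_state {
    int uptodate, locked, to_read, to_write, non_overwrite;
};

static int test_bit(int nr, unsigned long flags) { return (flags >> nr) & 1; }

int main(void)
{
    /* d0 carries a full-buffer (OVERWRITE) write, d1..d3 are idle,
     * mirroring state (1) in this article */
    struct fake_dev dev[4] = {
        { 1UL << R5_OVERWRITE, 0, 1 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
    };
    struct fake_state s = { 0 };

    for (int i = 0; i < 4; i++) {
        if (test_bit(R5_LOCKED, dev[i].flags))   s.locked++;
        if (test_bit(R5_UPTODATE, dev[i].flags)) s.uptodate++;
        if (dev[i].has_toread)                   s.to_read++;
        if (dev[i].has_towrite) {
            s.to_write++;
            if (!test_bit(R5_OVERWRITE, dev[i].flags))
                s.non_overwrite++;
        }
    }
    /* prints uptodate=0 locked=0 to_read=0 to_write=1 non_overwrite=0,
     * which matches state (2) below */
    printf("uptodate=%d locked=%d to_read=%d to_write=%d non_overwrite=%d\n",
           s.uptodate, s.locked, s.to_read, s.to_write, s.non_overwrite);
    return 0;
}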
After analyse_stripe(), let's look at sh's state and the dev flags:
2. after analyse_stripe()
sh:STRIPE_ACTIVE,STRIPE_PREREAD_ACTIVE
d0:R5_OVERWRITE
d1,d2,d3:NULL
pd_idx=3,uptodate=0,to_write=1 // no buffer is up to date; one write is still pending
Back in handle_stripe(), the next step is to decide, from the analysis result s, which case we are in. As you saw in the source, there is a long chain of if statements. Given the s computed above, to_write is 1, so we match:
if (s.to_write && !sh->reconstruct_state && !sh->check_state)
handle_stripe_dirtying(conf, sh, &s, disks);
and we enter handle_stripe_dirtying().
handle_stripe_dirtying
Stepping into handle_stripe_dirtying():
static void handle_stripe_dirtying(struct r5conf *conf,
struct stripe_head *sh,
struct stripe_head_state *s,
int disks)
{
int rmw = 0, rcw = 0, i;
sector_t recovery_cp = conf->mddev->recovery_cp;
/* RAID6 requires 'rcw' in current implementation.
* Otherwise, check whether resync is now happening or should start.
* If yes, then the array is dirty (after unclean shutdown or
* initial creation), so parity in some stripes might be inconsistent.
* In this case, we need to always do reconstruct-write, to ensure
* that in case of drive failure or read-error correction, we
* generate correct data from the parity.
*/
if (conf->max_degraded == 2 ||
(recovery_cp < MaxSector && sh->sector >= recovery_cp &&
s->failed == 0)) {
/* Calculate the real rcw later - for now make it
* look like rcw is cheaper
*/
rcw = 1; rmw = 2;
pr_debug("force RCW max_degraded=%u, recovery_cp=%llu sh->sector=%llu\n",
conf->max_degraded, (unsigned long long)recovery_cp,
(unsigned long long)sh->sector);
} else for (i = disks; i--; ) {
/* would I have to read this buffer for read_modify_write */
struct r5dev *dev = &sh->dev[i];
if ((dev->towrite || i == sh->pd_idx) &&
!test_bit(R5_LOCKED, &dev->flags) &&
!(test_bit(R5_UPTODATE, &dev->flags) ||
test_bit(R5_Wantcompute, &dev->flags))) {/* for buffers with a pending write, and for the parity buffer: if the buffer is unlocked, not up to date and not scheduled for compute, its data must be read from disk, so it counts toward rmw */
if (test_bit(R5_Insync, &dev->flags))// the on-disk data is valid
rmw++;
else
rmw += 2*disks; /* cannot read it */
}
/* Would I have to read this buffer for reconstruct_write */
if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
!test_bit(R5_LOCKED, &dev->flags) &&
!(test_bit(R5_UPTODATE, &dev->flags) ||
test_bit(R5_Wantcompute, &dev->flags))) {/* for non-parity disks whose buffer is not fully overwritten (a partial write, or no write at all): if it is unlocked, not up to date and not scheduled for compute, its data must be read from disk, so it counts toward rcw */
if (test_bit(R5_Insync, &dev->flags))
rcw++;
else
rcw += 2*disks;
}
}
pr_debug("for sector %llu, rmw=%d rcw=%d\n",
(unsigned long long)sh->sector, rmw, rcw);
set_bit(STRIPE_HANDLE, &sh->state);
if (rmw < rcw && rmw > 0) {// rmw needs fewer disk reads, so handle the write with rmw
/* prefer read-modify-write, but need to get some data */
if (conf->mddev->queue)
blk_add_trace_msg(conf->mddev->queue,
"raid5 rmw %llu %d",
(unsigned long long)sh->sector, rmw);
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
if ((dev->towrite || i == sh->pd_idx) &&
!test_bit(R5_LOCKED, &dev->flags) &&
!(test_bit(R5_UPTODATE, &dev->flags) ||
test_bit(R5_Wantcompute, &dev->flags)) &&
test_bit(R5_Insync, &dev->flags)) {/* pick the buffers meeting the conditions above: a pending write or the parity buffer, and unlocked, not up to date, not scheduled for compute, with valid on-disk data */
if (test_bit(STRIPE_PREREAD_ACTIVE,
&sh->state)) {// the read may only be issued while the stripe is preread-active
pr_debug("Read_old block %d for r-m-w\n",
i);
set_bit(R5_LOCKED, &dev->flags);
set_bit(R5_Wantread, &dev->flags);
s->locked++;
} else {// otherwise defer the stripe
set_bit(STRIPE_DELAYED, &sh->state);
set_bit(STRIPE_HANDLE, &sh->state);
}
}
}
}
if (rcw <= rmw && rcw > 0) {// rcw needs no more disk reads than rmw, so use rcw
/* want reconstruct write, but need to get some data */
int qread =0;
rcw = 0;
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
if (!test_bit(R5_OVERWRITE, &dev->flags) &&
i != sh->pd_idx && i != sh->qd_idx &&
!test_bit(R5_LOCKED, &dev->flags) &&
!(test_bit(R5_UPTODATE, &dev->flags) ||
test_bit(R5_Wantcompute, &dev->flags))) {// likewise, pick the qualifying buffers
rcw++;
if (test_bit(R5_Insync, &dev->flags) &&
test_bit(STRIPE_PREREAD_ACTIVE,
&sh->state)) {// issue the read only if the on-disk data is valid and the stripe is preread-active; otherwise defer
pr_debug("Read_old block "
"%d for Reconstruct\n", i);
set_bit(R5_LOCKED, &dev->flags);
set_bit(R5_Wantread, &dev->flags);
s->locked++;
qread++;
} else {
set_bit(STRIPE_DELAYED, &sh->state);
set_bit(STRIPE_HANDLE, &sh->state);
}
}
}
if (rcw && conf->mddev->queue)
blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d",
(unsigned long long)sh->sector,
rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
}
if (rcw > disks && rmw > disks &&
!test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))// conditions not met, defer the stripe
set_bit(STRIPE_DELAYED, &sh->state);
/* now if nothing is locked, and if we have enough data,
* we can start a write request
*/
/* since handle_stripe can be called at any time we need to handle the
* case where a compute block operation has been submitted and then a
* subsequent call wants to start a write request. raid_run_ops only
* handles the case where compute block and reconstruct are requested
* simultaneously. If this is not the case then new writes need to be
* held off until the compute completes.
*/
if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
(s->locked == 0 && (rcw == 0 || rmw == 0) &&
!test_bit(STRIPE_BIT_DELAY, &sh->state)))// nothing more needs to be read from disk, so schedule the write
schedule_reconstruction(sh, s, rcw == 0, 0);
}
This brings us to RAID5's two ways of writing a stripe: rmw and rcw. Let's walk through them with our example. The write lands on d0 and covers the whole buffer. Since the stripe_head is RAID5's unit of processing, after the write completes the on-disk data represented by this sh must be up to date and consistent; because of the parity block, writing new data requires computing new parity, and the question is how to compute it. There are two approaches:
- rmw: read-modify-write, the overwrite approach. In our example rmw reads the data of d0 and d3 from disk, denoted X and P, and denotes the new data about to be written to d0 as X′. The new parity is P′ = P ⊕ X ⊕ X′; this formula holds whether or not the write covers the whole buffer, which is why the rmw read path does not distinguish R5_OVERWRITE and always reads the old block. Finally X′ and P′ are written to the corresponding on-disk locations of d0 and d3, overwriting the old data. That is rmw.
- rcw: reconstruct-write, which rebuilds the stripe_head. In our example rcw reads the data of d1 and d2, denoted Y and Z, with the new d0 data again denoted X′. The new parity is P′ = X′ ⊕ Y ⊕ Z. Here the read path does have to check whether each write is an overwrite: if it is not, the written buffer's on-disk data must also be read in order to compute the parity. In our example d0 is a full overwrite, so d0 need not be read. Finally X′ and P′ are written to the corresponding on-disk locations of d0 and d3.
Note: whichever write method is chosen, rmw and rcw differ only in which disks are read, and hence in how the parity is computed; the data that finally gets written, and where it is written, are identical, as the sketch below verifies.
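A quick way to convince yourself of that equivalence is the following user-space sketch (plain C; the buffer size and the xor_into helper are illustrative only). It builds a random 4-disk stripe, computes the new parity for a full overwrite of d0 via the rmw formula and via the rcw formula, and checks that both give the same result:
#include <assert.h>
#include <stdlib.h>
#include <string.h>

#define STRIPE_SIZE 4096

static void xor_into(unsigned char *dst, const unsigned char *src)
{
    for (int i = 0; i < STRIPE_SIZE; i++)
        dst[i] ^= src[i];
}

int main(void)
{
    unsigned char x[STRIPE_SIZE], y[STRIPE_SIZE], z[STRIPE_SIZE]; /* old d0, d1, d2 */
    unsigned char xn[STRIPE_SIZE];                                /* new d0 data (X') */
    unsigned char p[STRIPE_SIZE], p_rmw[STRIPE_SIZE], p_rcw[STRIPE_SIZE];

    for (int i = 0; i < STRIPE_SIZE; i++) {
        x[i] = rand(); y[i] = rand(); z[i] = rand(); xn[i] = rand();
    }
    /* old parity P = X ^ Y ^ Z */
    memcpy(p, x, STRIPE_SIZE); xor_into(p, y); xor_into(p, z);

    /* rmw: P' = P ^ X ^ X'  (reads old d0 and old parity) */
    memcpy(p_rmw, p, STRIPE_SIZE); xor_into(p_rmw, x); xor_into(p_rmw, xn);

    /* rcw: P' = X' ^ Y ^ Z  (reads d1 and d2) */
    memcpy(p_rcw, xn, STRIPE_SIZE); xor_into(p_rcw, y); xor_into(p_rcw, z);

    /* both strategies produce the same new parity */
    assert(memcmp(p_rmw, p_rcw, STRIPE_SIZE) == 0);
    return 0;
}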
In our example rmw = 2 and rcw = 2, so by the if conditions in the code the rcw path is taken (rcw <= rmw). The flags of d1 and d2 satisfy the selection conditions, so both are flagged R5_Wantread and locked.
So after handle_stripe_dirtying() the state of sh and the dev flags are:
3.after handle_stripe_dirtying()
sh:STRIPE_ACTIVE,STRIPE_HANDLE,STRIPE_PREREAD_ACTIVE
d0:R5_OVERWRITE,R5_Insync
d1,d2:R5_LOCKED,R5_Insync,R5_Wantread
d3:R5_Insync
The final if condition is not met, so schedule_reconstruction() is not entered and we return to handle_stripe().
Back in handle_stripe()
None of the subsequent if checks hold, so we fall through to ops_run_io(). This function is the real workhorse: it is where I/O requests are actually issued to the disks. Let's see how it works.
ops_run_io
Stepping into the function:
static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
{
struct r5conf *conf = sh->raid_conf;
int i, disks = sh->disks;
might_sleep();
for (i = disks; i--; ) {// handle each buffer in turn
int rw;
int replace_only = 0;
struct bio *bi, *rbi;
struct md_rdev *rdev, *rrdev = NULL;
if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {// if the want-write flag is set, take this branch and clear it
if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
rw = WRITE_FUA;
else
rw = WRITE;
if (test_bit(R5_Discard, &sh->dev[i].flags))
rw |= REQ_DISCARD;
} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))// if the want-read flag is set, take this branch and clear it
rw = READ;
else if (test_and_clear_bit(R5_WantReplace,
&sh->dev[i].flags)) {
rw = WRITE;
replace_only = 1;
} else
continue;
if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags))
rw |= REQ_SYNC;
bi = &sh->dev[i].req;
rbi = &sh->dev[i].rreq; /* For writing to replacement */
rcu_read_lock();
rrdev = rcu_dereference(conf->disks[i].replacement);
smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */
rdev = rcu_dereference(conf->disks[i].rdev);
if (!rdev) {
rdev = rrdev;
rrdev = NULL;
}
if (rw & WRITE) {
if (replace_only)
rdev = NULL;
if (rdev == rrdev)
/* We raced and saw duplicates */
rrdev = NULL;
} else {
if (test_bit(R5_ReadRepl, &sh->dev[i].flags) && rrdev)
rdev = rrdev;
rrdev = NULL;
}
if (rdev && test_bit(Faulty, &rdev->flags))
rdev = NULL;
if (rdev)
atomic_inc(&rdev->nr_pending);
if (rrdev && test_bit(Faulty, &rrdev->flags))
rrdev = NULL;
if (rrdev)
atomic_inc(&rrdev->nr_pending);
rcu_read_unlock();
/* We have already checked bad blocks for reads. Now
* need to check for writes. We never accept write errors
* on the replacement, so we don't to check rrdev.
*/
while ((rw & WRITE) && rdev &&
test_bit(WriteErrorSeen, &rdev->flags)) {
sector_t first_bad;
int bad_sectors;
int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
&first_bad, &bad_sectors);
if (!bad)
break;
if (bad < 0) {
set_bit(BlockedBadBlocks, &rdev->flags);
if (!conf->mddev->external &&
conf->mddev->flags) {
/* It is very unlikely, but we might
* still need to write out the
* bad block log - better give it
* a chance*/
md_check_recovery(conf->mddev);
}
/*
* Because md_wait_for_blocked_rdev
* will dec nr_pending, we must
* increment it first.
*/
atomic_inc(&rdev->nr_pending);
md_wait_for_blocked_rdev(rdev, conf->mddev);
} else {
/* Acknowledged bad block - skip the write */
rdev_dec_pending(rdev, conf->mddev);
rdev = NULL;
}
}
if (rdev) {
if (s->syncing || s->expanding || s->expanded
|| s->replacing)
md_sync_acct(rdev->bdev, STRIPE_SECTORS);
set_bit(STRIPE_IO_STARTED, &sh->state);// mark that I/O has been started on this stripe
bio_reset(bi);
bi->bi_bdev = rdev->bdev;
bi->bi_rw = rw;
/* set the I/O completion callback, invoked once the I/O finishes */
bi->bi_end_io = (rw & WRITE)
? raid5_end_write_request
: raid5_end_read_request;
bi->bi_private = sh;
pr_debug("%s: for %llu schedule op %ld on disc %d\n",
__func__, (unsigned long long)sh->sector,
bi->bi_rw, i);
atomic_inc(&sh->count);
if (use_new_offset(conf, sh))
bi->bi_iter.bi_sector = (sh->sector
+ rdev->new_data_offset);
else
bi->bi_iter.bi_sector = (sh->sector
+ rdev->data_offset);
if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
bi->bi_rw |= REQ_NOMERGE;
if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
/* fill in the bio fields: request size, starting offset, and so on */
sh->dev[i].vec.bv_page = sh->dev[i].page;
bi->bi_vcnt = 1;
bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
bi->bi_io_vec[0].bv_offset = 0;
bi->bi_iter.bi_size = STRIPE_SIZE;
/*
* If this is discard request, set bi_vcnt 0. We don't
* want to confuse SCSI because SCSI will replace payload
*/
if (rw & REQ_DISCARD)
bi->bi_vcnt = 0;
if (rrdev)
set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
if (conf->mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
bi, disk_devt(conf->mddev->gendisk),
sh->dev[i].sector);
generic_make_request(bi);// submit the request to the disk
}
...// unrelated code omitted
}
This function is actually straightforward. We established above that the on-disk data of d1 and d2 must be read, so here read requests are issued to disks 1 and 2, filling d1's and d2's pages with the corresponding data. When the I/O completes, the configured callback raid5_end_read_request() performs the completion work.
So we now have:
4.after ops_run_io()
sh:STRIPE_ACTIVE,STRIPE_HANDLE,STRIPE_PREREAD_ACTIVE,STRIPE_IO_STARTED
d0:R5_OVERWRITE,R5_Insync
d1,d2:R5_LOCKED,R5_Insync
d3:R5_Insync
raid5_end_read_request
Now let's look at the I/O completion callback raid5_end_read_request(), which runs once the I/O finishes. Stepping in:
static void raid5_end_read_request(struct bio * bi, int error)
{
struct stripe_head *sh = bi->bi_private;
struct r5conf *conf = sh->raid_conf;
int disks = sh->disks, i;
int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);// did the request complete successfully, i.e. is the data now valid
char b[BDEVNAME_SIZE];
struct md_rdev *rdev = NULL;
sector_t s;
for (i=0 ; i<disks; i++)
if (bi == &sh->dev[i].req)// find which disk this request belongs to; record it as i
break;
pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
(unsigned long long)sh->sector, i, atomic_read(&sh->count),
uptodate);
if (i == disks) {
BUG();
return;
}
if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
/* If replacement finished while this request was outstanding,
* 'replacement' might be NULL already.
* In that case it moved down to 'rdev'.
* rdev is not removed until all requests are finished.
*/
rdev = conf->disks[i].replacement;
if (!rdev)
rdev = conf->disks[i].rdev;
if (use_new_offset(conf, sh))
s = sh->sector + rdev->new_data_offset;
else
s = sh->sector + rdev->data_offset;
if (uptodate) {// the request completed successfully
set_bit(R5_UPTODATE, &sh->dev[i].flags);// mark the buffer up to date: its data is now current
...// error handling omitted
rdev_dec_pending(rdev, conf->mddev);
clear_bit(R5_LOCKED, &sh->dev[i].flags);// unlock the buffer
set_bit(STRIPE_HANDLE, &sh->state);// mark sh as needing handling
release_stripe(sh);// release sh for further processing
}
The callback's main job is to check that the request completed correctly, set R5_UPTODATE on the corresponding buffer, and then queue sh for handling again. Much of the code is error handling; with that ignored, it looks quite simple.
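The mechanism is the classic completion-callback pattern: the submitter stores a function pointer and a private pointer in the request, and whoever finishes the I/O invokes it. A tiny user-space sketch of the idea (fake_bio, fake_end_read and the string payload are invented names for this sketch, not kernel APIs):
#include <stdio.h>

struct fake_bio {
    void (*end_io)(struct fake_bio *bio, int error); /* analogous to bi_end_io */
    void *private;                                   /* analogous to bi_private */
    int   uptodate;
};

static void fake_end_read(struct fake_bio *bio, int error)
{
    if (!error) {
        /* analogous to setting R5_UPTODATE and clearing R5_LOCKED */
        bio->uptodate = 1;
        printf("read for %s done, buffer now up to date\n",
               (const char *)bio->private);
    }
}

int main(void)
{
    struct fake_bio bi = { .end_io = fake_end_read, .private = "d1", .uptodate = 0 };
    /* ... the request is submitted; later the "block layer" completes it: */
    bi.end_io(&bi, 0);
    return 0;
}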
After the callback runs, sh's state and the dev flags become:
5.after raid5_end_read_request()
sh:STRIPE_ACTIVE,STRIPE_HANDLE,STRIPE_PREREAD_ACTIVE,STRIPE_IO_STARTED
d0:R5_OVERWRITE,R5_Insync
d1,d2:R5_Insync,R5_UPTODATE
d3:R5_Insync
When the raid5d daemon is woken and processes sh again, the pages of d1 and d2 now hold the corresponding on-disk data, so the new parity can be computed. As you would expect, the next steps are computing the parity and writing the data to disk.
Entering handle_stripe() again and analysing sh with analyse_stripe(), we have:
6.after analyse_stripe()
sh:STRIPE_ACTIVE,STRIPE_HANDLE,STRIPE_PREREAD_ACTIVE,STRIPE_IO_STARTED
d0:R5_OVERWRITE,R5_Insync
d1,d2:R5_Insync,R5_UPTODATE
d3:R5_Insync
pd_idx=3,uptodate=2,to_write=1
Since to_write = 1, handle_stripe_dirtying() is entered once more and rmw and rcw are recomputed, giving rmw = 2, rcw = 0. That satisfies the final branch, so we enter schedule_reconstruction().
schedule_reconstruction
Stepping in:
static void
schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
int rcw, int expand)
{
int i, pd_idx = sh->pd_idx, disks = sh->disks;
struct r5conf *conf = sh->raid_conf;
int level = conf->level;
if (rcw) {// here the rcw argument is (rcw count == 0), which is true in our example, so this branch is taken
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
if (dev->towrite) {
set_bit(R5_LOCKED, &dev->flags);
set_bit(R5_Wantdrain, &dev->flags);
if (!expand)
clear_bit(R5_UPTODATE, &dev->flags);
s->locked++;
}
}
/* if we are not expanding this is a proper write request, and
* there will be bios with new data to be drained into the
* stripe cache
*/
if (!expand) {// we take this branch
if (!s->locked)
/* False alarm, nothing to do */
return;
sh->reconstruct_state = reconstruct_state_drain_run;// set the reconstruct state
set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);// request the bio-drain operation
} else
sh->reconstruct_state = reconstruct_state_run;
set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);// request the reconstruct operation
if (s->locked + conf->max_degraded == disks)
if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
atomic_inc(&conf->pending_full_writes);
} else {
BUG_ON(level == 6);
BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
for (i = disks; i--; ) {// the rmw (prexor) path: mark the buffers to be drained
struct r5dev *dev = &sh->dev[i];
if (i == pd_idx)
continue;
if (dev->towrite &&
(test_bit(R5_UPTODATE, &dev->flags) ||
test_bit(R5_Wantcompute, &dev->flags))) {
set_bit(R5_Wantdrain, &dev->flags);// request copying the bio data into the dev page
set_bit(R5_LOCKED, &dev->flags);// lock the buffer
clear_bit(R5_UPTODATE, &dev->flags);// clear the buffer's up-to-date flag
s->locked++;
}
}
if (!s->locked)
/* False alarm - nothing to do */
return;
sh->reconstruct_state = reconstruct_state_prexor_drain_run;
set_bit(STRIPE_OP_PREXOR, &s->ops_request);
set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
}
/* keep the parity disk(s) locked while asynchronous operations
* are in flight
*/
set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);// lock the parity buffer
clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);// clear the parity buffer's up-to-date flag
s->locked++;
if (level == 6) {
int qd_idx = sh->qd_idx;
struct r5dev *dev = &sh->dev[qd_idx];
set_bit(R5_LOCKED, &dev->flags);
clear_bit(R5_UPTODATE, &dev->flags);
s->locked++;
}
pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
__func__, (unsigned long long)sh->sector,
s->locked, s->ops_request);
}
We encountered this function once before, but the condition for calling it was not met then. Once either of the rmw/rcw counts computed earlier drops to 0, the stripe_head has everything it needs and the write can proceed. Now rcw = 0, so a reconstruct write can be performed: d1 and d2 already hold the on-disk data in their pages, and to compute the parity we also need d0's data, which still sits in d0's bio. It therefore has to be copied from the bio's pages into d0's page; that is exactly what R5_Wantdrain means. At the same time sh is put into the reconstruct state and the requested operations are recorded in s->ops_request.
After it runs, we have:
7.after schedule_reconstruction()
sh:STRIPE_ACTIVE,STRIPE_HANDLE,STRIPE_PREREAD_ACTIVE,STRIPE_IO_STARTED
d0:R5_LOCKED,R5_OVERWRITE,R5_Insync,R5_Wantdrain
d1,d2:R5_Insync,R5_UPTODATE
d3:R5_Insync,R5_LOCKED
sh->reconstruct_state=reconstruct_state_drain_run
s->ops_request=STRIPE_OP_RECONSTRUCT,STRIPE_OP_BIODRAIN
Back in handle_stripe(), we now satisfy:
if (s.ops_request)
raid_run_ops(sh, s.ops_request);
and we enter raid_run_ops().
raid_run_ops
static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
{
int overlap_clear = 0, i, disks = sh->disks;
struct dma_async_tx_descriptor *tx = NULL;
struct r5conf *conf = sh->raid_conf;
int level = conf->level;
struct raid5_percpu *percpu;
unsigned long cpu;
printk("+++raid_run_ops : stripe %llu\n",(unsigned long long)sh->sector);
cpu = get_cpu();
percpu = per_cpu_ptr(conf->percpu, cpu);
if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {// copy dev page data into the bio's pages
ops_run_biofill(sh);
overlap_clear++;
}
if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {// compute operation
if (level < 6)
tx = ops_run_compute5(sh, percpu);
else {
if (sh->ops.target2 < 0 || sh->ops.target < 0)
tx = ops_run_compute6_1(sh, percpu);
else
tx = ops_run_compute6_2(sh, percpu);
}
/* terminate the chain if reconstruct is not set to be run */
if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request))
async_tx_ack(tx);
}
if (test_bit(STRIPE_OP_PREXOR, &ops_request))
tx = ops_run_prexor(sh, percpu, tx);
if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {// copy the bio's pages into the dev pages; we take this branch
tx = ops_run_biodrain(sh, tx);
overlap_clear++;
}
if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {// reconstruct this sh; we take this branch
if (level < 6)
ops_run_reconstruct5(sh, percpu, tx);
else
ops_run_reconstruct6(sh, percpu, tx);
}
if (test_bit(STRIPE_OP_CHECK, &ops_request)) {// check operation
if (sh->check_state == check_state_run)
ops_run_check_p(sh, percpu);
else if (sh->check_state == check_state_run_q)
ops_run_check_pq(sh, percpu, 0);
else if (sh->check_state == check_state_run_pq)
ops_run_check_pq(sh, percpu, 1);
else
BUG();
}
if (overlap_clear)
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
if (test_and_clear_bit(R5_Overlap, &dev->flags))
wake_up(&sh->raid_conf->wait_for_overlap);
}
put_cpu();
}
This function dispatches on the value of ops_request. Given the ops_request computed above, it enters ops_run_biodrain() and then ops_run_reconstruct5() in turn. Let's step into ops_run_biodrain() first.
ops_run_biodrain
Stepping in:
static struct dma_async_tx_descriptor *
ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
int disks = sh->disks;
int i;
pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
struct bio *chosen;
if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {// pick the buffers that need draining and clear the flag
struct bio *wbi;
spin_lock_irq(&sh->stripe_lock);
chosen = dev->towrite;// detach the write requests from the dev's towrite list
dev->towrite = NULL;
BUG_ON(dev->written);
wbi = dev->written = chosen;// move them onto the written list: "written" here means written into the cache buffer
spin_unlock_irq(&sh->stripe_lock);
WARN_ON(dev->page != dev->orig_page);
while (wbi && wbi->bi_iter.bi_sector <
dev->sector + STRIPE_SECTORS) {
if (wbi->bi_rw & REQ_FUA)
set_bit(R5_WantFUA, &dev->flags);
if (wbi->bi_rw & REQ_SYNC)
set_bit(R5_SyncIO, &dev->flags);
if (wbi->bi_rw & REQ_DISCARD)
set_bit(R5_Discard, &dev->flags);
else {
tx = async_copy_data(1, wbi, &dev->page,
dev->sector, tx, sh);// via a DMA descriptor, copy the request's page data into the dev page
if (dev->page != dev->orig_page) {
set_bit(R5_SkipCopy, &dev->flags);
clear_bit(R5_UPTODATE, &dev->flags);
clear_bit(R5_OVERWRITE, &dev->flags);
}
}
wbi = r5_next_bio(wbi, dev->sector);
}
}
}
return tx;
}
This operation is simple: it copies the data from the request into the buffer via a DMA descriptor, and moves the request from the buffer's towrite list onto its written list. The copy itself is done by async_copy_data(); let's step into it.
async_copy_data
static struct dma_async_tx_descriptor *
async_copy_data(int frombio, struct bio *bio, struct page **page,
sector_t sector, struct dma_async_tx_descriptor *tx,
struct stripe_head *sh)
{
struct bio_vec bvl;
struct bvec_iter iter;
struct page *bio_page;
int page_offset;
struct async_submit_ctl submit;
enum async_tx_flags flags = 0;
if (bio->bi_iter.bi_sector >= sector)
page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
else
page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
if (frombio)
flags |= ASYNC_TX_FENCE;
init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
bio_for_each_segment(bvl, bio, iter) {
int len = bvl.bv_len;
int clen;
int b_offset = 0;
if (page_offset < 0) {
b_offset = -page_offset;
page_offset += b_offset;
len -= b_offset;
}
if (len > 0 && page_offset + len > STRIPE_SIZE)
clen = STRIPE_SIZE - page_offset;
else
clen = len;
if (clen > 0) {
b_offset += bvl.bv_offset;
bio_page = bvl.bv_page;
if (frombio) {
if (sh->raid_conf->skip_copy &&
b_offset == 0 && page_offset == 0 &&
clen == STRIPE_SIZE)
*page = bio_page;
else
tx = async_memcpy(*page, bio_page, page_offset,
b_offset, clen, &submit);
} else
tx = async_memcpy(bio_page, *page, b_offset,
page_offset, clen, &submit);
}
/* chain the operations */
submit.depend_tx = tx;
if (clen < len) /* hit end of page */
break;
page_offset += len;
}
return tx;
}
This function just works out the exact range of data to copy, so the copy lands in the right place, and finally performs it with async_memcpy() (the async_tx copy helper). We won't dwell on it further.
Once that is done, we return to raid_run_ops() and carry out the reconstruct operation by entering ops_run_reconstruct5().
ops_run_reconstruct5
Stepping in:
static void
ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
struct dma_async_tx_descriptor *tx)
{
int disks = sh->disks;
struct page **xor_srcs = percpu->scribble;
struct async_submit_ctl submit;
int count = 0, pd_idx = sh->pd_idx, i;
struct page *xor_dest;
int prexor = 0;
unsigned long flags;
pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
for (i = 0; i < sh->disks; i++) {// skip the parity disk and look for a data disk that is not marked Discard
if (pd_idx == i)
continue;
if (!test_bit(R5_Discard, &sh->dev[i].flags))
break;
}
if (i >= sh->disks) {
atomic_inc(&sh->count);
set_bit(R5_Discard, &sh->dev[pd_idx].flags);
ops_complete_reconstruct(sh);
return;
}
/* check if prexor is active which means only process blocks
* that are part of a read-modify-write (written)
*/
if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
prexor = 1;
xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
if (dev->written)
xor_srcs[count++] = dev->page;
}
} else {// xor_dest is the parity disk's page and xor_srcs collects the data buffers' pages; the new parity is the XOR of the xor_srcs pages and is stored in xor_dest
xor_dest = sh->dev[pd_idx].page;
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
if (i != pd_idx)
xor_srcs[count++] = dev->page;
}
}
/* 1/ if we prexor'd then the dest is reused as a source
* 2/ if we did not prexor then we are redoing the parity
* set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
* for the synchronous xor case
*/
flags = ASYNC_TX_ACK |
(prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);
atomic_inc(&sh->count);
init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh,
to_addr_conv(sh, percpu));// register ops_complete_reconstruct as the callback to run when the computation finishes
if (unlikely(count == 1))
tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
else
tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);// compute the parity
}
This function computes the new parity. Thanks to the previous steps, d0's request data has been copied into its buffer and d1's and d2's buffers are already up to date, so the parity computation can go ahead, with ops_complete_reconstruct() registered as the completion callback.
Let's look at that callback next.
ops_complete_reconstruct
Stepping in:
static void ops_complete_reconstruct(void *stripe_head_ref)
{
struct stripe_head *sh = stripe_head_ref;
int disks = sh->disks;
int pd_idx = sh->pd_idx;
int qd_idx = sh->qd_idx;
int i;
bool fua = false, sync = false, discard = false;
pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
for (i = disks; i--; ) {
fua |= test_bit(R5_WantFUA, &sh->dev[i].flags);
sync |= test_bit(R5_SyncIO, &sh->dev[i].flags);
discard |= test_bit(R5_Discard, &sh->dev[i].flags);
}
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
if (dev->written || i == pd_idx || i == qd_idx) {
if (!discard && !test_bit(R5_SkipCopy, &dev->flags))
set_bit(R5_UPTODATE, &dev->flags);// in our example d0 and the parity d3 become up to date
if (fua)
set_bit(R5_WantFUA, &dev->flags);
if (sync)
set_bit(R5_SyncIO, &dev->flags);
}
}
if (sh->reconstruct_state == reconstruct_state_drain_run)// advance the reconstruct state
sh->reconstruct_state = reconstruct_state_drain_result;
else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
sh->reconstruct_state = reconstruct_state_prexor_drain_result;
else {
BUG_ON(sh->reconstruct_state != reconstruct_state_run);
sh->reconstruct_state = reconstruct_state_result;
}
set_bit(STRIPE_HANDLE, &sh->state);
release_stripe(sh);// release sh for further processing
}
This function is simple: it marks the written buffers and the parity buffer up to date, and advances sh's reconstruct state to reconstruct_state_drain_result.
After this completes, sh's state and the dev flags are:
8. after ops_complete_reconstruct()
sh:STRIPE_ACTIVE,STRIPE_HANDLE,STRIPE_PREREAD_ACTIVE,STRIPE_IO_STARTED
d0:R5_LOCKED,R5_OVERWRITE,R5_Insync,R5_UPTODATE
d1,d2:R5_Insync,R5_UPTODATE
d3:R5_Insync,R5_LOCKED,R5_UPTODATE
sh->reconstruct_state=reconstruct_state_drain_result
When ops_run_io() is entered again, no dev is flagged R5_Wantread or R5_Wantwrite, so no I/O is issued.
When the daemon wakes again and processes sh, handle_stripe() is entered for the third time. Since sh->reconstruct_state is now reconstruct_state_drain_result, the following branch is taken:
if (sh->reconstruct_state == reconstruct_state_drain_result ||
sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
sh->reconstruct_state = reconstruct_state_idle;
/* All the 'written' buffers and the parity block are ready to
* be written back to disk
*/
BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) &&
!test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags));
BUG_ON(sh->qd_idx >= 0 &&
!test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) &&
!test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags));
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
if (test_bit(R5_LOCKED, &dev->flags) &&
(i == sh->pd_idx || i == sh->qd_idx ||
dev->written)) {// the parity disk, or a buffer whose write has been drained into the cache
pr_debug("Writing block %d\n", i);
set_bit(R5_Wantwrite, &dev->flags);// mark want-write: the buffer's data must be written out to disk
if (prexor)
continue;
if (s.failed > 1)
continue;
if (!test_bit(R5_Insync, &dev->flags) ||
((i == sh->pd_idx || i == sh->qd_idx) &&
s.failed == 0))
set_bit(STRIPE_INSYNC, &sh->state);
}
}
if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
s.dec_preread_active = 1;
}
This follows on from the reconstruct step above. Once reconstruction is done, all the data in sh is current, but d0's data and the parity buffer have not yet reached the disks, so they must be written out; this branch exists to set the R5_Wantwrite flags for that.
Note: after this branch, handle_stripe_clean_event() is not entered, because the parity buffer is still LOCKED (the clean-up branch requires the parity dev to be unlocked); control falls through to ops_run_io().
This time through ops_run_io(), d0 and d3 are flagged R5_Wantwrite, so write I/O is issued and the data travels from the buffers to the disks.
When the I/O completes, the callback raid5_end_write_request() runs; after it finishes, the data is on disk, and sh's state and the dev flags are:
9. after raid5_end_write_request()
sh:STRIPE_HANDLE,STRIPE_INSYNC,STRIPE_IO_STARTED
d0:R5_UPTODATE,R5_OVERWRITE,R5_Insync
d1,d2:R5_UPTODATE,R5_Insync
d3:R5_UPTODATE,R5_Insync
When raid5d is woken once more and processes sh, handle_stripe() is entered again and analysing sh gives:
10.after analyse_stripe()
sh:STRIPE_HANDLE,STRIPE_INSYNC,STRIPE_IO_STARTED
d0:R5_UPTODATE,R5_OVERWRITE,R5_Insync
d1,d2:R5_UPTODATE,R5_Insync
d3:R5_UPTODATE,R5_Insync
pd_idx=3,uptodate=4,written=1
This now satisfies the branch:
if (s.written &&
(s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
&& !test_bit(R5_LOCKED, &pdev->flags)
&& (test_bit(R5_UPTODATE, &pdev->flags) ||
test_bit(R5_Discard, &pdev->flags))))) &&
(s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
&& !test_bit(R5_LOCKED, &qdev->flags)
&& (test_bit(R5_UPTODATE, &qdev->flags) ||
test_bit(R5_Discard, &qdev->flags))))))
handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
and we enter handle_stripe_clean_event().
handle_stripe_clean_event
Stepping in:
/* handle_stripe_clean_event
* any written block on an uptodate or failed drive can be returned.
* Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
* never LOCKED, so we don't need to test 'failed' directly.
*/
static void handle_stripe_clean_event(struct r5conf *conf,
struct stripe_head *sh, int disks, struct bio **return_bi)
{
int i;
struct r5dev *dev;
int discard_pending = 0;
for (i = disks; i--; )
if (sh->dev[i].written) {
dev = &sh->dev[i];
if (!test_bit(R5_LOCKED, &dev->flags) &&
(test_bit(R5_UPTODATE, &dev->flags) ||
test_bit(R5_Discard, &dev->flags) ||
test_bit(R5_SkipCopy, &dev->flags))) {
/* We can return any write requests */
struct bio *wbi, *wbi2;
pr_debug("Return write for disc %d\n", i);
if (test_and_clear_bit(R5_Discard, &dev->flags))
clear_bit(R5_UPTODATE, &dev->flags);
if (test_and_clear_bit(R5_SkipCopy, &dev->flags)) {
WARN_ON(test_bit(R5_UPTODATE, &dev->flags));
dev->page = dev->orig_page;
}
wbi = dev->written;
dev->written = NULL;// remove the requests from the written list
while (wbi && wbi->bi_iter.bi_sector <
dev->sector + STRIPE_SECTORS) {
wbi2 = r5_next_bio(wbi, dev->sector);
if (!raid5_dec_bi_active_stripes(wbi)) {
md_write_end(conf->mddev);
wbi->bi_next = *return_bi;
*return_bi = wbi;
}
wbi = wbi2;
}
bitmap_endwrite(conf->mddev->bitmap, sh->sector,
STRIPE_SECTORS,
!test_bit(STRIPE_DEGRADED, &sh->state),
0);
} else if (test_bit(R5_Discard, &dev->flags))
discard_pending = 1;
WARN_ON(test_bit(R5_SkipCopy, &dev->flags));
WARN_ON(dev->page != dev->orig_page);
}
if (!discard_pending &&
test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) {
clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
if (sh->qd_idx >= 0) {
clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags);
clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags);
}
/* now that discard is done we can proceed with any sync */
clear_bit(STRIPE_DISCARD, &sh->state);
/*
* SCSI discard will change some bio fields and the stripe has
* no updated data, so remove it from hash list and the stripe
* will be reinitialized
*/
spin_lock_irq(&conf->device_lock);
remove_hash(sh);
spin_unlock_irq(&conf->device_lock);
if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
set_bit(STRIPE_HANDLE, &sh->state);
}
if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
if (atomic_dec_and_test(&conf->pending_full_writes))
md_wakeup_thread(conf->mddev->thread);
}
Looking at the function as a whole, the key point is the comment at its head: this function is the cleanup crew. When the party is over somebody has to tidy up, and that is what it does. It detaches the finished requests from sh: there are only 256 stripe_heads (by default), so they have to be recycled, and completed requests must be removed from them. The buffered data itself is kept, though; discarding it would defeat the whole purpose of the cache.
Finally we return to handle_stripe() and run ops_run_io(), which this time does nothing. The bio is returned to its submitter, and the write request is complete.
Summary
The RAID5 write path really boils down to a few steps (a small end-to-end sketch follows the list):
- Add the bio to the corresponding stripe_head
- Choose a write strategy (rmw or rcw)
- Read in the disk data that is needed
- Compute the new parity
- Write the new data and the parity to disk
- Detach the bio
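To tie the steps together, here is a compact user-space simulation of the rcw path followed in this article for our 4KB overwrite of d0 on a 4-disk array (all names and the in-memory "disk" arrays are illustrative, with memcpy and XOR standing in for the async engine and the block layer):
#include <assert.h>
#include <string.h>

#define STRIPE_SIZE 4096
#define NDISKS      4                              /* d0..d2 data, d3 parity */

static unsigned char disk[NDISKS][STRIPE_SIZE];    /* "on disk" data */
static unsigned char cache[NDISKS][STRIPE_SIZE];   /* stripe cache (dev pages) */

static void xor_into(unsigned char *dst, const unsigned char *src)
{
    for (int i = 0; i < STRIPE_SIZE; i++)
        dst[i] ^= src[i];
}

int main(void)
{
    unsigned char new_d0[STRIPE_SIZE];

    /* initial on-disk state: some data plus a consistent parity on d3 */
    memset(disk[0], 0x01, STRIPE_SIZE);
    memset(disk[1], 0x02, STRIPE_SIZE);
    memset(disk[2], 0x04, STRIPE_SIZE);
    memcpy(disk[3], disk[0], STRIPE_SIZE);
    xor_into(disk[3], disk[1]);
    xor_into(disk[3], disk[2]);

    memset(new_d0, 0x08, STRIPE_SIZE);             /* the 4KB write aimed at d0 */

    /* 1) rcw chosen: read the untouched data disks d1, d2 into the cache */
    memcpy(cache[1], disk[1], STRIPE_SIZE);
    memcpy(cache[2], disk[2], STRIPE_SIZE);
    /* 2) biodrain: copy the bio's data into d0's page */
    memcpy(cache[0], new_d0, STRIPE_SIZE);
    /* 3) reconstruct: new parity = d0' ^ d1 ^ d2 */
    memcpy(cache[3], cache[0], STRIPE_SIZE);
    xor_into(cache[3], cache[1]);
    xor_into(cache[3], cache[2]);
    /* 4) write back the new data and the new parity */
    memcpy(disk[0], cache[0], STRIPE_SIZE);
    memcpy(disk[3], cache[3], STRIPE_SIZE);

    /* the stripe on disk is consistent again: d3 == d0 ^ d1 ^ d2 */
    unsigned char check[STRIPE_SIZE];
    memcpy(check, disk[0], STRIPE_SIZE);
    xor_into(check, disk[1]);
    xor_into(check, disk[2]);
    assert(memcmp(check, disk[3], STRIPE_SIZE) == 0);
    return 0;
}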
All the best ~ ^_^