RAID5 IO處理之replace代碼詳解

1 作用
從字面意思理解,replacement即是替換 。我們知道硬盤都有一定的使用壽命,可以在硬盤失效之前通過該功能將舊盤的數據遷移至新盤 。因為replacement的流程是從舊盤中讀出數據直接寫入新盤,因此比重構少很多讀和校驗值計算的操作,效率更高 。
另外在raid2.0中 , 由於硬盤切片的使用方式,當系統只添加一塊新盤時無法直接給raid擴容,需要先進行資源均衡,使得各盤空閑空間一致後再擴容,所以replacement同樣適用于均衡場景中切片回收替換的邏輯 。
2 代碼解析
2.1 需替換設置
通過命令 echo want_replacement > /sys/block/md0/md/dev-sdb/state 設置硬盤標記為"需替換"狀態,該sys命令會執行如下代碼:
state_store()
 \_ /* Replacement標記表明該成員磁盤是新盤,新盤不能被設置為需替換 */
    if (rdev->raid_disk >= 0 && !test_bit(Replacement, &rdev->flags))
        /* 給舊盤設置標記,表明該成員磁盤是需要替換的 */
        set_bit(WantReplacement, &rdev->flags);
 \_ /* 設置md為需要重構狀態 */
    set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
 \_ /* 喚醒raid5d */
    md_wakeup_thread(rdev->mddev->thread);
    |- raid5d()
       \_ /* 檢查是否需要同步
          * 此時在sys接口的調用棧中,try_lock失敗直接退出,未創建同步線程
          */
          md_check_recovery()

2.2 加入新盤
通過命令 mdadm --manage -a /dev/md0 /dev/sde 給塊設備加入新盤,新盤加入后自動開始同步 。
函數調用關系如下:
md_ioctl()
 \_ add_new_disk()
raid5d()
 \_ md_check_recovery()
    \_ remove_and_add_spares()
       \_ raid5_add_disk()
    \_ md_register_thread()
    \_ md_wakeup_thread(mddev->sync_thread)
md_do_sync()
這里關鍵函數為 raid5_add_disk() , 在函數內設置了相關rdev的各項標記 , 這里只說明該函數的相關邏輯,如下:
static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev){struct r5conf *conf = mddev->private;struct disk_info *p;int first = 0;int last = conf->raid_disk - 1;/* 遍歷所有磁盤 */for (disk = first; disk <= last; disk++) {p = conf->disk + disk;/* 如果設置了需替換標記且尚未指定新盤 */if (test_bit(WantReplacement, &rdev->flags) &&p->replacement == NULL) {/* 設置磁盤狀態為未同步 */clear_bit(In_sync, &rdev->flags);/* 設置新盤在md中的磁盤索引 */rdev->raid_disk = disk;/* 設置md需要全盤同步 */config->fullsync = 1;/* 給replacement指針賦值使其指向新盤 */rcu_assign_pointer(p->replacement, rdev);break;}}}加入新盤后調用 md_do_sync() 會發起同步
2.3 條帶處理
在同步函數中,循環調用 sync_request() ,該函數主要邏輯如下:
/*
 * One step of the md sync loop: grab the stripe that covers sector_nr,
 * flag it as a sync request and push it through the stripe state
 * machine.  Returns the number of sectors handled in this step.
 * (Abridged excerpt; the declarations of sh/conf are omitted by the
 * article.)
 */
static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
	/* Try to get a free stripe without blocking. */
	sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
	if (sh == NULL) {
		/* None free: retry in blocking mode and yield for a tick. */
		sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
		schedule_timeout_uninterruptible(1);
	}
	/* Mark this stripe as a sync request ... */
	set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
	/* ... and feed it into the stripe state machine. */
	handle_stripe(sh);
	release_stripe(sh);
	return STRIPE_SECTORS;
}

2.3.1 下發讀請求
函數調用關系:
handle_stripe()
 \_ analyse_stripe()
 \_ handle_stripe_fill()
    \_ fetch_block()
 \_ ops_run_io()
代碼邏輯如下:
/*
 * Stripe state machine (abridged excerpt).  On the first pass of a
 * replacement sync it analyses the stripe, queues reads from the old
 * disk via handle_stripe_fill()/fetch_block(), and submits them in
 * ops_run_io().  The replace-write branches run on later passes, once
 * the reads have completed and s.locked drops back to 0.
 */
static void handle_stripe(struct stripe_head *sh)
{
	/* STRIPE_SYNC_REQUESTED was set by sync_request(). */
	if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
		spin_lock(&sh->stripe_lock);
		/* The stripe is not handling a DISCARD request here ... */
		if (!test_bit(STRIPE_DISCARD, &sh->state) &&
		    /* ... so consume the STRIPE_SYNC_REQUESTED flag. */
		    test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
			/* Mark the stripe as being synced ... */
			set_bit(STRIPE_SYNCING, &sh->state);
			/* ... and clear its "consistent" flag. */
			clear_bit(STRIPE_INSYNC, &sh->state);
		}
		spin_unlock(&sh->stripe_lock);
	}
	clear_bit(STRIPE_DELAYED, &sh->state);

	/* Analyse the stripe state (fills in s, sets s.replacing). */
	analyse_stripe(sh, &s);

	/* s.replacing is true, so we enter handle_stripe_fill(). */
	if (s.to_read || s.non_overwrite
	    || (conf->level == 6 && s.to_write && s.failed)
	    || (s.syncing && (s.uptodate + s.compute < disks))
	    || s.replacing
	    || s.expanding)
		handle_stripe_fill(sh, &s, disks);

	/*
	 * On this first pass the reads queued above made s.locked != 0,
	 * so the "s.locked == 0" condition fails and this branch is
	 * skipped; it runs on a later pass, after the reads complete.
	 */
	if (s.replacing && s.locked == 0
	    && !test_bit(STRIPE_INSYNC, &sh->state)) {
		/* Write out to replacement devices where possible */
		for (i = 0; i < conf->raid_disks; i++)
			if (test_bit(R5_UPTODATE, &sh->dev[i].flags) &&
			    test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
				set_bit(R5_WantReplace, &sh->dev[i].flags);
				set_bit(R5_LOCKED, &sh->dev[i].flags);
				s.locked++;
			}
		set_bit(STRIPE_INSYNC, &sh->state);
	}

	/*
	 * Likewise skipped on this pass (s.locked != 0); once the stripe
	 * is fully written it reports sync progress to md.
	 */
	if ((s.syncing || s.replacing) && s.locked == 0 &&
	    test_bit(STRIPE_INSYNC, &sh->state)) {
		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
		clear_bit(STRIPE_SYNCING, &sh->state);
		if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
			wake_up(&conf->wait_for_overlap);
	}

	/* Submit the queued read requests. */
	ops_run_io(sh, &s);
}

/*
 * Inspect every device of the stripe and summarise its state into *s.
 * For the replacement flow the key outcome is s->replacing = 1 and the
 * R5_NeedReplace flag on devices whose replacement lags behind.
 */
static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
{
	int do_recovery = 0;

	/* Walk all stripe devices under RCU. */
	rcu_read_lock();
	for (i=disks; i--; ) {
		/*
		 * The member disk that was given a replacement has
		 * conf->disks[i].replacement set, but during the sync
		 * rdev->recovery_offset >= sh->sector + STRIPE_SECTORS
		 * does not hold yet (recovery progress is still below
		 * sh->sector), so we fall through to the else branch.
		 */
		rdev = rcu_dereference(conf->disks[i].replacement);
		if (rdev && !test_bit(Faulty, &rdev->flags) &&
		    rdev->recovery_offset >= sh->sector + STRIPE_SECTORS &&
		    !is_badblock(rdev, sh->sector, STRIPE_SECTORS,
				 &first_bad, &bad_sectors))
			set_bit(R5_ReadRepl, &dev->flags);
		else {
			if (rdev)
				/* Flag this device as needing replacement. */
				set_bit(R5_NeedReplace, &dev->flags);
			rdev = rcu_dereference(conf->disks[i].rdev);
			clear_bit(R5_ReadRepl, &dev->flags);
		}
		/*
		 * In the replacement flow all disks are healthy, so
		 * do_recovery stays 0 and s->failed stays 0.
		 */
		if (!test_bit(R5_Insync, &dev->flags)) {
			if (s->failed < 2)
				s->failed_num[s->failed] = i;
			s->failed++;
			if (rdev && !test_bit(Faulty, &rdev->flags))
				do_recovery = 1;
		}
	}
	/* STRIPE_SYNCING was set in handle_stripe(). */
	if (test_bit(STRIPE_SYNCING, &sh->state)) {
		/* None of these conditions hold, so take the else branch. */
		if (do_recovery ||
		    sh->sector >= conf->mddev->recovery_cp ||
		    test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery)))
			s->syncing = 1;
		else
			s->replacing = 1;
	}
	rcu_read_unlock();
}

/*
 * Ask fetch_block() for every device whether its block must be read
 * (or computed), then schedule the stripe for further handling.
 */
static void handle_stripe_fill(struct stripe_head *sh,
			       struct stripe_head_state *s,
			       int disks)
{
	int i;

	/* No compute/check/reconstruct in flight: enter fetch_block(). */
	if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
	    !sh->reconstruct_state)
		for (i = disks; i--; )
			if (fetch_block(sh, s, i, disks))
				break;
	set_bit(STRIPE_HANDLE, &sh->state);
}

/*
 * Decide whether the block of device disk_idx must be obtained, and if
 * the backing disk is in-sync, queue a read for it (R5_Wantread).
 */
static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
		       int disk_idx, int disks)
{
	struct r5dev *dev = &sh->dev[disk_idx];
	struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]],
				  &sh->dev[s->failed_num[1]] };

	/* At this point no device has a request in flight or fresh data. */
	if (!test_bit(R5_LOCKED, &dev->flags) &&
	    !test_bit(R5_UPTODATE, &dev->flags) &&
	    (dev->toread ||
	     (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
	     s->syncing || s->expanding ||
	     /*
	      * want_replace() returns 1 when the member disk at disk_idx
	      * has a replacement and the stripe start is at or beyond the
	      * replacement's rebuild position; so during replacing, the
	      * member disks that were given a replacement enter this if.
	      */
	     (s->replacing && want_replace(sh, disk_idx)) ||
	     (s->failed >= 1 && fdev[0]->toread) ||
	     (s->failed >= 2 && fdev[1]->toread) ||
	     (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite &&
	      !test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
	     (sh->raid_conf->level == 6 && s->failed && s->to_write))) {
		/* we would like to get this block, possibly by computing it,
		 * otherwise read it if the backing disk is insync
		 */
		BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
		BUG_ON(test_bit(R5_Wantread, &dev->flags));
		/* Queue a read on the member disk that has a replacement. */
		if (test_bit(R5_Insync, &dev->flags)) {
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantread, &dev->flags);
			/* Bump the locked counter. */
			s->locked++;
		}
	}
	return 0;
}

/*
 * Build and submit a bio for every device flagged R5_Wantread
 * (abridged excerpt: only the read path is shown).
 */
static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
{
	/* Walk all stripe devices. */
	for (i = disks; i--; ) {
		/* Issue reads for devices flagged for reading ... */
		if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
			rw = READ;
		/* ... and skip the devices that need no read. */
		else
			continue;
		if (rdev) {
			bio_reset(bi);
			bi->bi_bdev = rdev->bdev;
			bi->bi_rw = rw;
			bi->bi_end_io = raid5_end_read_request;
			bi->bi_private = sh;
			atomic_inc(&sh->count);
			if (use_new_offset(conf, sh))
				bi->bi_sector = (sh->sector + rdev->new_data_offset);
			else
				bi->bi_sector = (sh->sector + rdev->data_offset);
			if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
				bi->bi_rw |= REQ_FLUSH;
			bi->bi_vcnt = 1;
			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			bi->bi_io_vec[0].bv_offset = 0;
			bi->bi_size = STRIPE_SIZE;
			/* Submit the bio. */
			generic_make_request(bi);
		}
	}
}

推薦閱讀