Lines matching refs:dd_idx in drivers/md/raid5.c (the Linux md RAID4/5/6 driver). dd_idx is the data-disk index: for a given logical sector it names the slot inside a stripe_head that holds the data block, as opposed to the parity slots pd_idx (P) and, in RAID6, qd_idx (Q). Each match below is shown with its source line number and enclosing function.
821 int dd_idx; in stripe_add_to_batch_list() local
869 dd_idx = 0; in stripe_add_to_batch_list()
870 while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) in stripe_add_to_batch_list()
871 dd_idx++; in stripe_add_to_batch_list()
872 if (head->dev[dd_idx].towrite->bi_opf != sh->dev[dd_idx].towrite->bi_opf || in stripe_add_to_batch_list()
873 bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite)) in stripe_add_to_batch_list()
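In stripe_add_to_batch_list() above (821-873), dd_idx only needs to land on some data slot: it starts at 0 and steps past the P and Q parity slots so the head stripe's and candidate stripe's pending writes can be compared on an actual data device (the check at 872-873 refuses to batch stripes whose towrite bios differ in operation or flags). A minimal standalone sketch of that scan, with illustrative pd_idx/qd_idx values (for RAID5, which has no Q slot, qd_idx would simply never match):

```c
/* Minimal sketch: find the first data slot of a stripe by skipping
 * the parity slots, mirroring lines 869-871 above. The pd_idx/qd_idx
 * values in main() are illustrative, not taken from a live array. */
#include <stdio.h>

static int first_data_slot(int pd_idx, int qd_idx)
{
	int dd_idx = 0;

	while (dd_idx == pd_idx || dd_idx == qd_idx)
		dd_idx++;
	return dd_idx;
}

int main(void)
{
	/* P in slot 0 and Q in slot 1: the first data slot is 2. */
	printf("%d\n", first_data_slot(0, 1));
	return 0;
}
```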
2916 int previous, int *dd_idx, in raid5_compute_sector() argument
2945 *dd_idx = sector_div(stripe, data_disks); in raid5_compute_sector()
2959 if (*dd_idx >= pd_idx) in raid5_compute_sector()
2960 (*dd_idx)++; in raid5_compute_sector()
2964 if (*dd_idx >= pd_idx) in raid5_compute_sector()
2965 (*dd_idx)++; in raid5_compute_sector()
2969 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; in raid5_compute_sector()
2973 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; in raid5_compute_sector()
2977 (*dd_idx)++; in raid5_compute_sector()
2993 (*dd_idx)++; /* Q D D D P */ in raid5_compute_sector()
2995 } else if (*dd_idx >= pd_idx) in raid5_compute_sector()
2996 (*dd_idx) += 2; /* D D P Q D */ in raid5_compute_sector()
3002 (*dd_idx)++; /* Q D D D P */ in raid5_compute_sector()
3004 } else if (*dd_idx >= pd_idx) in raid5_compute_sector()
3005 (*dd_idx) += 2; /* D D P Q D */ in raid5_compute_sector()
3010 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; in raid5_compute_sector()
3015 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; in raid5_compute_sector()
3021 (*dd_idx) += 2; in raid5_compute_sector()
3035 (*dd_idx)++; /* Q D D D P */ in raid5_compute_sector()
3037 } else if (*dd_idx >= pd_idx) in raid5_compute_sector()
3038 (*dd_idx) += 2; /* D D P Q D */ in raid5_compute_sector()
3051 (*dd_idx)++; /* Q D D D P */ in raid5_compute_sector()
3053 } else if (*dd_idx >= pd_idx) in raid5_compute_sector()
3054 (*dd_idx) += 2; /* D D P Q D */ in raid5_compute_sector()
3062 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; in raid5_compute_sector()
3069 if (*dd_idx >= pd_idx) in raid5_compute_sector()
3070 (*dd_idx)++; in raid5_compute_sector()
3076 if (*dd_idx >= pd_idx) in raid5_compute_sector()
3077 (*dd_idx)++; in raid5_compute_sector()
3083 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); in raid5_compute_sector()
3089 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); in raid5_compute_sector()
3095 (*dd_idx)++; in raid5_compute_sector()
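raid5_compute_sector() (2916-3095) derives dd_idx in two steps: sector_div(stripe, data_disks) at 2945 yields the raw index of the data block within its stripe, and the layout branches then translate that raw index into a physical slot. Two patterns recur above: asymmetric layouts keep the raw index but step over the parity slot (the (*dd_idx)++ when *dd_idx >= pd_idx), while symmetric layouts rotate the data to begin just after parity ((pd_idx + 1 + *dd_idx) % raid_disks). The RAID6 branches apply the same idea around both P and Q, hence (*dd_idx) += 2 for the D D P Q D placement and a single ++ for Q D D D P. A self-contained sketch of the two RAID5 adjustments, with illustrative geometry and function names:

```c
/* Sketch of the two RAID5 dd_idx adjustments visible above, applied
 * after sector_div() has produced the raw data index. Function names
 * and the geometry in main() are illustrative. */
#include <stdio.h>

/* D D P D: data keeps its raw slot unless it would land on parity. */
static int dd_idx_asymmetric(int raw_idx, int pd_idx)
{
	return raw_idx >= pd_idx ? raw_idx + 1 : raw_idx;
}

/* Rotated placement: data slots follow the parity slot, wrapping. */
static int dd_idx_symmetric(int raw_idx, int pd_idx, int raid_disks)
{
	return (pd_idx + 1 + raw_idx) % raid_disks;
}

int main(void)
{
	int raid_disks = 4, pd_idx = 1, raw;

	for (raw = 0; raw < raid_disks - 1; raw++)
		printf("raw %d -> asymmetric %d, symmetric %d\n", raw,
		       dd_idx_asymmetric(raw, pd_idx),
		       dd_idx_symmetric(raw, pd_idx, raid_disks));
	return 0;
}
```

Either way the resulting dd_idx never collides with pd_idx, which is what lets callers index sh->dev[dd_idx] directly.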
3130 int dummy1, dd_idx = i; in raid5_compute_blocknr() local
3228 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx in raid5_compute_blocknr()
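raid5_compute_blocknr() is the inverse mapping, and the check at 3228 guards it: the computed block number is fed back through raid5_compute_sector() and must reproduce the original sector, dd_idx and pd_idx, otherwise the geometry code is self-inconsistent. A self-contained model of that round trip for a left-symmetric-style layout; the constants and helper names are illustrative, and whole chunks stand in for sectors:

```c
/* Toy model of the round-trip check at line 3228: mapping a logical
 * chunk to (stripe, dd_idx, pd_idx) and back must be the identity.
 * Geometry is a 5-disk, left-symmetric-style layout; helper names
 * are stand-ins for raid5_compute_sector()/raid5_compute_blocknr(). */
#include <assert.h>
#include <stdio.h>

#define RAID_DISKS 5
#define DATA_DISKS (RAID_DISKS - 1)

static void map_chunk(long chunk, long *stripe, int *dd_idx, int *pd_idx)
{
	int raw = (int)(chunk % DATA_DISKS);

	*stripe = chunk / DATA_DISKS;
	*pd_idx = DATA_DISKS - (int)(*stripe % RAID_DISKS); /* rotates */
	*dd_idx = (*pd_idx + 1 + raw) % RAID_DISKS;
}

static long unmap_chunk(long stripe, int dd_idx, int pd_idx)
{
	/* Undo the (pd_idx + 1 + raw) % RAID_DISKS rotation. */
	int raw = (dd_idx - pd_idx - 1 + 2 * RAID_DISKS) % RAID_DISKS;

	return stripe * DATA_DISKS + raw;
}

int main(void)
{
	for (long chunk = 0; chunk < 100; chunk++) {
		long stripe;
		int dd_idx, pd_idx;

		map_chunk(chunk, &stripe, &dd_idx, &pd_idx);
		assert(unmap_chunk(stripe, dd_idx, pd_idx) == chunk);
	}
	puts("round trip ok");
	return 0;
}
```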
3407 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in add_stripe_bio() argument
3419 sh->dev[dd_idx].write_hint = bi->bi_write_hint; in add_stripe_bio()
3424 bip = &sh->dev[dd_idx].towrite; in add_stripe_bio()
3428 bip = &sh->dev[dd_idx].toread; in add_stripe_bio()
3453 (i == dd_idx || sh->dev[i].towrite)) { in add_stripe_bio()
3479 sector_t sector = sh->dev[dd_idx].sector; in add_stripe_bio()
3480 for (bi=sh->dev[dd_idx].towrite; in add_stripe_bio()
3481 sector < sh->dev[dd_idx].sector + RAID5_STRIPE_SECTORS(conf) && in add_stripe_bio()
3483 bi = r5_next_bio(conf, bi, sh->dev[dd_idx].sector)) { in add_stripe_bio()
3487 if (sector >= sh->dev[dd_idx].sector + RAID5_STRIPE_SECTORS(conf)) in add_stripe_bio()
3488 if (!test_and_set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags)) in add_stripe_bio()
3494 (unsigned long long)sh->sector, dd_idx); in add_stripe_bio()
3527 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); in add_stripe_bio()
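add_stripe_bio() (3407-3527) hooks a bio onto dev[dd_idx].towrite or .toread, failing (so the caller can back off) when it overlaps a bio already queued there, as the R5_Overlap bit at 3527 records. The loop at 3479-3488 walks the sorted towrite list to see whether the queued writes now cover the device's entire chunk; if so, R5_OVERWRITE is set and the old contents never need to be read in. A simplified stand-in for that coverage walk (mini_bio is not the kernel's struct bio):

```c
/* Simplified version of the overwrite check at lines 3479-3488: scan
 * a list of bios sorted by start sector and report whether they cover
 * [start, start + stripe_sectors) without a gap. */
#include <stdbool.h>
#include <stdio.h>

struct mini_bio {
	unsigned long sector;	/* first sector covered */
	unsigned long sectors;	/* length in sectors */
	struct mini_bio *next;	/* next bio queued on the same slot */
};

static bool covers_whole_chunk(const struct mini_bio *towrite,
			       unsigned long start,
			       unsigned long stripe_sectors)
{
	unsigned long sector = start;
	const struct mini_bio *bi;

	for (bi = towrite;
	     sector < start + stripe_sectors && bi && bi->sector <= sector;
	     bi = bi->next)
		if (bi->sector + bi->sectors >= sector)
			sector = bi->sector + bi->sectors;

	return sector >= start + stripe_sectors;
}

int main(void)
{
	struct mini_bio b2 = { 4, 4, NULL };
	struct mini_bio b1 = { 0, 4, &b2 };

	/* Two back-to-back 4-sector writes cover an 8-sector chunk. */
	printf("full overwrite: %d\n", covers_whole_chunk(&b1, 0, 8));
	return 0;
}
```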
3539 int dd_idx; in stripe_set_idx() local
3547 &dd_idx, sh); in stripe_set_idx()
4524 int dd_idx, j; in handle_stripe_expansion() local
4530 &dd_idx, NULL); in handle_stripe_expansion()
4539 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) { in handle_stripe_expansion()
4547 tx = async_memcpy(sh2->dev[dd_idx].page, in handle_stripe_expansion()
4548 sh->dev[i].page, sh2->dev[dd_idx].offset, in handle_stripe_expansion()
4552 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags); in handle_stripe_expansion()
4553 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags); in handle_stripe_expansion()
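handle_stripe_expansion() (4524-4553) runs during a reshape: each source block i is re-mapped through raid5_compute_sector() under the new geometry to find which slot dd_idx of the destination stripe sh2 it belongs in, copied across with async_memcpy(), and marked R5_Expanded and R5_UPTODATE. A toy model of the copy step, with plain memcpy standing in for async_memcpy and an illustrative dd_idx:

```c
/* Toy model of the copy at lines 4547-4553: drop a source page into
 * slot dd_idx of the destination stripe. memcpy stands in for the
 * kernel's async_memcpy; PAGE_SZ and dd_idx are illustrative. */
#include <stdio.h>
#include <string.h>

#define PAGE_SZ 16

static void expand_copy(char dst[][PAGE_SZ], const char *src_page,
			int dd_idx)
{
	memcpy(dst[dd_idx], src_page, PAGE_SZ);
}

int main(void)
{
	char new_stripe[4][PAGE_SZ] = { { 0 } };
	const char old_block[PAGE_SZ] = "block-from-old";

	expand_copy(new_stripe, old_block, 2); /* dd_idx = 2 */
	printf("slot 2 now holds: %s\n", new_stripe[2]);
	return 0;
}
```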
5402 int bad_sectors, dd_idx; in raid5_read_one_chunk() local
5412 &dd_idx, NULL); in raid5_read_one_chunk()
5419 rdev = rcu_dereference(conf->disks[dd_idx].replacement); in raid5_read_one_chunk()
5422 rdev = rcu_dereference(conf->disks[dd_idx].rdev); in raid5_read_one_chunk()
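raid5_read_one_chunk() (5402-5422) services an aligned read directly from one member disk: dd_idx names the slot holding the requested chunk, and the rdev pick at 5419-5422 prefers that slot's replacement device (dereferenced under RCU in the real code) when it exists and has recovered, falling back to the primary rdev otherwise. A simplified model of that preference, without RCU or the kernel's In_sync/recovery_offset details:

```c
/* Simplified device pick mirroring lines 5419-5422: prefer a usable
 * replacement for the dd_idx slot, else the primary rdev. The structs
 * and the in_sync field are stand-ins for the kernel's md types. */
#include <stdio.h>

struct mini_rdev {
	const char *name;
	int in_sync;			/* 1 once recovery has finished */
};

struct mini_slot {
	struct mini_rdev *rdev;		/* primary device */
	struct mini_rdev *replacement;	/* may be NULL */
};

static struct mini_rdev *pick_read_rdev(const struct mini_slot *slot)
{
	struct mini_rdev *rdev = slot->replacement;

	if (!rdev || !rdev->in_sync)
		rdev = slot->rdev;	/* fall back to the primary */
	return rdev;
}

int main(void)
{
	struct mini_rdev primary = { "sda", 1 };
	struct mini_rdev repl = { "sdb", 0 };	/* still rebuilding */
	struct mini_slot slot = { &primary, &repl };

	printf("read from %s\n", pick_read_rdev(&slot)->name);
	return 0;
}
```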
5772 int dd_idx; in raid5_make_request() local
5864 &dd_idx, NULL); in raid5_make_request()
5905 !add_stripe_bio(sh, bi, dd_idx, rw, previous)) { in raid5_make_request()
5964 int dd_idx; in reshape_request() local
6149 1, &dd_idx, NULL); in reshape_request()
6153 1, &dd_idx, NULL); in reshape_request()
6322 int dd_idx; in retry_aligned_read() local
6330 0, &dd_idx, NULL); in retry_aligned_read()
6351 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) { in retry_aligned_read()
6358 set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags); in retry_aligned_read()