Lines Matching refs:map
124 static inline int nr_parity_stripes(struct map_lookup *map) in nr_parity_stripes() argument
126 if (map->type & BTRFS_BLOCK_GROUP_RAID5) in nr_parity_stripes()
128 else if (map->type & BTRFS_BLOCK_GROUP_RAID6) in nr_parity_stripes()
134 static inline int nr_data_stripes(struct map_lookup *map) in nr_data_stripes() argument
136 return map->num_stripes - nr_parity_stripes(map); in nr_data_stripes()
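
The two helpers above encode the RAID5/RAID6 parity layout: one parity stripe per row for RAID5, two (P and Q) for RAID6, and a data stripe count that is simply everything left over. A minimal standalone sketch of that arithmetic follows; the flag values and struct layout are simplified placeholders, not the real BTRFS_BLOCK_GROUP_* definitions.

/* Minimal sketch of the parity/data stripe arithmetic above.
 * Flag values and struct layout are placeholders, not the btrfs ones. */
#include <stdio.h>

#define BG_RAID5 (1ULL << 0)    /* placeholder flag */
#define BG_RAID6 (1ULL << 1)    /* placeholder flag */

struct map_sketch {
    unsigned long long type;    /* block group profile flags */
    int num_stripes;            /* total stripes, data + parity */
};

static int parity_stripes(const struct map_sketch *map)
{
    if (map->type & BG_RAID5)
        return 1;               /* RAID5: one parity stripe per row */
    if (map->type & BG_RAID6)
        return 2;               /* RAID6: P and Q parity stripes */
    return 0;
}

static int data_stripes(const struct map_sketch *map)
{
    return map->num_stripes - parity_stripes(map);
}

int main(void)
{
    struct map_sketch raid6 = { .type = BG_RAID6, .num_stripes = 6 };
    printf("data stripes: %d\n", data_stripes(&raid6));  /* prints 4 */
    return 0;
}
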
520 struct map_lookup *map; in read_one_chunk() local
549 map = kmalloc(btrfs_map_lookup_size(num_stripes), GFP_NOFS); in read_one_chunk()
550 if (!map) in read_one_chunk()
553 map->ce.start = logical; in read_one_chunk()
554 map->ce.size = length; in read_one_chunk()
555 map->num_stripes = num_stripes; in read_one_chunk()
556 map->io_width = btrfs_chunk_io_width(leaf, chunk); in read_one_chunk()
557 map->io_align = btrfs_chunk_io_align(leaf, chunk); in read_one_chunk()
558 map->sector_size = btrfs_chunk_sector_size(leaf, chunk); in read_one_chunk()
559 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk); in read_one_chunk()
560 map->type = btrfs_chunk_type(leaf, chunk); in read_one_chunk()
561 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); in read_one_chunk()
564 map->stripes[i].physical = in read_one_chunk()
570 map->stripes[i].dev = btrfs_find_device(fs_info, devid, uuid, in read_one_chunk()
572 if (!map->stripes[i].dev) { in read_one_chunk()
573 map->stripes[i].dev = fill_missing_device(devid); in read_one_chunk()
576 list_add(&map->stripes[i].dev->dev_list, in read_one_chunk()
581 ret = insert_cache_extent(&map_tree->cache_tree, &map->ce); in read_one_chunk()
585 map->ce.start, map->ce.size, ret); in read_one_chunk()
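
read_one_chunk() above copies the chunk item's geometry (stripe length, type, stripe and sub-stripe counts) plus each stripe's device and physical start into a map_lookup keyed by logical address, then inserts it into the mapping cache tree. Below is a simplified, self-contained sketch of that fill step using stand-in structs; the real code reads the fields through the btrfs_chunk_*() accessors on the extent buffer, resolves each devid to a struct btrfs_device, and inserts map->ce into the cache tree.

/* Simplified stand-ins; not the real btrfs structures. */
#include <stdlib.h>
#include <string.h>

struct stripe_sketch {
    unsigned long long devid;
    unsigned long long physical;
};

struct chunk_sketch {                  /* stand-in for the on-disk chunk item */
    unsigned long long length;
    unsigned long long type;
    unsigned long long stripe_len;
    int num_stripes;
    int sub_stripes;
    struct stripe_sketch stripes[];
};

struct map_sketch {                    /* stand-in for struct map_lookup */
    unsigned long long start;          /* logical start, used as cache key */
    unsigned long long size;
    unsigned long long type;
    unsigned long long stripe_len;
    int num_stripes;
    int sub_stripes;
    struct stripe_sketch stripes[];
};

struct map_sketch *map_from_chunk(unsigned long long logical,
                                  const struct chunk_sketch *chunk)
{
    size_t sz = sizeof(struct map_sketch) +
                chunk->num_stripes * sizeof(struct stripe_sketch);
    struct map_sketch *map = malloc(sz);

    if (!map)
        return NULL;
    map->start = logical;              /* chunk is looked up by logical address */
    map->size = chunk->length;
    map->type = chunk->type;
    map->stripe_len = chunk->stripe_len;
    map->num_stripes = chunk->num_stripes;
    map->sub_stripes = chunk->sub_stripes;
    memcpy(map->stripes, chunk->stripes,
           chunk->num_stripes * sizeof(struct stripe_sketch));
    return map;                        /* caller would insert this into the tree */
}
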
852 struct map_lookup *map; in btrfs_num_copies() local
870 map = container_of(ce, struct map_lookup, ce); in btrfs_num_copies()
872 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 | in btrfs_num_copies()
874 ret = map->num_stripes; in btrfs_num_copies()
875 else if (map->type & BTRFS_BLOCK_GROUP_RAID10) in btrfs_num_copies()
876 ret = map->sub_stripes; in btrfs_num_copies()
877 else if (map->type & BTRFS_BLOCK_GROUP_RAID5) in btrfs_num_copies()
879 else if (map->type & BTRFS_BLOCK_GROUP_RAID6) in btrfs_num_copies()
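
btrfs_num_copies() above derives how many independent copies of a block exist from the profile flags. A sketch of that branch ladder follows, with placeholder flag values; the RAID5/RAID6 return values (2 and 3, i.e. the data plus what parity can reconstruct) are my reading of the branches whose right-hand sides do not appear in the listing.

/* Sketch of the copy-count logic; flag values are placeholders. */
#define BG_DUP    (1ULL << 0)
#define BG_RAID1  (1ULL << 1)
#define BG_RAID10 (1ULL << 2)
#define BG_RAID5  (1ULL << 3)
#define BG_RAID6  (1ULL << 4)

struct map_sketch {
    unsigned long long type;
    int num_stripes;
    int sub_stripes;
};

int num_copies(const struct map_sketch *map)
{
    if (map->type & (BG_DUP | BG_RAID1))
        return map->num_stripes;    /* every stripe holds a full copy */
    if (map->type & BG_RAID10)
        return map->sub_stripes;    /* copies within each sub-stripe group */
    if (map->type & BG_RAID5)
        return 2;                   /* data plus single-parity reconstruction */
    if (map->type & BG_RAID6)
        return 3;                   /* data plus P and Q reconstruction */
    return 1;                       /* single / RAID0: one copy */
}
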
891 struct map_lookup *map; in btrfs_next_bg() local
909 map = container_of(ce, struct map_lookup, ce); in btrfs_next_bg()
910 if (map->type & type) { in btrfs_next_bg()
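
btrfs_next_bg() above takes the container of the next cache extent and checks its profile flags against the requested type. The sketch below performs the same search over a plain sorted array instead of the cache_extent tree, purely for illustration; the struct and helper names are hypothetical.

/* Hypothetical sketch: find the next mapped chunk at or after `logical`
 * whose profile flags match `type`. */
struct map_entry {
    unsigned long long start;    /* logical start of the chunk */
    unsigned long long size;
    unsigned long long type;     /* block group profile flags */
};

/* returns 0 and sets *found on success, -1 if nothing matches */
int next_bg(const struct map_entry *maps, int nr_maps,
            unsigned long long logical, unsigned long long type,
            unsigned long long *found)
{
    for (int i = 0; i < nr_maps; i++) {
        if (maps[i].start + maps[i].size <= logical)
            continue;            /* chunk ends before the search point */
        if (maps[i].type & type) {
            *found = maps[i].start;
            return 0;
        }
    }
    return -1;
}
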
958 struct map_lookup *map; in __btrfs_map_block() local
991 map = container_of(ce, struct map_lookup, ce); in __btrfs_map_block()
995 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 | in __btrfs_map_block()
999 stripes_required = map->num_stripes; in __btrfs_map_block()
1000 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { in __btrfs_map_block()
1001 stripes_required = map->sub_stripes; in __btrfs_map_block()
1004 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6) in __btrfs_map_block()
1007 stripes_required = map->num_stripes; in __btrfs_map_block()
1011 raid_map = kmalloc(sizeof(u64) * map->num_stripes, GFP_NOFS); in __btrfs_map_block()
1031 stripe_nr = stripe_nr / map->stripe_len; in __btrfs_map_block()
1033 stripe_offset = stripe_nr * (u64)map->stripe_len; in __btrfs_map_block()
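
Lines 1031-1033 above split the offset inside the chunk into a stripe number and the byte position where that stripe starts. A small worked example of the arithmetic follows; the subsequent subtraction that turns stripe_offset into the offset within the stripe is not visible in the listing but is implied by the later physical-address computation, and all values here are illustrative.

#include <stdio.h>

int main(void)
{
    unsigned long long stripe_len = 65536;          /* 64 KiB stripes */
    unsigned long long chunk_start = 1048576;       /* logical start of chunk */
    unsigned long long logical = 1048576 + 200000;  /* address being mapped */

    unsigned long long offset = logical - chunk_start;
    unsigned long long stripe_nr = offset / stripe_len;        /* 3 */
    unsigned long long stripe_start = stripe_nr * stripe_len;  /* 196608 */
    unsigned long long stripe_offset = offset - stripe_start;  /* 3392 */

    printf("stripe_nr=%llu stripe_offset=%llu\n", stripe_nr, stripe_offset);
    return 0;
}
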
1039 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 | in __btrfs_map_block()
1046 map->stripe_len - stripe_offset); in __btrfs_map_block()
1056 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 | in __btrfs_map_block()
1060 multi->num_stripes = map->num_stripes; in __btrfs_map_block()
1064 stripe_index = stripe_nr % map->num_stripes; in __btrfs_map_block()
1065 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { in __btrfs_map_block()
1066 int factor = map->num_stripes / map->sub_stripes; in __btrfs_map_block()
1069 stripe_index *= map->sub_stripes; in __btrfs_map_block()
1072 multi->num_stripes = map->sub_stripes; in __btrfs_map_block()
1077 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { in __btrfs_map_block()
1079 multi->num_stripes = map->num_stripes; in __btrfs_map_block()
1082 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | in __btrfs_map_block()
1089 u64 full_stripe_len = nr_data_stripes(map) * map->stripe_len; in __btrfs_map_block()
1099 stripe_nr = raid56_full_stripe_start / map->stripe_len; in __btrfs_map_block()
1100 stripe_nr = stripe_nr / nr_data_stripes(map); in __btrfs_map_block()
1103 rot = stripe_nr % map->num_stripes; in __btrfs_map_block()
1106 tmp = (u64)stripe_nr * nr_data_stripes(map); in __btrfs_map_block()
1108 for (i = 0; i < nr_data_stripes(map); i++) in __btrfs_map_block()
1109 raid_map[(i+rot) % map->num_stripes] = in __btrfs_map_block()
1110 ce->start + (tmp + i) * map->stripe_len; in __btrfs_map_block()
1112 raid_map[(i+rot) % map->num_stripes] = BTRFS_RAID5_P_STRIPE; in __btrfs_map_block()
1113 if (map->type & BTRFS_BLOCK_GROUP_RAID6) in __btrfs_map_block()
1114 raid_map[(i+rot+1) % map->num_stripes] = BTRFS_RAID6_Q_STRIPE; in __btrfs_map_block()
1116 *length = map->stripe_len; in __btrfs_map_block()
1119 multi->num_stripes = map->num_stripes; in __btrfs_map_block()
1121 stripe_index = stripe_nr % nr_data_stripes(map); in __btrfs_map_block()
1122 stripe_nr = stripe_nr / nr_data_stripes(map); in __btrfs_map_block()
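
The RAID5/RAID6 path above (lines 1099-1114) rotates the data stripes of each full stripe by the full-stripe index and places the P slot (and, for RAID6, the Q slot) immediately after the last data stripe. The standalone sketch below reproduces that raid_map construction for one example full stripe; the sentinel values stand in for BTRFS_RAID5_P_STRIPE and BTRFS_RAID6_Q_STRIPE, and the geometry is made up.

#include <stdio.h>

#define P_STRIPE ((unsigned long long)-2)   /* placeholder sentinel */
#define Q_STRIPE ((unsigned long long)-1)   /* placeholder sentinel */

int main(void)
{
    const int num_stripes = 5;              /* RAID6: 3 data + P + Q */
    const int nr_data = 3;
    const unsigned long long stripe_len = 65536;
    const unsigned long long chunk_start = 1048576;
    unsigned long long full_stripe_nr = 7;  /* index of this full stripe */
    unsigned long long raid_map[5];
    int rot = full_stripe_nr % num_stripes; /* parity rotation for this row */
    unsigned long long tmp = full_stripe_nr * nr_data;
    int i;

    for (i = 0; i < nr_data; i++)           /* data stripes, rotated */
        raid_map[(i + rot) % num_stripes] =
            chunk_start + (tmp + i) * stripe_len;
    raid_map[(i + rot) % num_stripes] = P_STRIPE;
    raid_map[(i + rot + 1) % num_stripes] = Q_STRIPE;   /* RAID6 only */

    for (i = 0; i < num_stripes; i++) {
        if (raid_map[i] == P_STRIPE)
            printf("slot %d: P\n", i);
        else if (raid_map[i] == Q_STRIPE)
            printf("slot %d: Q\n", i);
        else
            printf("slot %d: logical %llu\n", i, raid_map[i]);
    }
    return 0;
}
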
1130 stripe_index = nr_data_stripes(map) + mirror_num - 2; in __btrfs_map_block()
1133 stripe_index = (stripe_nr + stripe_index) % map->num_stripes; in __btrfs_map_block()
1141 stripe_index = stripe_nr % map->num_stripes; in __btrfs_map_block()
1142 stripe_nr = stripe_nr / map->num_stripes; in __btrfs_map_block()
1144 BUG_ON(stripe_index >= map->num_stripes); in __btrfs_map_block()
1148 map->stripes[stripe_index].physical + stripe_offset + in __btrfs_map_block()
1149 stripe_nr * map->stripe_len; in __btrfs_map_block()
1150 multi->stripes[i].dev = map->stripes[stripe_index].dev; in __btrfs_map_block()
1156 *type = map->type; in __btrfs_map_block()
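
The tail of __btrfs_map_block() above (lines 1141-1150) picks the stripe slot with a modulo, advances stripe_nr to the row on that device, and adds the in-stripe offset to the stripe's recorded physical start. A worked sketch of that final translation for a plain striped layout follows; all values are illustrative only.

#include <stdio.h>

int main(void)
{
    const int num_stripes = 4;
    const unsigned long long stripe_len = 65536;
    unsigned long long dev_physical[4] = {        /* per-stripe start on disk */
        22020096, 22020096, 22020096, 22020096
    };
    unsigned long long stripe_nr = 9;             /* from the earlier division */
    unsigned long long stripe_offset = 3392;      /* offset inside the stripe */

    int stripe_index = stripe_nr % num_stripes;   /* which stripe slot/device */
    stripe_nr = stripe_nr / num_stripes;          /* row on that device */

    unsigned long long physical = dev_physical[stripe_index] +
                                  stripe_offset + stripe_nr * stripe_len;

    printf("stripe slot %d, physical %llu\n", stripe_index, physical);
    return 0;
}
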