/linux/drivers/md/bcache/
request.c
    114   bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);  in bch_data_invalidate()
    116   while (bio_sectors(bio)) {  in bch_data_invalidate()
    117   unsigned int sectors = min(bio_sectors(bio),  in bch_data_invalidate()
    195   if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)  in bch_data_insert_start()
    222   if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),  in bch_data_insert_start()
    398   bio_sectors(bio) & (c->cache->sb.block_size - 1)) {  in check_should_bypass()
    454   bch_rescale_priorities(c, bio_sectors(bio));  in check_should_bypass()
    457   bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));  in check_should_bypass()
    525   unsigned int bio_sectors = bio_sectors(bio);  in cache_lookup_fn() (local)
    536   BUG_ON(bio_sectors <= sectors);  in cache_lookup_fn()
    [all …]
|
writeback.h
    117   bio_sectors(bio)))  in should_writeback()
|
/linux/drivers/md/
dm-zone.c
    135   return !op_is_flush(bio->bi_opf) && bio_sectors(bio);  in dm_is_zone_write()
    458   if (nr_sectors != bio_sectors(orig_bio)) {  in dm_zone_map_bio_end()
    502   if (op_is_flush(orig_bio->bi_opf) && !bio_sectors(orig_bio))  in dm_need_zone_wp_tracking()
    651   if (WARN_ON_ONCE(zwp_offset < bio_sectors(orig_bio)))  in dm_zone_endio()
    656   zwp_offset - bio_sectors(orig_bio);  in dm_zone_endio()
|
dm-log-writes.c
    692   if (!bio_sectors(bio) && !flush_bio)  in log_writes_map()
    726   block->nr_sectors = bio_to_dev_sectors(lc, bio_sectors(bio));  in log_writes_map()
    738   if (flush_bio && !bio_sectors(bio)) {  in log_writes_map()
|
dm-linear.c
    93    if (bio_sectors(bio) || op_is_zone_mgmt(bio_op(bio)))  in linear_map_bio()
|
dm-zoned.h
    46    #define dmz_bio_blocks(bio) dmz_sect2blk(bio_sectors(bio))
|
dm-delay.c
    298   if (bio_sectors(bio))  in delay_map()
|
raid10.c
    1176  if (max_sectors < bio_sectors(bio)) {  in raid10_read_request()
    1479  if (r10_bio->sectors < bio_sectors(bio)) {  in raid10_write_request()
    1638  if (bio_sectors(bio) < stripe_size*2)  in raid10_handle_discard()
    1656  split_size = bio_sectors(bio) - remainder;  in raid10_handle_discard()
    1836  int sectors = bio_sectors(bio);  in raid10_make_request()
    2410  md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));  in sync_request_write()
    2434  bio_sectors(tbio));  in sync_request_write()
    2565  md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));  in recovery_request_write()
    2571  bio_sectors(wbio2));  in recovery_request_write()
|
dm-flakey.c
    283   if (bio_sectors(bio) || op_is_zone_mgmt(bio_op(bio)))  in flakey_map_bio()
|
dm-crypt.c
    1144  if (!bio_sectors(bio) || !io->cc->on_disk_tag_size)  in dm_crypt_integrity_io_alloc()
    1151  tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);  in dm_crypt_integrity_io_alloc()
    2072  sector += bio_sectors(clone);  in kcryptd_crypt_write_convert()
    3399  if (bio_sectors(bio))  in crypt_map()
    3426  unsigned tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);  in crypt_map()
    3431  if (bio_sectors(bio) > cc->tag_pool_max_sectors)  in crypt_map()
|
dm-ebs-target.c
    49    sector_t end_sector = __block_mod(bio->bi_iter.bi_sector, ec->u_bs) + bio_sectors(bio);  in __nr_blocks()
|
dm.c
    495   bio->bi_iter.bi_sector, bio_sectors(bio),  in start_io_acct()
    508   bio->bi_iter.bi_sector, bio_sectors(bio),  in end_io_acct()
    1536  ci.sector_count = bio_sectors(bio);  in __split_and_process_bio()
    1547  struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,  in __split_and_process_bio()
|
raid1.c
    1183  r1_bio->sectors = bio_sectors(bio);  in init_r1bio()
    1283  if (max_sectors < bio_sectors(bio)) {  in raid1_read_request()
    1476  if (max_sectors < bio_sectors(bio)) {  in raid1_write_request()
    1590  bio->bi_iter.bi_sector, bio_sectors(bio));  in raid1_make_request()
    2236  md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));  in sync_request_write()
|
dm-integrity.c
    1620  if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {  in dec_in_flight()
    1869  sector_t end_boundary = (sec + bio_sectors(bio) - 1) >> log2_max_io_len;  in dm_integrity_map()
    1891  if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {  in dm_integrity_map()
    1893  dio->range.logical_sector, bio_sectors(bio),  in dm_integrity_map()
    1897  …if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1…  in dm_integrity_map()
    1900  dio->range.logical_sector, bio_sectors(bio));  in dm_integrity_map()
    1919  unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;  in dm_integrity_map()
    2127  dio->range.n_sectors = bio_sectors(bio);  in dm_integrity_map_continue()
|
raid0.c
    541   if (sectors < bio_sectors(bio)) {  in raid0_make_request()
|
/linux/block/
blk-merge.c
    119   if (bio_sectors(bio) <= max_discard_sectors)  in blk_bio_discard_split()
    147   if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)  in blk_bio_write_zeroes_split()
    163   if (bio_sectors(bio) <= q->limits.max_write_same_sectors)  in blk_bio_write_same_split()
    632   if (blk_rq_sectors(req) + bio_sectors(bio) >  in ll_back_merge_fn()
    651   if (blk_rq_sectors(req) + bio_sectors(bio) >  in ll_front_merge_fn()
    667   if (blk_rq_sectors(req) + bio_sectors(next->bio) >  in req_attempt_discard_merge()
    942   else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)  in blk_try_merge()
    1020  if (blk_rq_sectors(req) + bio_sectors(bio) >  in bio_attempt_discard_merge()
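
Most of the blk-merge.c hits above share one idiom: a bio may only be back- or front-merged into a request when the combined sector count (blk_rq_sectors(req) + bio_sectors(bio)) stays within a queue limit. Below is a minimal userspace sketch of that check; the struct layouts and the MOCK_MAX_SECTORS limit are illustrative stand-ins, not the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative mocks only -- not the kernel's struct bio / struct request. */
struct bio     { unsigned int bi_size; };    /* remaining payload bytes */
struct request { unsigned int nr_sectors; }; /* sectors already in the request */

#define MOCK_MAX_SECTORS 256u /* hypothetical per-request limit */

/* bio_sectors(): remaining 512-byte sectors described by the bio. */
static unsigned int bio_sectors(const struct bio *bio)
{
        return bio->bi_size >> 9;
}

/* Sketch of the ll_back_merge_fn()-style size check seen above. */
static bool can_merge(const struct request *req, const struct bio *bio)
{
        return req->nr_sectors + bio_sectors(bio) <= MOCK_MAX_SECTORS;
}

int main(void)
{
        struct request req = { .nr_sectors = 200 };
        struct bio bio = { .bi_size = 64 * 512 }; /* 64 sectors */

        printf("merge allowed: %s\n", can_merge(&req, &bio) ? "yes" : "no");
        return 0;
}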
|
bio-integrity.c
    215   if (!bio_sectors(bio))  in bio_integrity_prep()
    231   intervals = bio_integrity_intervals(bi, bio_sectors(bio));  in bio_integrity_prep()
    391   bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));  in bio_integrity_trim()
|
blk-core.c
    627   if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))  in bio_check_ro()
    656   unsigned int nr_sectors = bio_sectors(bio);  in bio_check_eod()
    676   if (bio_sectors(bio)) {  in blk_partition_remap()
    693   int nr_sectors = bio_sectors(bio);  in blk_check_zone_append()
    759   if (!bio_sectors(bio)) {  in submit_bio_checks()
    987   count = bio_sectors(bio);  in submit_bio()
    1284  return __part_start_io_acct(bio->bi_bdev, bio_sectors(bio), bio_op(bio));  in bio_start_io_acct()
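
Several of the blk-core.c call sites (and the dm-zone.c, dm-log-writes.c, and dm-crypt.c hits earlier) test !bio_sectors(bio) together with op_is_flush() to recognize an empty flush bio, i.e. an ordering/flush request that carries no data payload. A hedged userspace sketch of that test follows; the flag values and struct fields are mocked for illustration and are not the kernel's.

#include <stdbool.h>
#include <stdio.h>

/* Mock flag values for illustration; the kernel's REQ_* values differ. */
#define MOCK_REQ_PREFLUSH (1u << 8)
#define MOCK_REQ_FUA      (1u << 9)

struct bio {
        unsigned int bi_opf;  /* operation and flags */
        unsigned int bi_size; /* remaining payload bytes */
};

static unsigned int bio_sectors(const struct bio *bio)
{
        return bio->bi_size >> 9;
}

/* Mirrors op_is_flush(): flush semantics requested via PREFLUSH or FUA. */
static bool op_is_flush(unsigned int opf)
{
        return opf & (MOCK_REQ_PREFLUSH | MOCK_REQ_FUA);
}

/* An "empty flush" orders prior writes but transfers no data itself. */
static bool is_empty_flush(const struct bio *bio)
{
        return op_is_flush(bio->bi_opf) && !bio_sectors(bio);
}

int main(void)
{
        struct bio flush = { .bi_opf = MOCK_REQ_PREFLUSH, .bi_size = 0 };
        struct bio write = { .bi_opf = MOCK_REQ_FUA, .bi_size = 4096 };

        printf("flush bio is empty flush: %d\n", is_empty_flush(&flush)); /* 1 */
        printf("write bio is empty flush: %d\n", is_empty_flush(&write)); /* 0 */
        return 0;
}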
|
bounce.c
    229   if (sectors < bio_sectors(*bio_orig)) {  in __blk_queue_bounce()
|
blk-crypto-fallback.c
    223   if (num_sectors < bio_sectors(bio)) {  in blk_crypto_fallback_split_bio_if_needed()
|
/linux/include/trace/events/
block.h
    250   __entry->nr_sector = bio_sectors(bio);
    278   __entry->nr_sector = bio_sectors(bio);
    474   __entry->nr_sector = bio_sectors(bio);
|
/linux/include/linux/
bio.h
    40    #define bio_sectors(bio) bvec_iter_sectors((bio)->bi_iter)  (macro definition)
    341   if (sectors >= bio_sectors(bio))  in bio_next_split()
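
The definition at bio.h line 40 goes through bvec_iter_sectors() in bvec.h, which works out to the iterator's remaining byte count shifted right by 9, so bio_sectors() reports the number of 512-byte sectors still to be processed and shrinks as the bio's iterator is advanced. The small userspace sketch below mirrors that arithmetic; the struct layout and the mock_advance() helper are simplified stand-ins for struct bvec_iter and bio_advance(), not the real definitions.

#include <stdio.h>

/* Userspace mock of only the fields the macro touches. */
struct bvec_iter {
        unsigned long long bi_sector; /* starting sector of the unprocessed part */
        unsigned int       bi_size;   /* remaining bytes */
};

struct bio {
        struct bvec_iter bi_iter;
};

/* Mirrors bvec_iter_sectors((bio)->bi_iter): remaining 512-byte sectors. */
#define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9)

/* Toy stand-in for bio_advance(): consume bytes from the front of the bio. */
static void mock_advance(struct bio *bio, unsigned int bytes)
{
        bio->bi_iter.bi_sector += bytes >> 9;
        bio->bi_iter.bi_size -= bytes;
}

int main(void)
{
        struct bio bio = { .bi_iter = { .bi_sector = 2048, .bi_size = 8192 } };

        printf("before advance: %u sectors\n", bio_sectors(&bio)); /* 16 */
        mock_advance(&bio, 4096);
        printf("after advance:  %u sectors\n", bio_sectors(&bio)); /* 8 */
        return 0;
}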
|
blk-cgroup.h
    531   bio_issue_init(&bio->bi_issue, bio_sectors(bio));  in blkcg_bio_issue_init()
|
/linux/drivers/nvme/target/
io-cmd-bdev.c
    201   bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));  in nvmet_bdev_alloc_bip()
|
/linux/fs/ext4/
page-io.c
    331   (unsigned) bio_sectors(bio),  in ext4_end_bio()
|