Lines matching references to the identifier "bio" (each entry shows the source line number, the matching code, and the enclosing function; an "argument" or "local" tag marks how the identifier is used on that line):
18 static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv) in bio_get_first_bvec() argument
20 *bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); in bio_get_first_bvec()
23 static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv) in bio_get_last_bvec() argument
25 struct bvec_iter iter = bio->bi_iter; in bio_get_last_bvec()
28 bio_get_first_bvec(bio, bv); in bio_get_last_bvec()
29 if (bv->bv_len == bio->bi_iter.bi_size) in bio_get_last_bvec()
32 bio_advance_iter(bio, &iter, iter.bi_size); in bio_get_last_bvec()
39 *bv = bio->bi_io_vec[idx]; in bio_get_last_bvec()
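The two helpers above extract the first and the last bio_vec that the bio's current iterator still covers: bio_get_first_bvec() is a one-liner over the iterator, while bio_get_last_bvec() returns early when that single bvec already spans the whole remaining size and otherwise advances a copy of the iterator to the end to find (and clip) the final segment. The following is a simplified userspace model of that walk; the structs and function names are mine, not the kernel's bvec API.

    /* Simplified userspace model of pulling the first and the last segment
     * that an iterator still covers.  The structs and functions here are
     * mine, not the kernel's bvec API. */
    #include <stdio.h>

    struct seg { unsigned len; };
    struct it {
            unsigned idx;   /* current segment              */
            unsigned done;  /* bytes already consumed in it */
            unsigned size;  /* bytes left in the whole bio  */
    };

    /* First segment: the current one, minus what is already consumed. */
    static struct seg first_seg(const struct seg *v, struct it i)
    {
            struct seg s = { v[i.idx].len - i.done };
            return s;
    }

    /* Last segment: walk a copy of the iterator to the end, then clip. */
    static struct seg last_seg(const struct seg *v, struct it i)
    {
            struct seg s = first_seg(v, i);

            if (s.len == i.size)            /* single-segment bio */
                    return s;

            while (i.size) {
                    unsigned step = v[i.idx].len - i.done;

                    if (step > i.size)
                            step = i.size;
                    i.size -= step;
                    i.done += step;
                    if (i.done == v[i.idx].len) {
                            i.idx++;
                            i.done = 0;
                    }
            }
            /* done != 0: the bio ends mid-segment and is only that long. */
            s.len = i.done ? i.done : v[i.idx - 1].len;
            return s;
    }

    int main(void)
    {
            struct seg v[] = { { 4096 }, { 4096 }, { 4096 } };
            struct it i = { 0, 512, 6144 };  /* starts and ends mid-segment */

            printf("first=%u last=%u\n", first_seg(v, i).len, last_seg(v, i).len);
            return 0;
    }

The final done-versus-full-length choice mirrors the kernel's bi_bvec_done handling: a bio that ends in the middle of a vector reports only the covered part as its last bvec.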
50 struct request *prev_rq, struct bio *prev, struct bio *next) in bio_will_gap()
63 bio_get_first_bvec(prev_rq->bio, &pb); in bio_will_gap()
85 static inline bool req_gap_back_merge(struct request *req, struct bio *bio) in req_gap_back_merge() argument
87 return bio_will_gap(req->q, req, req->biotail, bio); in req_gap_back_merge()
90 static inline bool req_gap_front_merge(struct request *req, struct bio *bio) in req_gap_front_merge() argument
92 return bio_will_gap(req->q, NULL, bio, req->bio); in req_gap_front_merge()
95 static struct bio *blk_bio_discard_split(struct request_queue *q, in blk_bio_discard_split()
96 struct bio *bio, in blk_bio_discard_split() argument
119 if (bio_sectors(bio) <= max_discard_sectors) in blk_bio_discard_split()
130 tmp = bio->bi_iter.bi_sector + split_sectors - alignment; in blk_bio_discard_split()
136 return bio_split(bio, split_sectors, GFP_NOIO, bs); in blk_bio_discard_split()
139 static struct bio *blk_bio_write_zeroes_split(struct request_queue *q, in blk_bio_write_zeroes_split()
140 struct bio *bio, struct bio_set *bs, unsigned *nsegs) in blk_bio_write_zeroes_split() argument
147 if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors) in blk_bio_write_zeroes_split()
150 return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs); in blk_bio_write_zeroes_split()
153 static struct bio *blk_bio_write_same_split(struct request_queue *q, in blk_bio_write_same_split()
154 struct bio *bio, in blk_bio_write_same_split() argument
163 if (bio_sectors(bio) <= q->limits.max_write_same_sectors) in blk_bio_write_same_split()
166 return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs); in blk_bio_write_same_split()
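The three op-specific split helpers above (discard, write-zeroes, write-same) follow the same pattern: if the bio already fits the relevant queue limit they return NULL (no split), otherwise they split at the limit. The discard case additionally pulls the split point back so that the next discard starts on a granularity-aligned sector (the tmp computation at line 130 above). A runnable sketch of that arithmetic, working in 512-byte sectors; the function and parameter names are mine:

    /* Sketch of the split-size arithmetic for a discard that exceeds
     * max_discard_sectors: cap at the limit, then pull the split point
     * back so the *next* discard starts on a granularity-aligned sector.
     * All values are in 512-byte sectors; the names are mine. */
    #include <stdio.h>

    static unsigned discard_split_sectors(unsigned long long start_sector,
                                          unsigned bio_sectors,
                                          unsigned max_discard,
                                          unsigned granularity,
                                          unsigned alignment)
    {
            unsigned split, rem;

            if (granularity == 0)
                    granularity = 1;                /* unknown == 1 sector */
            max_discard -= max_discard % granularity;

            if (bio_sectors <= max_discard)
                    return 0;                       /* no split needed */

            split = max_discard;
            alignment %= granularity;

            /* How far past an aligned boundary would the next bio start? */
            rem = (start_sector + split - alignment) % granularity;
            if (split > rem)
                    split -= rem;
            return split;
    }

    int main(void)
    {
            /* 8 MiB discard, 4 MiB cap, 1 MiB granularity, misaligned start */
            printf("%u\n", discard_split_sectors(1024, 16384, 8192, 2048, 0));
            return 0;
    }

In the example the cap alone would leave the next discard starting at sector 9216; pulling the split back by 1024 sectors makes it start at 8192, a multiple of the 2048-sector granularity.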
178 struct bio *bio) in get_max_io_size() argument
180 unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector, 0); in get_max_io_size()
184 unsigned start_offset = bio->bi_iter.bi_sector & (pbs - 1); in get_max_io_size()
278 static struct bio *blk_bio_segment_split(struct request_queue *q, in blk_bio_segment_split()
279 struct bio *bio, in blk_bio_segment_split() argument
286 const unsigned max_sectors = get_max_io_size(q, bio); in blk_bio_segment_split()
289 bio_for_each_bvec(bv, bio, iter) { in blk_bio_segment_split()
321 bio_clear_polled(bio); in blk_bio_segment_split()
322 return bio_split(bio, sectors, GFP_NOIO, bs); in blk_bio_segment_split()
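For regular reads and writes, get_max_io_size() derives the per-bio sector budget from blk_max_size_offset() and rounds it so the split ends on a physical-block boundary relative to the bio's start offset, and blk_bio_segment_split() then walks the bvecs (bio_for_each_bvec) accumulating sectors and segment counts until a limit is hit; at that point it clears the bio's polled flag (split bios are not completed via iopoll) and calls bio_split(). A rough model of the accumulation loop that deliberately ignores per-segment size limits and mid-bvec splitting; the names are mine:

    /* Rough model of the segment-split walk: accumulate segments until
     * either the sector budget or the segment budget is exhausted, and
     * report where the bio would have to be split (0 = fits as is).
     * Per-segment size limits and page splits are ignored here. */
    #include <stdio.h>

    static unsigned segment_split(const unsigned *seg_bytes, unsigned nr,
                                  unsigned max_sectors, unsigned max_segs,
                                  unsigned *out_nsegs)
    {
            unsigned sectors = 0, nsegs = 0, i;

            for (i = 0; i < nr; i++) {
                    unsigned s = seg_bytes[i] >> 9;      /* bytes -> sectors */

                    if (nsegs == max_segs || sectors + s > max_sectors) {
                            *out_nsegs = nsegs;
                            return sectors;              /* split point */
                    }
                    nsegs++;
                    sectors += s;
            }
            *out_nsegs = nsegs;
            return 0;                                    /* no split needed */
    }

    int main(void)
    {
            unsigned segs[] = { 65536, 65536, 65536, 65536 };  /* 4 x 64K */
            unsigned nsegs;
            unsigned split = segment_split(segs, 4, 256, 128, &nsegs);

            printf("split at %u sectors, %u segments\n", split, nsegs);
            return 0;
    }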
338 void __blk_queue_split(struct request_queue *q, struct bio **bio, in __blk_queue_split() argument
341 struct bio *split = NULL; in __blk_queue_split()
343 switch (bio_op(*bio)) { in __blk_queue_split()
346 split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs); in __blk_queue_split()
349 split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split, in __blk_queue_split()
353 split = blk_bio_write_same_split(q, *bio, &q->bio_split, in __blk_queue_split()
357 split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs); in __blk_queue_split()
365 bio_chain(split, *bio); in __blk_queue_split()
366 trace_block_split(split, (*bio)->bi_iter.bi_sector); in __blk_queue_split()
367 submit_bio_noacct(*bio); in __blk_queue_split()
368 *bio = split; in __blk_queue_split()
370 blk_throtl_charge_bio_split(*bio); in __blk_queue_split()
384 void blk_queue_split(struct bio **bio) in blk_queue_split() argument
386 struct request_queue *q = bdev_get_queue((*bio)->bi_bdev); in blk_queue_split()
389 if (blk_may_split(q, *bio)) in blk_queue_split()
390 __blk_queue_split(q, bio, &nr_segs); in blk_queue_split()
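__blk_queue_split() dispatches on bio_op() to the helpers above; when a front half comes back it is chained to the remainder, a block_split trace event fires, the remainder re-enters the submission path via submit_bio_noacct(), the caller's bio pointer is switched to the front half, and the throttling layer is charged for the split. blk_queue_split() is the exported wrapper that looks up the queue from the bio's bdev and only does the work when blk_may_split() says the op can need it. A stand-in sketch of that dispatch-and-resubmit shape; none of the types or helpers below are kernel API:

    /* Shape of the split-and-resubmit step in __blk_queue_split(): the
     * op-specific helper may hand back a new front half; if so the front
     * is chained to the remainder, the remainder re-enters submission,
     * and the caller continues with the front only.  Stand-in types only. */
    #include <stdio.h>
    #include <stdlib.h>

    enum op { OP_READ, OP_WRITE, OP_DISCARD };

    struct fake_bio {
            enum op op;
            unsigned sectors;
            struct fake_bio *parent;        /* completion chains into this */
    };

    /* Pretend splitter: cap a bio at the given number of sectors. */
    static struct fake_bio *split_at_limit(struct fake_bio *bio, unsigned limit)
    {
            struct fake_bio *front;

            if (bio->sectors <= limit)
                    return NULL;            /* fits, nothing to do */
            front = malloc(sizeof(*front));
            *front = *bio;
            front->sectors = limit;
            bio->sectors -= limit;          /* bio becomes the rest */
            return front;
    }

    static void resubmit(struct fake_bio *bio)
    {
            printf("resubmitting remainder of %u sectors\n", bio->sectors);
    }

    static void queue_split(struct fake_bio **bio)
    {
            struct fake_bio *split;

            switch ((*bio)->op) {           /* dispatch on the operation */
            case OP_DISCARD:
                    split = split_at_limit(*bio, 2048);
                    break;
            default:
                    split = split_at_limit(*bio, 256);
                    break;
            }
            if (split) {
                    split->parent = *bio;   /* "bio_chain" stand-in      */
                    resubmit(*bio);         /* remainder goes back       */
                    *bio = split;           /* caller keeps the front    */
            }
    }

    int main(void)
    {
            struct fake_bio b = { OP_WRITE, 1000, NULL };
            struct fake_bio *bio = &b;

            queue_split(&bio);
            printf("caller continues with %u sectors\n", bio->sectors);
            if (bio != &b)
                    free(bio);
            return 0;
    }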
401 if (!rq->bio) in blk_recalc_rq_segments()
404 switch (bio_op(rq->bio)) { in blk_recalc_rq_segments()
408 struct bio *bio = rq->bio; in blk_recalc_rq_segments() local
410 for_each_bio(bio) in blk_recalc_rq_segments()
509 static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio, in __blk_bios_map_sg() argument
518 for_each_bio(bio) { in __blk_bios_map_sg()
519 bio_for_each_bvec(bvec, bio, iter) { in __blk_bios_map_sg()
536 if (likely(bio->bi_iter.bi_size)) { in __blk_bios_map_sg()
556 else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME) in __blk_rq_map_sg()
557 nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, last_sg); in __blk_rq_map_sg()
558 else if (rq->bio) in __blk_rq_map_sg()
559 nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg); in __blk_rq_map_sg()
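blk_recalc_rq_segments() walks the request's chained bios (for_each_bio) when re-counting physical segments, and __blk_bios_map_sg() does the same walk while emitting scatter-gather entries, coalescing neighbouring bvecs where the queue limits allow; __blk_rq_map_sg() picks a single-bvec fast path for WRITE_SAME and the full walk otherwise. A simplified model of the coalescing pass, with stand-in types and a simplified contiguity rule:

    /* Simplified model of mapping a list of data segments into scatter-
     * gather entries: physically contiguous neighbours are merged into
     * one entry as long as it stays under max_seg_size.  The types and
     * the contiguity rule are stand-ins for illustration only. */
    #include <stdio.h>

    struct seg { unsigned long addr; unsigned len; };

    static unsigned map_sg(const struct seg *in, unsigned nr,
                           struct seg *out, unsigned max_seg_size)
    {
            unsigned nsegs = 0, i;

            for (i = 0; i < nr; i++) {
                    if (nsegs &&
                        out[nsegs - 1].addr + out[nsegs - 1].len == in[i].addr &&
                        out[nsegs - 1].len + in[i].len <= max_seg_size) {
                            out[nsegs - 1].len += in[i].len;   /* coalesce */
                    } else {
                            out[nsegs++] = in[i];              /* new sg entry */
                    }
            }
            return nsegs;
    }

    int main(void)
    {
            struct seg in[] = {
                    { 0x10000, 4096 }, { 0x11000, 4096 },  /* contiguous pair */
                    { 0x40000, 4096 },                     /* separate        */
            };
            struct seg out[3];
            unsigned n = map_sg(in, 3, out, 65536);

            printf("%u sg entries, first is %u bytes\n", n, out[0].len);
            return 0;
    }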
598 static inline int ll_new_hw_segment(struct request *req, struct bio *bio, in ll_new_hw_segment() argument
601 if (blk_integrity_merge_bio(req->q, req, bio) == false) in ll_new_hw_segment()
623 int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs) in ll_back_merge_fn() argument
625 if (req_gap_back_merge(req, bio)) in ll_back_merge_fn()
628 integrity_req_gap_back_merge(req, bio)) in ll_back_merge_fn()
630 if (!bio_crypt_ctx_back_mergeable(req, bio)) in ll_back_merge_fn()
632 if (blk_rq_sectors(req) + bio_sectors(bio) > in ll_back_merge_fn()
638 return ll_new_hw_segment(req, bio, nr_segs); in ll_back_merge_fn()
641 static int ll_front_merge_fn(struct request *req, struct bio *bio, in ll_front_merge_fn() argument
644 if (req_gap_front_merge(req, bio)) in ll_front_merge_fn()
647 integrity_req_gap_front_merge(req, bio)) in ll_front_merge_fn()
649 if (!bio_crypt_ctx_front_mergeable(req, bio)) in ll_front_merge_fn()
651 if (blk_rq_sectors(req) + bio_sectors(bio) > in ll_front_merge_fn()
652 blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) { in ll_front_merge_fn()
657 return ll_new_hw_segment(req, bio, nr_segs); in ll_front_merge_fn()
667 if (blk_rq_sectors(req) + bio_sectors(next->bio) > in req_attempt_discard_merge()
683 if (req_gap_back_merge(req, next->bio)) in ll_merge_requests_fn()
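ll_back_merge_fn() and ll_front_merge_fn() decide whether a bio may be glued onto the tail or head of an existing request: both veto the merge on a virtual-boundary gap, an integrity-buffer gap, or an incompatible inline-crypto context, then reject it if the combined size would exceed blk_rq_get_max_sectors(), and only then let ll_new_hw_segment() check the integrity and segment-count limits; req_attempt_discard_merge() and ll_merge_requests_fn() apply equivalent checks when two whole requests are merged. A condensed model of that "series of cheap vetoes, then limits" structure; every field and predicate here is a stand-in:

    /* Condensed model of the back-merge feasibility test: a run of veto
     * checks, then the size and segment-count limits. */
    #include <stdbool.h>
    #include <stdio.h>

    struct limits { unsigned max_sectors, max_segments; };
    struct req    { unsigned sectors, segments; };
    struct newbio { unsigned sectors, segments; bool gap; bool crypt_ok; };

    static bool back_mergeable(const struct limits *l, const struct req *rq,
                               const struct newbio *bio)
    {
            if (bio->gap)                   /* would create a boundary gap */
                    return false;
            if (!bio->crypt_ok)             /* incompatible crypto context */
                    return false;
            if (rq->sectors + bio->sectors > l->max_sectors)
                    return false;           /* combined I/O too large      */
            if (rq->segments + bio->segments > l->max_segments)
                    return false;           /* too many physical segments  */
            return true;
    }

    int main(void)
    {
            struct limits l = { 2560, 128 };
            struct req    r = { 2048, 16 };
            struct newbio b = { 1024, 8, false, true };

            /* 2048 + 1024 > 2560, so the merge is refused */
            printf("%s\n", back_mergeable(&l, &r, &b) ? "merge" : "no merge");
            return 0;
    }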
720 struct bio *bio; in blk_rq_set_mixed_merge() local
730 for (bio = rq->bio; bio; bio = bio->bi_next) { in blk_rq_set_mixed_merge()
731 WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) && in blk_rq_set_mixed_merge()
732 (bio->bi_opf & REQ_FAILFAST_MASK) != ff); in blk_rq_set_mixed_merge()
733 bio->bi_opf |= ff; in blk_rq_set_mixed_merge()
758 static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b) in blk_write_same_mergeable()
783 !blk_write_same_mergeable(req->bio, next->bio)) in attempt_merge()
838 req->biotail->bi_next = next->bio; in attempt_merge()
857 next->bio = NULL; in attempt_merge()
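blk_rq_set_mixed_merge() copes with requests whose bios carry different REQ_FAILFAST_* settings by propagating the request's failfast bits onto every bio in the chain, so a later split along bio boundaries stays correct. attempt_merge() then performs the request-level merge: among other checks it refuses to mix differing WRITE_SAME payloads (blk_write_same_mergeable()), and on success it splices the two bio chains by pointing req->biotail->bi_next at next->bio and clearing next->bio. A small model of those two list manipulations on stand-in types:

    /* Model of the two list manipulations above: propagating failfast
     * flags over a request's whole bio chain, and splicing one request's
     * chain onto another's tail during a back merge.  Stand-in types. */
    #include <stdio.h>

    #define FAILFAST_MASK 0x7u

    struct mbio { unsigned flags; struct mbio *next; };
    struct mreq { struct mbio *head, *tail; unsigned flags; };

    static void set_mixed_merge(struct mreq *rq)
    {
            unsigned ff = rq->flags & FAILFAST_MASK;
            struct mbio *b;

            for (b = rq->head; b; b = b->next)
                    b->flags |= ff;         /* every bio carries the bits */
    }

    static void splice_back(struct mreq *rq, struct mreq *next)
    {
            rq->tail->next = next->head;    /* biotail->bi_next = next->bio */
            rq->tail = next->tail;
            next->head = next->tail = NULL; /* next no longer owns the bios */
    }

    int main(void)
    {
            struct mbio b1 = { 0, NULL }, b2 = { 0, NULL };
            struct mreq r1 = { &b1, &b1, 0x1 }, r2 = { &b2, &b2, 0x1 };

            splice_back(&r1, &r2);
            set_mixed_merge(&r1);
            printf("chain flags: %u -> %u\n", r1.head->flags, r1.head->next->flags);
            return 0;
    }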
894 bool blk_rq_merge_ok(struct request *rq, struct bio *bio) in blk_rq_merge_ok() argument
896 if (!rq_mergeable(rq) || !bio_mergeable(bio)) in blk_rq_merge_ok()
899 if (req_op(rq) != bio_op(bio)) in blk_rq_merge_ok()
903 if (bio_data_dir(bio) != rq_data_dir(rq)) in blk_rq_merge_ok()
907 if (rq->rq_disk != bio->bi_bdev->bd_disk) in blk_rq_merge_ok()
911 if (blk_integrity_merge_bio(rq->q, rq, bio) == false) in blk_rq_merge_ok()
915 if (!bio_crypt_rq_ctx_compatible(rq, bio)) in blk_rq_merge_ok()
920 !blk_write_same_mergeable(rq->bio, bio)) in blk_rq_merge_ok()
927 if (rq->write_hint != bio->bi_write_hint) in blk_rq_merge_ok()
930 if (rq->ioprio != bio_prio(bio)) in blk_rq_merge_ok()
936 enum elv_merge blk_try_merge(struct request *rq, struct bio *bio) in blk_try_merge() argument
940 else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector) in blk_try_merge()
942 else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector) in blk_try_merge()
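blk_rq_merge_ok() is the generic eligibility filter: same operation, same data direction, same disk, compatible integrity and crypto contexts, matching write hint and priority, and for WRITE_SAME the same payload. blk_try_merge() then only classifies the geometry (leaving the multi-range discard case aside): a bio that starts exactly where the request ends is a back-merge candidate, one that ends exactly where the request starts is a front-merge candidate, anything else is no merge. A minimal sketch of that classification; the enum and parameter names are mine:

    /* Minimal sketch of the geometric classification in blk_try_merge():
     * purely a comparison of start/end sectors. */
    #include <stdio.h>

    enum merge_kind { NO_MERGE, BACK_MERGE, FRONT_MERGE };

    static enum merge_kind try_merge(unsigned long long rq_pos, unsigned rq_sectors,
                                     unsigned long long bio_pos, unsigned bio_sectors)
    {
            if (rq_pos + rq_sectors == bio_pos)
                    return BACK_MERGE;      /* bio starts where request ends */
            if (rq_pos == bio_pos + bio_sectors)
                    return FRONT_MERGE;     /* bio ends where request starts */
            return NO_MERGE;
    }

    int main(void)
    {
            printf("%d\n", try_merge(1000, 8, 1008, 8));  /* back merge  */
            printf("%d\n", try_merge(1000, 8, 992, 8));   /* front merge */
            printf("%d\n", try_merge(1000, 8, 2000, 8));  /* no merge    */
            return 0;
    }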
964 struct bio *bio, unsigned int nr_segs) in bio_attempt_back_merge() argument
966 const int ff = bio->bi_opf & REQ_FAILFAST_MASK; in bio_attempt_back_merge()
968 if (!ll_back_merge_fn(req, bio, nr_segs)) in bio_attempt_back_merge()
971 trace_block_bio_backmerge(bio); in bio_attempt_back_merge()
972 rq_qos_merge(req->q, req, bio); in bio_attempt_back_merge()
977 req->biotail->bi_next = bio; in bio_attempt_back_merge()
978 req->biotail = bio; in bio_attempt_back_merge()
979 req->__data_len += bio->bi_iter.bi_size; in bio_attempt_back_merge()
981 bio_crypt_free_ctx(bio); in bio_attempt_back_merge()
988 struct bio *bio, unsigned int nr_segs) in bio_attempt_front_merge() argument
990 const int ff = bio->bi_opf & REQ_FAILFAST_MASK; in bio_attempt_front_merge()
992 if (!ll_front_merge_fn(req, bio, nr_segs)) in bio_attempt_front_merge()
995 trace_block_bio_frontmerge(bio); in bio_attempt_front_merge()
996 rq_qos_merge(req->q, req, bio); in bio_attempt_front_merge()
1001 bio->bi_next = req->bio; in bio_attempt_front_merge()
1002 req->bio = bio; in bio_attempt_front_merge()
1004 req->__sector = bio->bi_iter.bi_sector; in bio_attempt_front_merge()
1005 req->__data_len += bio->bi_iter.bi_size; in bio_attempt_front_merge()
1007 bio_crypt_do_front_merge(req, bio); in bio_attempt_front_merge()
1014 struct request *req, struct bio *bio) in bio_attempt_discard_merge() argument
1020 if (blk_rq_sectors(req) + bio_sectors(bio) > in bio_attempt_discard_merge()
1024 rq_qos_merge(q, req, bio); in bio_attempt_discard_merge()
1026 req->biotail->bi_next = bio; in bio_attempt_discard_merge()
1027 req->biotail = bio; in bio_attempt_discard_merge()
1028 req->__data_len += bio->bi_iter.bi_size; in bio_attempt_discard_merge()
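Once a merge has been judged legal, these helpers do the bookkeeping: the back merge appends the bio at req->biotail, the front merge makes it the new head and also moves req->__sector back to the bio's start, and both grow req->__data_len by the bio's byte count (marking the request as a mixed merge first if the failfast bits differ); the discard variant only enforces the combined sector and discard-segment limits before appending. A sketch of the front-merge bookkeeping on stand-in types:

    /* Sketch of the front-merge bookkeeping: the bio becomes the new head
     * of the request, the request's start sector moves back to the bio's
     * start, and the byte count grows.  Stand-in types, not kernel API. */
    #include <stdio.h>

    struct fbio { unsigned long long sector; unsigned bytes; struct fbio *next; };
    struct freq {
            struct fbio *head, *tail;
            unsigned long long sector;
            unsigned bytes;
    };

    static void front_merge(struct freq *rq, struct fbio *bio)
    {
            bio->next = rq->head;           /* bio->bi_next = req->bio    */
            rq->head  = bio;                /* req->bio = bio             */
            rq->sector = bio->sector;       /* request now starts earlier */
            rq->bytes += bio->bytes;        /* __data_len += bi_size      */
    }

    int main(void)
    {
            struct fbio old = { 2048, 4096, NULL };
            struct freq rq  = { &old, &old, 2048, 4096 };
            struct fbio new_head = { 2040, 4096, NULL };

            front_merge(&rq, &new_head);
            printf("request: sector %llu, %u bytes\n", rq.sector, rq.bytes);
            return 0;
    }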
1040 struct bio *bio, in blk_attempt_bio_merge() argument
1044 if (!blk_rq_merge_ok(rq, bio)) in blk_attempt_bio_merge()
1047 switch (blk_try_merge(rq, bio)) { in blk_attempt_bio_merge()
1049 if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio)) in blk_attempt_bio_merge()
1050 return bio_attempt_back_merge(rq, bio, nr_segs); in blk_attempt_bio_merge()
1053 if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio)) in blk_attempt_bio_merge()
1054 return bio_attempt_front_merge(rq, bio, nr_segs); in blk_attempt_bio_merge()
1057 return bio_attempt_discard_merge(q, rq, bio); in blk_attempt_bio_merge()
1086 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, in blk_attempt_plug_merge() argument
1092 plug = blk_mq_plug(q, bio); in blk_attempt_plug_merge()
1105 if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) == in blk_attempt_plug_merge()
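blk_attempt_bio_merge() ties the pieces together: blk_rq_merge_ok() for eligibility, blk_try_merge() for the direction, then, optionally gated by the scheduler's allow-merge hook, the matching bio_attempt_*() helper. blk_attempt_plug_merge() uses it opportunistically against what the current task has recently plugged, before a new request is allocated. The sketch below loops over a small list of plugged requests for illustration; the kernel's plug handling is more restrictive, and all types here are stand-ins:

    /* Sketch of the plug-merge idea: before allocating a new request,
     * try to merge the bio into a request the task has already plugged. */
    #include <stdbool.h>
    #include <stdio.h>

    struct prq { unsigned long long pos; unsigned sectors; };

    /* "Merge" succeeds if the bio is geometrically adjacent to the request. */
    static bool attempt_bio_merge(struct prq *rq,
                                  unsigned long long bio_pos, unsigned bio_sectors)
    {
            if (rq->pos + rq->sectors == bio_pos) {        /* back merge  */
                    rq->sectors += bio_sectors;
                    return true;
            }
            if (rq->pos == bio_pos + bio_sectors) {        /* front merge */
                    rq->pos = bio_pos;
                    rq->sectors += bio_sectors;
                    return true;
            }
            return false;
    }

    static bool attempt_plug_merge(struct prq *plugged, unsigned nr,
                                   unsigned long long bio_pos, unsigned bio_sectors)
    {
            for (unsigned i = 0; i < nr; i++)
                    if (attempt_bio_merge(&plugged[i], bio_pos, bio_sectors))
                            return true;                   /* merged, no new rq */
            return false;                                  /* caller allocates  */
    }

    int main(void)
    {
            struct prq plug[] = { { 0, 8 }, { 1000, 8 } };

            printf("%d\n", attempt_plug_merge(plug, 2, 1008, 8));
            printf("second plugged request now %u sectors\n", plug[1].sectors);
            return 0;
    }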
1117 struct bio *bio, unsigned int nr_segs) in blk_bio_list_merge() argument
1126 switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) { in blk_bio_list_merge()
1141 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio, in blk_mq_sched_try_merge() argument
1146 switch (elv_merge(q, &rq, bio)) { in blk_mq_sched_try_merge()
1148 if (!blk_mq_sched_allow_merge(q, rq, bio)) in blk_mq_sched_try_merge()
1150 if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK) in blk_mq_sched_try_merge()
1157 if (!blk_mq_sched_allow_merge(q, rq, bio)) in blk_mq_sched_try_merge()
1159 if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK) in blk_mq_sched_try_merge()
1166 return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK; in blk_mq_sched_try_merge()
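blk_bio_list_merge() gives I/O schedulers a generic way to try a bio against a short window of already-queued requests, while blk_mq_sched_try_merge() is the elevator-driven path: elv_merge() nominates a candidate request and a direction, the scheduler's allow-merge hook may veto it, and the same bio_attempt_*() helpers then commit the merge (with a separate discard fallback). A compact model of that classify/veto/commit flow with a pluggable veto callback; every name below is a stand-in:

    /* Compact model of the scheduler merge path: a classifier proposes a
     * direction, an elevator-style callback may veto it, and only then is
     * the merge committed. */
    #include <stdbool.h>
    #include <stdio.h>

    enum dir { NONE, BACK, FRONT };
    struct srq { unsigned long long pos; unsigned sectors; };

    typedef bool (*allow_fn)(const struct srq *rq);

    static enum dir classify(const struct srq *rq,
                             unsigned long long bio_pos, unsigned bio_sectors)
    {
            if (rq->pos + rq->sectors == bio_pos)
                    return BACK;
            if (rq->pos == bio_pos + bio_sectors)
                    return FRONT;
            return NONE;
    }

    static bool sched_try_merge(struct srq *rq, unsigned long long bio_pos,
                                unsigned bio_sectors, allow_fn allow)
    {
            enum dir d = classify(rq, bio_pos, bio_sectors);

            if (d == NONE || !allow(rq))    /* no candidate, or elevator veto */
                    return false;
            if (d == FRONT)
                    rq->pos = bio_pos;      /* request now starts at the bio */
            rq->sectors += bio_sectors;
            return true;
    }

    static bool always_allow(const struct srq *rq) { (void)rq; return true; }

    int main(void)
    {
            struct srq rq = { 100, 8 };

            printf("%d\n", sched_try_merge(&rq, 108, 8, always_allow));
            printf("rq now %u sectors\n", rq.sectors);
            return 0;
    }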