/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <xen/xen.h>
#include "blk-crypto-internal.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"

struct elevator_type;

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;

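/*
 * Flush state machine, one instance per hardware queue.  Requests waiting
 * for a pre/post flush sit on flush_queue[flush_pending_idx]; issuing
 * flush_rq toggles flush_pending_idx, so pending_idx != running_idx means
 * a flush is in flight.  flush_data_in_flight holds requests whose data
 * part is currently executing.  mq_flush_lock protects all of the above.
 */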
struct blk_flush_queue {
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline struct blk_flush_queue *
blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
{
	return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
}

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_freeze_queue(struct request_queue *q);
void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
void blk_queue_start_drain(struct request_queue *q);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
bool submit_bio_checks(struct bio *bio);

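/*
 * Take a reference on q->q_usage_counter unless the queue is frozen or
 * being torn down.  @pm is true for requests issued on behalf of runtime
 * PM, which may enter a pm_only queue as long as it is not fully
 * suspended.  On success the caller must drop the reference again with
 * blk_queue_exit().
 */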
static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
		goto fail;

	/*
	 * The code that increments the pm_only counter must ensure that the
	 * counter is globally visible before the queue is unfrozen.
	 */
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	blk_queue_exit(q);
fail:
	rcu_read_unlock();
	return false;
}

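/*
 * Fast-path queue entry for bio submission: try the lockless reference
 * first and only fall back to __bio_queue_enter(), which may sleep until
 * the queue is unfrozen (or fail the bio for REQ_NOWAIT callers), when
 * that does not succeed.
 */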
static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (blk_try_enter_queue(q, false))
		return 0;
	return __bio_queue_enter(q, bio);
}

#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

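/*
 * Two bio_vecs can be merged into one physical segment only if they are
 * physically contiguous, Xen does not object (grant mappings must not be
 * crossed), and the combined segment does not straddle the queue's segment
 * boundary mask.
 */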
static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}

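/*
 * With a virt_boundary mask set, a follow-up segment may only be added if
 * the previous one ends on the boundary and the new one starts at a
 * boundary-aligned offset; anything else would leave a gap in the SG list.
 */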
static inline bool __bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & queue_virt_boundary(q)) ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return __bvec_gap_to_prev(q, bprv, offset);
}

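/*
 * Merging is never attempted for passthrough commands, for flush,
 * write-zeroes and zone-append operations, or for requests that carry any
 * of the no-merge flags.
 */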
static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

/*
 * There are two different ways to handle DISCARD merges:
 *  1) If max_discard_segments > 1, the driver treats every bio as a range
 *     and sends the bios to the controller together. The ranges don't need
 *     to be contiguous.
 *  2) Otherwise, the request is handled as a normal read/write request, so
 *     the ranges need to be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
void bio_integrity_free(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

int blk_integrity_add(struct gendisk *disk);
void blk_integrity_del(struct gendisk *);
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
static inline int blk_integrity_add(struct gendisk *disk)
{
	return 0;
}
static inline void blk_integrity_del(struct gendisk *disk)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
void blk_print_req_error(struct request *req, blk_status_t status);

bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, bool *same_queue_rq);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs);

void __blk_account_io_start(struct request *req);
void __blk_account_io_done(struct request *req, u64 now);

/*
 * Plug flush limits
 */
#define BLK_MAX_REQUEST_COUNT	32
#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

int elevator_switch_mq(struct request_queue *q,
		       struct elevator_type *new_e);
void __elevator_exit(struct request_queue *, struct elevator_queue *);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

static inline void elevator_exit(struct request_queue *q,
		struct elevator_queue *e)
{
	lockdep_assert_held(&q->sysfs_lock);

	blk_mq_sched_free_rqs(q);
	__elevator_exit(q, e);
}

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);

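/*
 * Called on every submitted bio to decide whether __blk_queue_split() has
 * to be invoked at all; the cheap checks here let small single-segment
 * bios skip the splitting machinery entirely.
 */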
static inline bool blk_may_split(struct request_queue *q, struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_WRITE_SAME:
		return true; /* non-trivial splitting decisions */
	default:
		break;
	}

	/*
	 * All drivers must accept single-segment bios that are <= PAGE_SIZE.
	 * This is a quick and dirty check that relies on the fact that
	 * bi_io_vec[0] is always valid if a bio has data.  The check might
	 * lead to occasional false negatives when bios are cloned, but
	 * compared to the performance impact of cloned bios themselves the
	 * extra work in the split path doesn't matter anyway.
	 */
	return q->limits.chunk_sectors || bio->bi_vcnt != 1 ||
		bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
}

void __blk_queue_split(struct request_queue *q, struct bio **bio,
		unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_dev_init(void);

/*
 * Contribute to IO statistics IFF:
 *
 * a) it's attached to a gendisk, and
 * b) the queue had IO stats enabled when this request was started
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return (rq->rq_flags & RQF_IO_STAT) && rq->rq_disk;
}

static inline void blk_account_io_done(struct request *req, u64 now)
{
	/*
	 * Account IO completion.  flush_rq isn't accounted as a
	 * normal IO on queueing nor completion.  Accounting the
	 * containing request is enough.
	 */
	if (blk_do_io_stat(req) && req->part &&
	    !(req->rq_flags & RQF_FLUSH_SEQ))
		__blk_account_io_done(req, now);
}

static inline void blk_account_io_start(struct request *req)
{
	if (blk_do_io_stat(req))
		__blk_account_io_start(req);
}

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
 * is defined as 'unsigned int', and it also has to be aligned to the
 * logical block size, which is the minimum unit accepted by the hardware.
 */
static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
{
	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
}

/*
 * The max bio size which is aligned to q->limits.discard_granularity.  This
 * is a hint for the generic block layer when splitting large discard bios:
 * if the device driver then needs to split them further, the resulting
 * bi_size can easily stay aligned to the discard_granularity of the
 * device's queue.
 */
static inline unsigned int bio_aligned_discard_max_sectors(
					struct request_queue *q)
{
	return round_down(UINT_MAX, q->limits.discard_granularity) >>
			SECTOR_SHIFT;
}

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

void __blk_queue_bounce(struct request_queue *q, struct bio **bio);

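/*
 * Highmem bounce buffering is a legacy mechanism: it only applies when
 * CONFIG_BOUNCE is enabled and the driver has set the BLK_BOUNCE_HIGH
 * limit, and blk_queue_bounce() below only bothers for bios that actually
 * carry data.
 */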
static inline bool blk_queue_may_bounce(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BOUNCE) &&
		q->limits.bounce == BLK_BOUNCE_HIGH &&
		max_low_pfn >= max_pfn;
}

static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(*bio)))
		__blk_queue_bounce(q, bio);
}

#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif

struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);

#ifdef CONFIG_BLK_DEV_ZONED
void blk_queue_free_zone_bitmaps(struct request_queue *q);
void blk_queue_clear_zone_settings(struct request_queue *q);
#else
static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
static inline void blk_queue_clear_zone_settings(struct request_queue *q) {}
#endif

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);

int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page);

struct request_queue *blk_alloc_queue(int node_id);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

static inline void bio_clear_polled(struct bio *bio)
{
	/* can't support alloc cache if we turn off polling */
	bio_clear_flag(bio, BIO_PERCPU_CACHE);
	bio->bi_opf &= ~REQ_POLLED;
}

long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);

extern const struct address_space_operations def_blk_aops;

int disk_register_independent_access_ranges(struct gendisk *disk,
				struct blk_independent_access_ranges *new_iars);
void disk_unregister_independent_access_ranges(struct gendisk *disk);

#endif /* BLK_INTERNAL_H */