Cross-reference: callers of bdev_get_queue() in the Linux kernel tree.

/linux/drivers/block/rnbd/

rnbd-srv-dev.h
    51  return queue_max_segments(bdev_get_queue(dev->bdev));  in rnbd_dev_get_max_segs()
    56  return queue_max_hw_sectors(bdev_get_queue(dev->bdev));  in rnbd_dev_get_max_hw_sects()
    61  return blk_queue_secure_erase(bdev_get_queue(dev->bdev));  in rnbd_dev_get_secure_discard()
    66  if (!blk_queue_discard(bdev_get_queue(dev->bdev)))  in rnbd_dev_get_max_discard_sects()
    69  return blk_queue_get_max_sectors(bdev_get_queue(dev->bdev),  in rnbd_dev_get_max_discard_sects()
    75  return bdev_get_queue(dev->bdev)->limits.discard_granularity;  in rnbd_dev_get_discard_granularity()
    80  return bdev_get_queue(dev->bdev)->limits.discard_alignment;  in rnbd_dev_get_discard_alignment()
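
All seven rnbd-srv-dev.h helpers above follow one pattern: resolve the backing device's request_queue with bdev_get_queue() and read a single limit or flag from it. A minimal sketch of that pattern, using only accessors that appear in this listing; my_dev_supports_discard() is a hypothetical name, not an rnbd function:

    #include <linux/blkdev.h>

    /*
     * Sketch: report whether a backing device can service REQ_OP_DISCARD,
     * mirroring the capability check in rnbd_dev_get_max_discard_sects().
     */
    static bool my_dev_supports_discard(struct block_device *bdev)
    {
        struct request_queue *q = bdev_get_queue(bdev);

        /* QUEUE_FLAG_DISCARD must be set before issuing discards */
        if (!blk_queue_discard(q))
            return false;

        /* a zero granularity means the discard limits were never set up */
        return q->limits.discard_granularity != 0;
    }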

/linux/include/linux/

blkdev.h
   971  return queue_io_min(bdev_get_queue(bdev));  in bdev_io_min()
   981  return queue_io_opt(bdev_get_queue(bdev));  in bdev_io_opt()
  1015  struct request_queue *q = bdev_get_queue(bdev);  in bdev_alignment_offset()
  1058  struct request_queue *q = bdev_get_queue(bdev);  in bdev_discard_alignment()
  1068  struct request_queue *q = bdev_get_queue(bdev);  in bdev_write_same()
  1078  struct request_queue *q = bdev_get_queue(bdev);  in bdev_write_zeroes_sectors()
  1088  struct request_queue *q = bdev_get_queue(bdev);  in bdev_zoned_model()
  1098  struct request_queue *q = bdev_get_queue(bdev);  in bdev_is_zoned()
  1108  struct request_queue *q = bdev_get_queue(bdev);  in bdev_zone_sectors()
  1117  struct request_queue *q = bdev_get_queue(bdev);  in bdev_max_open_zones()
  [all …]
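
Each bdev_*() helper listed above is the same one-line translation from a block_device to its request_queue. A sketch of that wrapper shape, assuming queue_max_sectors() as the wrapped accessor; bdev_my_max_sectors() is a hypothetical name, not a real blkdev.h helper:

    #include <linux/blkdev.h>

    /* The blkdev.h wrapper shape: hop from bdev to queue, then delegate
     * to the corresponding queue_*() limit accessor. */
    static inline unsigned int bdev_my_max_sectors(struct block_device *bdev)
    {
        return queue_max_sectors(bdev_get_queue(bdev));
    }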

/linux/block/

blk-lib.c
    30  struct request_queue *q = bdev_get_queue(bdev);  in __blkdev_issue_discard()
   170  struct request_queue *q = bdev_get_queue(bdev);  in __blkdev_issue_write_same()
   253  struct request_queue *q = bdev_get_queue(bdev);  in __blkdev_issue_write_zeroes()
   307  struct request_queue *q = bdev_get_queue(bdev);  in __blkdev_issue_zero_pages()
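
The __blkdev_issue_*() helpers above fetch the queue once to validate its limits before building bios. From a caller's side the usual sequence is a capability check followed by blkdev_issue_discard(); a hedged sketch, assuming the five-argument signature of this kernel generation, with my_trim_range() as a hypothetical caller:

    #include <linux/blkdev.h>

    static int my_trim_range(struct block_device *bdev, sector_t sector,
                             sector_t nr_sects)
    {
        /* refuse early if the queue never advertised discard support */
        if (!blk_queue_discard(bdev_get_queue(bdev)))
            return -EOPNOTSUPP;

        /* blk-lib.c splits the range into bios sized to the queue limits */
        return blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL, 0);
    }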

blk-zoned.c
   153  if (!blk_queue_is_zoned(bdev_get_queue(bdev)) ||  in blkdev_report_zones()
   193  struct request_queue *q = bdev_get_queue(bdev);  in blkdev_zone_reset_all_emulated()
   269  struct request_queue *q = bdev_get_queue(bdev);  in blkdev_zone_mgmt()
   356  q = bdev_get_queue(bdev);  in blkdev_report_zones_ioctl()
   414  q = bdev_get_queue(bdev);  in blkdev_zone_mgmt_ioctl()
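
As line 153 shows, blkdev_report_zones() rejects non-zoned queues before walking zones. A sketch of driving it from a caller, assuming the callback-based signature of this era; my_count_zones() and my_zone_cb() are hypothetical:

    #include <linux/blkdev.h>

    /* report_zones_cb: invoked once per zone the device reports back */
    static int my_zone_cb(struct blk_zone *zone, unsigned int idx, void *data)
    {
        unsigned int *count = data;

        (*count)++;
        return 0;
    }

    static int my_count_zones(struct block_device *bdev, unsigned int *count)
    {
        int ret;

        if (!blk_queue_is_zoned(bdev_get_queue(bdev)))
            return -EOPNOTSUPP;

        *count = 0;
        /* returns the number of zones reported, or a negative errno */
        ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, my_zone_cb, count);
        return ret < 0 ? ret : 0;
    }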

bdev.c
   331  result = blk_queue_enter(bdev_get_queue(bdev), 0);  in bdev_read_page()
   336  blk_queue_exit(bdev_get_queue(bdev));  in bdev_read_page()
   367  result = blk_queue_enter(bdev_get_queue(bdev), 0);  in bdev_write_page()
   380  blk_queue_exit(bdev_get_queue(bdev));  in bdev_write_page()
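
bdev_read_page() and bdev_write_page() bracket the actual I/O with blk_queue_enter()/blk_queue_exit() so the queue cannot be torn down mid-operation. A hedged sketch of that guard, assuming blk_queue_enter() is reachable from the calling context (it lives in block core); my_guarded_io() and its do_io callback are hypothetical:

    #include <linux/blkdev.h>

    static int my_guarded_io(struct block_device *bdev,
                             int (*do_io)(struct block_device *))
    {
        struct request_queue *q = bdev_get_queue(bdev);
        int ret;

        ret = blk_queue_enter(q, 0);    /* fails once the queue is dying */
        if (ret)
            return ret;
        ret = do_io(bdev);
        blk_queue_exit(q);              /* drop the usage reference */
        return ret;
    }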

blk-core.c
   724  struct request_queue *q = bdev_get_queue(bdev);  in submit_bio_checks()
   885  struct request_queue *q = bdev_get_queue(bio->bi_bdev);  in __submit_bio_noacct()
   903  if (q == bdev_get_queue(bio->bi_bdev))  in __submit_bio_noacct()
   985  bdev_get_queue(bio->bi_bdev)) >> 9;  in submit_bio()
  1031  struct request_queue *q = bdev_get_queue(bio->bi_bdev);  in bio_poll()
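
On the submit path, blk-core.c recovers the queue from bio->bi_bdev at every checkpoint, so a caller only has to point the bio at a device. A minimal sketch of feeding that path with an empty flush bio, assuming the two-argument bio_alloc() of this kernel generation; my_flush_bdev() is hypothetical:

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static int my_flush_bdev(struct block_device *bdev)
    {
        struct bio *bio = bio_alloc(GFP_KERNEL, 0);
        int ret;

        bio_set_dev(bio, bdev);         /* sets bio->bi_bdev */
        bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

        ret = submit_bio_wait(bio);     /* submit_bio() + wait for completion */
        bio_put(bio);
        return ret;
    }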

ioctl.c
   115  struct request_queue *q = bdev_get_queue(bdev);  in blk_ioctl_discard()
   514  queue_max_sectors(bdev_get_queue(bdev)));  in blkdev_common_ioctl()
   517  return put_ushort(argp, !blk_queue_nonrot(bdev_get_queue(bdev)));  in blkdev_common_ioctl()
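
Line 517 shows how the BLKROTATIONAL ioctl is answered: "rotational" is simply the inverse of the queue's nonrot flag. The same derivation as a standalone sketch; my_bdev_is_rotational() is a hypothetical wrapper:

    #include <linux/blkdev.h>

    static bool my_bdev_is_rotational(struct block_device *bdev)
    {
        /* the flag records "non-rotational"; userspace gets the inverse */
        return !blk_queue_nonrot(bdev_get_queue(bdev));
    }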

bio.c
   968  struct request_queue *q = bdev_get_queue(bio->bi_bdev);  in bio_add_zone_append_page()
  1056  struct request_queue *q = bdev_get_queue(bio->bi_bdev);  in bio_iov_bvec_set()
  1139  struct request_queue *q = bdev_get_queue(bio->bi_bdev);  in __bio_iov_append_get_pages()
  1465  rq_qos_done_bio(bdev_get_queue(bio->bi_bdev), bio);  in bio_endio()
  1468  trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio);  in bio_endio()

blk-cgroup.c
   635  q = bdev_get_queue(bdev);  in blkg_conf_prep()
   753  spin_unlock_irq(&bdev_get_queue(ctx->bdev)->queue_lock);  in blkg_conf_finish()
   856  blk_queue_root_blkg(bdev_get_queue(bdev));  in blkcg_fill_root_iostats()
  1815  bdev_get_queue(bio->bi_bdev));  in blkg_tryget_closest()
  1851  blkg_get(bdev_get_queue(bio->bi_bdev)->root_blkg);  in bio_associate_blkg_from_css()
  1852  bio->bi_blkg = bdev_get_queue(bio->bi_bdev)->root_blkg;  in bio_associate_blkg_from_css()

blk-crypto.c
   285  profile = bdev_get_queue(bio->bi_bdev)->crypto_profile;  in __blk_crypto_bio_prep()

bio-integrity.c
   137  bvec_gap_to_prev(bdev_get_queue(bio->bi_bdev),  in bio_integrity_add_page()

blk-iocost.c
  3175  ioc = q_to_ioc(bdev_get_queue(bdev));  in ioc_qos_write()
  3177  ret = blk_iocost_init(bdev_get_queue(bdev));  in ioc_qos_write()
  3180  ioc = q_to_ioc(bdev_get_queue(bdev));  in ioc_qos_write()
  3342  ioc = q_to_ioc(bdev_get_queue(bdev));  in ioc_cost_model_write()
  3344  ret = blk_iocost_init(bdev_get_queue(bdev));  in ioc_cost_model_write()
  3347  ioc = q_to_ioc(bdev_get_queue(bdev));  in ioc_cost_model_write()

blk.h
    87  struct request_queue *q = bdev_get_queue(bio->bi_bdev);  in bio_queue_enter()

/linux/drivers/md/

dm-table.c
   402  struct request_queue *q = bdev_get_queue(bdev);  in dm_set_device_limits()
   849  struct request_queue *q = bdev_get_queue(bdev);  in device_is_rq_stackable()
  1246  bdev_get_queue(dev->bdev)->crypto_profile;  in device_intersect_crypto_capabilities()
  1564  struct request_queue *q = bdev_get_queue(dev->bdev);  in device_not_zoned_model()
  1603  struct request_queue *q = bdev_get_queue(dev->bdev);  in device_not_matches_zone_sectors()
  1761  struct request_queue *q = bdev_get_queue(dev->bdev);  in device_flush_capable()
  1811  struct request_queue *q = bdev_get_queue(dev->bdev);  in device_is_rotational()
  1819  struct request_queue *q = bdev_get_queue(dev->bdev);  in device_is_not_random()
  1827  struct request_queue *q = bdev_get_queue(dev->bdev);  in device_not_write_same_capable()
  1854  struct request_queue *q = bdev_get_queue(dev->bdev);  in device_not_write_zeroes_capable()
  [all …]
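
The device_*() predicates above share device mapper's iterate_devices callback signature, and each one inspects the underlying queue of a table member. A hedged sketch of one such predicate, assuming the iterate_devices_callout_fn prototype; my_device_not_discard_capable() is hypothetical:

    #include <linux/device-mapper.h>
    #include <linux/blkdev.h>

    /* returns non-zero when the underlying device cannot discard, in the
     * same "negated capability" style as device_not_write_same_capable() */
    static int my_device_not_discard_capable(struct dm_target *ti,
                                             struct dm_dev *dev,
                                             sector_t start, sector_t len,
                                             void *data)
    {
        struct request_queue *q = bdev_get_queue(dev->bdev);

        return !blk_queue_discard(q);
    }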

dm-zoned-target.c
   590  if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {  in dmz_bdev_is_dying()
   790  q = bdev_get_queue(zoned_dev->bdev);  in dmz_fixup_devices()
   808  q = bdev_get_queue(zoned_dev->bdev);  in dmz_fixup_devices()

dm-mpath.c
   532  q = bdev_get_queue(bdev);  in multipath_clone_and_map()
   873  struct request_queue *q = bdev_get_queue(bdev);  in setup_scsi_dh()
   952  q = bdev_get_queue(p->path.dev->bdev);  in parse_path()
  1618  struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);  in activate_or_offline_path()
  2091  struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);  in pgpath_busy()

md-linear.c
   100  if (blk_queue_discard(bdev_get_queue(rdev->bdev)))  in linear_conf()

dm-clone-target.c
  2022  struct request_queue *q = bdev_get_queue(bdev);  in bdev_supports_discards()
  2034  struct queue_limits *dest_limits = &bdev_get_queue(dest_dev)->limits;  in disable_passdown_if_not_supported()
  2056  struct queue_limits *dest_limits = &bdev_get_queue(dest_bdev)->limits;  in set_discard_limits()

/linux/drivers/target/

target_core_iblock.c
   117  q = bdev_get_queue(bd);  in iblock_configure_device()
   734  struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);  in iblock_execute_rw()
   833  struct request_queue *q = bdev_get_queue(bd);  in iblock_get_blocks()
   895  struct request_queue *q = bdev_get_queue(bd);  in iblock_get_write_cache()

/linux/fs/jfs/

ioctl.c
   113  struct request_queue *q = bdev_get_queue(sb->s_bdev);  in jfs_ioctl()

super.c
   376  struct request_queue *q = bdev_get_queue(sb->s_bdev);  in parse_options()
   395  struct request_queue *q = bdev_get_queue(sb->s_bdev);  in parse_options()
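
jfs_ioctl() above and the xfs_ioc_trim()/exfat_ioctl_fitrim() entries below all begin FITRIM handling the same way: a privilege check, then a discard-capability check on the superblock's bdev queue. A sketch of that shared preamble; my_fitrim_precheck() is a hypothetical consolidation, not a helper any of these filesystems define:

    #include <linux/blkdev.h>
    #include <linux/capability.h>
    #include <linux/fs.h>

    static int my_fitrim_precheck(struct super_block *sb)
    {
        struct request_queue *q = bdev_get_queue(sb->s_bdev);

        if (!capable(CAP_SYS_ADMIN))
            return -EPERM;

        if (!blk_queue_discard(q))
            return -EOPNOTSUPP;    /* device cannot service FITRIM */

        return 0;
    }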

/linux/fs/xfs/

xfs_discard.c
   155  struct request_queue *q = bdev_get_queue(mp->m_ddev_targp->bt_bdev);  in xfs_ioc_trim()

/linux/block/partitions/

core.c
   208  queue_limit_alignment_offset(&bdev_get_queue(bdev)->limits,  in part_alignment_offset_show()
   218  queue_limit_discard_alignment(&bdev_get_queue(bdev)->limits,  in part_discard_alignment_show()

/linux/fs/crypto/

inline_crypt.c
    40  devs[0] = bdev_get_queue(sb->s_bdev);  in fscrypt_get_devices()

/linux/fs/exfat/

file.c
   356  struct request_queue *q = bdev_get_queue(inode->i_sb->s_bdev);  in exfat_ioctl_fitrim()