Lines matching refs: request_queue (these setters live in block/blk-settings.c)

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
void blk_queue_max_discard_sectors(struct request_queue *q, unsigned int max_discard_sectors)
void blk_queue_max_write_same_sectors(struct request_queue *q, unsigned int max_write_same_sectors)
void blk_queue_max_write_zeroes_sectors(struct request_queue *q, unsigned int max_write_zeroes_sectors)
void blk_queue_max_zone_append_sectors(struct request_queue *q, unsigned int max_zone_append_sectors)
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
void blk_queue_max_discard_segments(struct request_queue *q, unsigned short max_segments)
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
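
The setters above bound per-request throughput and scatter/gather geometry. A minimal sketch of how a driver might apply them at probe time; the mydrv_* names, the capability struct, and every value are invented for illustration and are not part of the listing:

#include <linux/blkdev.h>

/* Hypothetical per-controller capabilities; invented for illustration. */
struct mydrv_caps {
	unsigned int   max_transfer_bytes; /* largest DMA transfer per request */
	unsigned short max_sg_entries;     /* scatter/gather table depth */
	unsigned int   max_seg_bytes;      /* largest contiguous segment */
};

static void mydrv_apply_limits(struct request_queue *q,
			       const struct mydrv_caps *caps)
{
	/* Per-request cap, expressed in 512-byte sectors. */
	blk_queue_max_hw_sectors(q, caps->max_transfer_bytes >> SECTOR_SHIFT);
	/* One scatter/gather entry maps one segment. */
	blk_queue_max_segments(q, caps->max_sg_entries);
	/* Largest contiguous chunk the DMA engine accepts per segment. */
	blk_queue_max_segment_size(q, caps->max_seg_bytes);
}
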
void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
void blk_queue_zone_write_granularity(struct request_queue *q, unsigned int size)
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
struct request_queue *q = disk->queue;  in disk_update_readahead()
void blk_queue_io_min(struct request_queue *q, unsigned int min)
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
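
These describe sector geometry and the device's preferred I/O sizes. A sketch, assuming a hypothetical 512e drive (4 KiB physical sectors behind 512-byte logical addressing); all values are illustrative:

#include <linux/blkdev.h>

static void mydrv_describe_geometry(struct request_queue *q)
{
	blk_queue_logical_block_size(q, 512);    /* addressing unit the host sees */
	blk_queue_physical_block_size(q, 4096);  /* native media sector size */
	blk_queue_io_min(q, 4096);               /* smallest I/O without a read-modify-write penalty */
	blk_queue_io_opt(q, 64 * 1024);          /* hypothetical optimal transfer size */
}

The io_min and io_opt hints surface in sysfs as queue/minimum_io_size and queue/optimal_io_size, where mkfs and partitioning tools pick them up.
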
struct request_queue *t = disk->queue;  in disk_stack_limits()
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
void blk_queue_dma_alignment(struct request_queue *q, int mask)
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
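
The boundary and alignment setters all take a mask, i.e. the alignment or boundary size minus one. A sketch for an invented controller that needs 4-byte-aligned buffers, forbids segments from crossing a 64 KiB boundary, and (NVMe-style) only tolerates gaps between segments at page boundaries:

#include <linux/blkdev.h>
#include <linux/sizes.h>

static void mydrv_dma_constraints(struct request_queue *q)
{
	blk_queue_dma_alignment(q, 4 - 1);          /* buffer start/length 4-byte aligned */
	blk_queue_segment_boundary(q, SZ_64K - 1);  /* no segment may cross 64 KiB */
	blk_queue_virt_boundary(q, PAGE_SIZE - 1);  /* segment gaps only at page boundaries */
}
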
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
void blk_queue_required_elevator_features(struct request_queue *q, unsigned int features)
bool blk_queue_can_use_dma_map_merging(struct request_queue *q, struct device *dev)
struct request_queue *q = disk->queue;  in blk_queue_set_zoned()
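
Last, the cache and depth knobs. A sketch for a hypothetical device with a volatile write-back cache, native FUA support, and 64 outstanding commands; the values are invented:

#include <linux/blkdev.h>

static void mydrv_cache_and_depth(struct request_queue *q)
{
	/* wc=true: the block layer issues flushes for the volatile cache;
	 * fua=true: REQ_FUA writes pass through instead of being emulated
	 * by a separate post-flush. */
	blk_queue_write_cache(q, true, true);
	/* Depth hint consumed by I/O schedulers and writeback throttling. */
	blk_set_queue_depth(q, 64);
}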