Identifier search: rq_flags

/linux/block/
blk-mq-sched.h
   49  if (rq->rq_flags & RQF_ELV) {  in blk_mq_sched_allow_merge()
   60  if (rq->rq_flags & RQF_ELV) {  in blk_mq_sched_completed_request()
   70  if (rq->rq_flags & RQF_ELV) {  in blk_mq_sched_requeue_request()
   74  if ((rq->rq_flags & RQF_ELVPRIV) && e->type->ops.requeue_request)  in blk_mq_sched_requeue_request()

blk-mq.c
  344  data->rq_flags |= RQF_PM;  in blk_mq_rq_ctx_init()
  346  data->rq_flags |= RQF_IO_STAT;  in blk_mq_rq_ctx_init()
  347  rq->rq_flags = data->rq_flags;  in blk_mq_rq_ctx_init()
  382  if (rq->rq_flags & RQF_ELV) {  in blk_mq_rq_ctx_init()
  395  rq->rq_flags |= RQF_ELVPRIV;  in blk_mq_rq_ctx_init()
  451  data->rq_flags |= RQF_ELV;  in __blk_mq_alloc_requests()
  468  if (!(data->rq_flags & RQF_ELV))  in __blk_mq_alloc_requests()
  581  data.rq_flags |= RQF_ELV;  in blk_mq_alloc_request_hctx()
  796  if (rq->rq_flags & RQF_STATS) {  in __blk_mq_end_request_acct()
 1043  rq->rq_flags |= RQF_STATS;  in blk_mq_start_request()
 [all …]

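As the blk-mq.c hits suggest, the block layer's rq_flags is a plain req_flags_t bitmask on struct request (and on the allocation data), set with |=, cleared with &= ~, and tested with &, while a single context owns the request. Below is a minimal user-space sketch of that idiom; the RQF_* bit positions are hypothetical stand-ins, not the kernel's real layout.

    /* Minimal sketch of the req_flags_t bitmask idiom; the RQF_* bit
     * positions here are hypothetical, not the kernel's real layout. */
    #include <stdio.h>

    typedef unsigned int req_flags_t;

    #define RQF_STATS     ((req_flags_t)1 << 0)
    #define RQF_IO_STAT   ((req_flags_t)1 << 1)
    #define RQF_FLUSH_SEQ ((req_flags_t)1 << 2)

    struct request {
        req_flags_t rq_flags;
    };

    int main(void)
    {
        struct request rq = { .rq_flags = 0 };

        rq.rq_flags |= RQF_STATS;        /* set: cf. blk_mq_start_request() */
        if (rq.rq_flags & RQF_STATS)     /* test: cf. __blk_mq_end_request_acct() */
            printf("stats accounting enabled\n");
        rq.rq_flags &= ~RQF_FLUSH_SEQ;   /* clear: cf. blk_flush_restore_request() */
        return 0;
    }
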
blk-mq.h
  158  req_flags_t rq_flags;  (member)
  176  if (!(data->rq_flags & RQF_ELV))  in blk_mq_tags_from_data()
  260  if (rq->rq_flags & RQF_MQ_INFLIGHT) {  in __blk_mq_put_driver_tag()
  261  rq->rq_flags &= ~RQF_MQ_INFLIGHT;  in __blk_mq_put_driver_tag()

blk-flush.c
  130  rq->rq_flags &= ~RQF_FLUSH_SEQ;  in blk_flush_restore_request()
  328  flush_rq->rq_flags |= RQF_MQ_INFLIGHT;  in blk_kick_flush()
  334  flush_rq->rq_flags |= RQF_FLUSH_SEQ;  in blk_kick_flush()
  434  rq->rq_flags |= RQF_FLUSH_SEQ;  in blk_insert_flush()

blk-zoned.c
   84  WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);  in blk_req_zone_write_trylock()
   85  rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;  in blk_req_zone_write_trylock()
   97  WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);  in __blk_req_zone_write_lock()
   98  rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;  in __blk_req_zone_write_lock()
  104  rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED;  in __blk_req_zone_write_unlock()

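The blk-zoned.c hits show a small locking discipline layered on a single flag: both lock paths first WARN_ON_ONCE() that RQF_ZONE_WRITE_LOCKED is not already set, then set it, and unlock clears it. A rough standalone sketch, with assert() standing in for WARN_ON_ONCE() and a made-up bit value:

    /* Sketch of the RQF_ZONE_WRITE_LOCKED discipline; assert() stands in
     * for WARN_ON_ONCE(), and the bit position is made up. */
    #include <assert.h>

    typedef unsigned int req_flags_t;
    #define RQF_ZONE_WRITE_LOCKED ((req_flags_t)1 << 0)

    struct request {
        req_flags_t rq_flags;
    };

    static void zone_write_lock(struct request *rq)
    {
        /* locking a request that already holds the zone lock is a bug */
        assert(!(rq->rq_flags & RQF_ZONE_WRITE_LOCKED));
        rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;
    }

    static void zone_write_unlock(struct request *rq)
    {
        rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED;
    }

    int main(void)
    {
        struct request rq = { .rq_flags = 0 };

        zone_write_lock(&rq);
        zone_write_unlock(&rq);
        return 0;
    }
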
blk.h
  150  if (rq->rq_flags & RQF_NOMERGE_FLAGS)  in rq_mergeable()
  272  #define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)
  350  return (rq->rq_flags & RQF_IO_STAT) && rq->rq_disk;  in blk_do_io_stat()
  361  !(req->rq_flags & RQF_FLUSH_SEQ))  in blk_account_io_done()

blk-pm.h
   21  if (rq->q->dev && !(rq->rq_flags & RQF_PM))  in blk_pm_mark_last_busy()

blk-merge.c
  554  if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)  in __blk_rq_map_sg()
  722  if (rq->rq_flags & RQF_MIXED_MERGE)  in blk_rq_set_mixed_merge()
  735  rq->rq_flags |= RQF_MIXED_MERGE;  in blk_rq_set_mixed_merge()
  823  if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||  in attempt_merge()

blk-timeout.c
  140  req->rq_flags &= ~RQF_TIMED_OUT;  in blk_add_timer()

blk-mq-sched.c
  421  if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))  in blk_mq_sched_bypass_insert()
  459  at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head;  in blk_mq_sched_insert_request()

/linux/include/linux/
blk-mq.h
   84  req_flags_t rq_flags;  (member)
  766  return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_ELV));  in blk_mq_need_time_stamp()
  777  if (!iob || (req->rq_flags & RQF_ELV) || req->end_io || ioerror)  in blk_mq_add_to_batch()
  999  if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)  in blk_rq_payload_bytes()
 1010  if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)  in req_bvec()
 1049  if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)  in blk_rq_nr_phys_segments()
 1098  if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)  in blk_req_zone_write_unlock()

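blk_mq_need_time_stamp() above also shows the usual way to test several flags at once: OR the RQF_* constants into a single mask and AND it against rq_flags in one test. A self-contained sketch with hypothetical flag values:

    /* Sketch of the one-mask multi-flag test from blk_mq_need_time_stamp();
     * flag values are hypothetical. */
    typedef unsigned int req_flags_t;

    #define RQF_IO_STAT ((req_flags_t)1 << 0)
    #define RQF_STATS   ((req_flags_t)1 << 1)
    #define RQF_ELV     ((req_flags_t)1 << 2)

    /* true if any of the three flags is set: one AND instead of three tests */
    static inline int need_time_stamp(req_flags_t rq_flags)
    {
        return (rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_ELV)) != 0;
    }
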
/linux/kernel/sched/
sched.h
 1510  struct rq_flags {  (struct)
 1535  static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)  in rq_pin_lock()
 1548  static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)  in rq_unpin_lock()
 1573  struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 1595  rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)  in rq_lock_irqsave()
 1603  rq_lock_irq(struct rq *rq, struct rq_flags *rf)  in rq_lock_irq()
 1611  rq_lock(struct rq *rq, struct rq_flags *rf)  in rq_lock()
 1619  rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)  in rq_unlock_irqrestore()
 1627  rq_unlock_irq(struct rq *rq, struct rq_flags *rf)  in rq_unlock_irq()
 1635  rq_unlock(struct rq *rq, struct rq_flags *rf)  in rq_unlock()
 [all …]

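Note that rq_flags here is unrelated to the block-layer request member above: in kernel/sched/, struct rq_flags (sched.h line 1510) is a caller-supplied cookie that the rq_lock()/rq_unlock() family threads through, so saved IRQ state and lockdep pin information survive across the locked section. A standalone analogue of the calling pattern follows; a pthread mutex stands in for the runqueue spinlock and the struct is reduced to a saved-flags word.

    /* Standalone analogue of the rq_lock()/rq_unlock() calling pattern.
     * A pthread mutex stands in for the runqueue spinlock; in the kernel,
     * struct rq_flags also carries lockdep pin cookies. */
    #include <pthread.h>
    #include <stdio.h>

    struct rq {
        pthread_mutex_t lock;
        int nr_running;
    };

    struct rq_flags {
        unsigned long flags;    /* stands in for saved IRQ flags */
    };

    static void rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
    {
        rf->flags = 0;                   /* kernel: save IRQ state, then pin the lock */
        pthread_mutex_lock(&rq->lock);
    }

    static void rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
    {
        pthread_mutex_unlock(&rq->lock); /* kernel: unpin, then restore IRQ state */
        (void)rf->flags;
    }

    int main(void)
    {
        struct rq rq = { .lock = PTHREAD_MUTEX_INITIALIZER, .nr_running = 0 };
        struct rq_flags rf;

        rq_lock_irqsave(&rq, &rf);
        rq.nr_running++;                 /* runqueue state only changes under the lock */
        rq_unlock_irqrestore(&rq, &rf);
        printf("nr_running=%d\n", rq.nr_running);
        return 0;
    }
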
core.c
  708  struct rq_flags rf;  in hrtick()
  736  struct rq_flags rf;  in __hrtick_start()
 1384  struct rq_flags rf;  in uclamp_update_util_min_rt_default()
 1663  struct rq_flags rf;  in uclamp_update_active()
 2320  struct rq_flags rf;  in migration_cpu_stop()
 2867  struct rq_flags rf;  in __set_cpus_allowed_ptr()
 2892  struct rq_flags rf;  in restrict_cpus_allowed_ptr()
 3203  struct rq_flags rf;  in wait_task_inactive()
 3631  struct rq_flags rf;  in ttwu_runnable()
 3653  struct rq_flags rf;  in sched_ttwu_pending()
 [all …]

stop_task.c
   20  balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)  in balance_stop()

core_sched.c
   60  struct rq_flags rf;  in sched_core_update_cookie()

stats.h
  166  struct rq_flags rf;  in psi_ttwu_dequeue()

/linux/drivers/scsi/
scsi_lib.c
  125  if (rq->rq_flags & RQF_DONTPREP) {  in scsi_mq_requeue_cmd()
  126  rq->rq_flags &= ~RQF_DONTPREP;  in scsi_mq_requeue_cmd()
  239  req->rq_flags |= rq_flags | RQF_QUIET;  in __scsi_execute()
  776  if (!(req->rq_flags & RQF_QUIET)) {  in scsi_io_completion_action()
  865  else if (req->rq_flags & RQF_QUIET)  in scsi_io_completion_nz_result()
 1116  if (rq->rq_flags & RQF_DONTPREP) {  in scsi_cleanup_rq()
 1118  rq->rq_flags &= ~RQF_DONTPREP;  in scsi_cleanup_rq()
 1237  if (req && !(req->rq_flags & RQF_PM))  in scsi_device_state_check()
 1692  req->rq_flags |= RQF_DONTPREP;  in scsi_queue_rq()
 1735  if (req->rq_flags & RQF_DONTPREP)  in scsi_queue_rq()
 [all …]

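The scsi_lib.c hits trace a prepare-once protocol: scsi_queue_rq() sets RQF_DONTPREP after a command has been prepared, and the requeue and cleanup paths clear it again (undoing the preparation) so a retried request is prepared afresh; the mmc queue.c hits further down follow the same convention. A simplified standalone sketch of that protocol, with stand-in types and flag value:

    /* Simplified sketch of the RQF_DONTPREP prepare-once protocol;
     * the flag's bit position and the types are stand-ins. */
    #include <stdbool.h>
    #include <stdio.h>

    typedef unsigned int req_flags_t;
    #define RQF_DONTPREP ((req_flags_t)1 << 0)

    struct request {
        req_flags_t rq_flags;
        bool prepared;
    };

    static void queue_rq(struct request *rq)
    {
        if (!(rq->rq_flags & RQF_DONTPREP)) {  /* prepare only once */
            rq->prepared = true;
            rq->rq_flags |= RQF_DONTPREP;      /* cf. scsi_queue_rq() */
        }
    }

    static void requeue(struct request *rq)
    {
        if (rq->rq_flags & RQF_DONTPREP) {     /* cf. scsi_mq_requeue_cmd() */
            rq->prepared = false;              /* undo preparation */
            rq->rq_flags &= ~RQF_DONTPREP;     /* so a retry prepares afresh */
        }
    }

    int main(void)
    {
        struct request rq = { .rq_flags = 0, .prepared = false };

        queue_rq(&rq);
        requeue(&rq);
        queue_rq(&rq);                         /* re-prepared after the requeue */
        printf("prepared=%d flags=%#x\n", rq.prepared, rq.rq_flags);
        return 0;
    }
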
/linux/net/sunrpc/
svc_xprt.c
  381  if (!test_bit(RQ_DATA, &rqstp->rq_flags)) {  in svc_xprt_reserve_slot()
  385  set_bit(RQ_DATA, &rqstp->rq_flags);  in svc_xprt_reserve_slot()
  393  if (test_and_clear_bit(RQ_DATA, &rqstp->rq_flags)) {  in svc_xprt_release_slot()
  457  if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags))  in svc_xprt_do_enqueue()
  581  if (test_bit(RQ_BUSY, &rqstp->rq_flags))  in svc_wake_up()
  751  clear_bit(RQ_BUSY, &rqstp->rq_flags);  in svc_get_next_xprt()
  761  set_bit(RQ_BUSY, &rqstp->rq_flags);  in svc_get_next_xprt()
 1196  if (rqstp->rq_arg.page_len || !test_bit(RQ_USEDEFERRAL, &rqstp->rq_flags))  in svc_defer()
 1226  set_bit(RQ_DROPME, &rqstp->rq_flags);  in svc_defer()

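In sunrpc, rqstp->rq_flags is different again: a word of RQ_* bits manipulated with the kernel's atomic bitops (test_bit(), set_bit(), clear_bit(), test_and_set_bit()), since several threads can race on a svc_rqst; svc_xprt_do_enqueue(), for instance, claims a thread with a single test_and_set_bit(RQ_BUSY, ...). A user-space model of those helpers built on C11 atomics, with made-up bit numbers:

    /* User-space model of the atomic bit helpers used on rqstp->rq_flags,
     * built on C11 atomics; the RQ_* bit numbers are made up. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum { RQ_BUSY = 0, RQ_DATA = 1, RQ_SECURE = 2 };

    static bool test_bit(int nr, atomic_ulong *addr)
    {
        return atomic_load(addr) & (1UL << nr);
    }

    static void set_bit(int nr, atomic_ulong *addr)
    {
        atomic_fetch_or(addr, 1UL << nr);
    }

    static void clear_bit(int nr, atomic_ulong *addr)
    {
        atomic_fetch_and(addr, ~(1UL << nr));
    }

    static bool test_and_set_bit(int nr, atomic_ulong *addr)
    {
        return atomic_fetch_or(addr, 1UL << nr) & (1UL << nr);
    }

    int main(void)
    {
        atomic_ulong rq_flags = 0;

        if (!test_and_set_bit(RQ_BUSY, &rq_flags))  /* cf. svc_xprt_do_enqueue() */
            printf("claimed this thread\n");
        set_bit(RQ_DATA, &rq_flags);                /* cf. svc_xprt_reserve_slot() */
        clear_bit(RQ_BUSY, &rq_flags);              /* cf. svc_get_next_xprt() */
        printf("RQ_DATA=%d\n", (int)test_bit(RQ_DATA, &rq_flags));
        return 0;
    }
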
svc.c
  614  __set_bit(RQ_BUSY, &rqstp->rq_flags);  in svc_rqst_alloc()
  703  set_bit(RQ_VICTIM, &rqstp->rq_flags);  in choose_victim()
  889  if (!test_and_set_bit(RQ_VICTIM, &rqstp->rq_flags))  in svc_exit_thread()
 1264  set_bit(RQ_SPLICE_OK, &rqstp->rq_flags);  in svc_process_common()
 1266  set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);  in svc_process_common()
 1267  clear_bit(RQ_DROPME, &rqstp->rq_flags);  in svc_process_common()

svcsock.c
  312  set_bit(RQ_SECURE, &rqstp->rq_flags);  in svc_sock_secure_port()
  314  clear_bit(RQ_SECURE, &rqstp->rq_flags);  in svc_sock_secure_port()
 1019  set_bit(RQ_LOCAL, &rqstp->rq_flags);  in svc_tcp_recvfrom()
 1021  clear_bit(RQ_LOCAL, &rqstp->rq_flags);  in svc_tcp_recvfrom()

/linux/drivers/mmc/core/
queue.c
  240  req->rq_flags |= RQF_QUIET;  in mmc_mq_queue_rq()
  292  if (!(req->rq_flags & RQF_DONTPREP)) {  in mmc_mq_queue_rq()
  294  req->rq_flags |= RQF_DONTPREP;  in mmc_mq_queue_rq()

/linux/include/scsi/
scsi_device.h
  454  req_flags_t rq_flags, int *resid);
  457  sshdr, timeout, retries, flags, rq_flags, resid) \  (argument)
  462  sense, sshdr, timeout, retries, flags, rq_flags, \

/linux/drivers/md/
dm-rq.c
  268  if (rq->rq_flags & RQF_FAILED)  in dm_softirq_done()
  295  rq->rq_flags |= RQF_FAILED;  in dm_kill_unmapped_request()
  311  clone->rq_flags |= RQF_IO_STAT;  in dm_dispatch_clone_request()

/linux/drivers/scsi/device_handler/
scsi_dh_hp_sw.c
  167  req->rq_flags |= RQF_QUIET;  in hp_sw_prep_fn()

/linux/fs/nfsd/
nfscache.c
  481  if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)  in nfsd_cache_lookup()
  575  rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);  in nfsd_cache_update()