Lines matching refs: cpu_rq
1568 struct rq *rq = cpu_rq(cpu); in update_numa_stats()
1600 struct rq *rq = cpu_rq(env->dst_cpu); in task_numa_assign()
1615 rq = cpu_rq(env->dst_cpu); in task_numa_assign()
1630 rq = cpu_rq(env->best_cpu); in task_numa_assign()
1689 struct rq *dst_rq = cpu_rq(env->dst_cpu); in task_numa_compare()
2037 best_rq = cpu_rq(env.best_cpu); in task_numa_migrate()
2465 tsk = READ_ONCE(cpu_rq(cpu)->curr); in task_numa_group()
5224 cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu)); in sync_throttle()
5551 return sched_idle_rq(cpu_rq(cpu)); in sched_idle_cpu()
5826 return cpu_rq(cpu)->cpu_capacity; in capacity_of()
5906 if (sync && cpu_rq(this_cpu)->nr_running == 1) in wake_affine_idle()
5922 this_eff_load = cpu_load(cpu_rq(this_cpu)); in wake_affine_weight()
5940 prev_eff_load = cpu_load(cpu_rq(prev_cpu)); in wake_affine_weight()
6000 struct rq *rq = cpu_rq(i); in find_idlest_group_cpu()
6030 load = cpu_load(cpu_rq(i)); in find_idlest_group_cpu()
6097 sched_cpu_cookie_match(cpu_rq(cpu), p)) in __select_idle_cpu()
6502 cfs_rq = &cpu_rq(cpu)->cfs; in cpu_util()
6533 cfs_rq = &cpu_rq(cpu)->cfs; in cpu_util_without()
6606 struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs; in cpu_util_next()
6753 struct root_domain *rd = cpu_rq(smp_processor_id())->rd; in find_energy_efficient_cpu()
6802 util = uclamp_rq_util_with(cpu_rq(cpu), util, p); in find_energy_efficient_cpu()
7675 if (!sched_core_cookie_match(cpu_rq(env->dst_cpu), p)) in task_hot()
8253 struct rq *rq = cpu_rq(cpu); in update_blocked_averages()
8332 struct rq *rq = cpu_rq(cpu); in scale_rt_capacity()
8365 cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu); in update_cpu_capacity()
8370 cpu_rq(cpu)->cpu_capacity = capacity; in update_cpu_capacity()
8371 trace_sched_cpu_capacity_tp(cpu_rq(cpu)); in update_cpu_capacity()
8681 struct rq *rq = cpu_rq(i); in update_sg_lb_stats()
8919 struct rq *rq = cpu_rq(cpu); in idle_cpu_without()
8955 struct rq *rq = cpu_rq(i); in update_sg_wakeup_stats()
9085 if (!sched_group_cookie_match(cpu_rq(this_cpu), p, group)) in find_idlest_group()
9621 rq = cpu_rq(i); in find_busiest_queue()
9965 env.dst_rq = cpu_rq(env.new_dst_cpu); in load_balance()
10158 struct rq *target_rq = cpu_rq(target_cpu); in active_load_balance_cpu_stop()
10421 smp_call_function_single_async(ilb_cpu, &cpu_rq(ilb_cpu)->nohz_csd); in kick_ilb()
10594 struct rq *rq = cpu_rq(cpu); in nohz_balance_enter_idle()
10728 rq = cpu_rq(balance_cpu); in _nohz_idle_balance()
10808 _nohz_idle_balance(cpu_rq(cpu), NOHZ_STATS_KICK, CPU_IDLE); in nohz_run_idle_balance()
11519 rq = cpu_rq(i); in online_fair_sched_group()
11548 rq = cpu_rq(cpu); in unregister_fair_sched_group()
11560 struct rq *rq = cpu_rq(cpu); in init_tg_cfs_entry()
11608 struct rq *rq = cpu_rq(i); in __sched_group_set_shares()
11659 struct rq *rq = cpu_rq(i); in sched_group_set_idle()
11798 for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos) in print_cfs_stats()
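For orientation: the matches above all appear to come from kernel/sched/fair.c (the CFS scheduler), and cpu_rq() itself is a per-CPU runqueue accessor defined in kernel/sched/sched.h. The sketch below shows the macro and its close relatives roughly as they appear in mainline kernels of this era; it is a paraphrase for reference, not a verbatim copy from the exact tree these line numbers were generated against.

/*
 * Sketch of cpu_rq() and related accessors, approximately as defined in
 * kernel/sched/sched.h (mainline). Each CPU has its own struct rq instance
 * in per-CPU storage; these macros resolve to a pointer into that storage.
 */
DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu)	(&per_cpu(runqueues, (cpu)))	/* runqueue of a given CPU */
#define this_rq()	this_cpu_ptr(&runqueues)	/* runqueue of the current CPU */
#define task_rq(p)	cpu_rq(task_cpu(p))		/* runqueue a task is queued on */
#define cpu_curr(cpu)	(cpu_rq(cpu)->curr)		/* task currently running on a CPU */

Because each CPU owns its own struct rq, code in fair.c reaches a remote CPU's runqueue only through cpu_rq(cpu), and then either takes the appropriate rq lock or uses lockless reads where that is sufficient (as in the READ_ONCE(cpu_rq(cpu)->curr) access in task_numa_group() above).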