Lines Matching refs:rt_rq

78 void init_rt_rq(struct rt_rq *rt_rq)  in init_rt_rq()  argument
83 array = &rt_rq->active; in init_rt_rq()
92 rt_rq->highest_prio.curr = MAX_RT_PRIO-1; in init_rt_rq()
93 rt_rq->highest_prio.next = MAX_RT_PRIO-1; in init_rt_rq()
94 rt_rq->rt_nr_migratory = 0; in init_rt_rq()
95 rt_rq->overloaded = 0; in init_rt_rq()
96 plist_head_init(&rt_rq->pushable_tasks); in init_rt_rq()
99 rt_rq->rt_queued = 0; in init_rt_rq()
101 rt_rq->rt_time = 0; in init_rt_rq()
102 rt_rq->rt_throttled = 0; in init_rt_rq()
103 rt_rq->rt_runtime = 0; in init_rt_rq()
104 raw_spin_lock_init(&rt_rq->rt_runtime_lock); in init_rt_rq()
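
Note: the init_rt_rq() references above (lines 78-104) cover the whole initialisation path for a per-CPU RT runqueue. Below is a minimal user-space sketch assembled from just the fields visible in this listing; the struct layout is illustrative, not the kernel's real struct rt_rq, and the pthread spinlock merely stands in for raw_spin_lock_init().

    #include <stdint.h>
    #include <pthread.h>

    #define MAX_RT_PRIO 100

    /* Trimmed-down stand-in for struct rt_rq (illustrative only). */
    struct rt_rq_sketch {
        struct { int curr; int next; } highest_prio;
        unsigned int rt_nr_migratory;
        int overloaded;
        int rt_queued;
        uint64_t rt_time;
        int rt_throttled;
        uint64_t rt_runtime;
        pthread_spinlock_t rt_runtime_lock;
    };

    /* Mirrors the initialisation order seen at lines 92-104 above. */
    static void init_rt_rq_sketch(struct rt_rq_sketch *rt_rq)
    {
        rt_rq->highest_prio.curr = MAX_RT_PRIO - 1;
        rt_rq->highest_prio.next = MAX_RT_PRIO - 1;
        rt_rq->rt_nr_migratory = 0;
        rt_rq->overloaded = 0;
        rt_rq->rt_queued = 0;
        rt_rq->rt_time = 0;
        rt_rq->rt_throttled = 0;
        rt_rq->rt_runtime = 0;
        pthread_spin_init(&rt_rq->rt_runtime_lock, PTHREAD_PROCESS_PRIVATE);
    }
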
123 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq() argument
125 return rt_rq->rq; in rq_of_rt_rq()
128 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se) in rt_rq_of_se()
130 return rt_se->rt_rq; in rt_rq_of_se()
135 struct rt_rq *rt_rq = rt_se->rt_rq; in rq_of_rt_se() local
137 return rt_rq->rq; in rq_of_rt_se()
152 if (tg->rt_rq) in free_rt_sched_group()
153 kfree(tg->rt_rq[i]); in free_rt_sched_group()
158 kfree(tg->rt_rq); in free_rt_sched_group()
162 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, in init_tg_rt_entry() argument
168 rt_rq->highest_prio.curr = MAX_RT_PRIO-1; in init_tg_rt_entry()
169 rt_rq->rt_nr_boosted = 0; in init_tg_rt_entry()
170 rt_rq->rq = rq; in init_tg_rt_entry()
171 rt_rq->tg = tg; in init_tg_rt_entry()
173 tg->rt_rq[cpu] = rt_rq; in init_tg_rt_entry()
180 rt_se->rt_rq = &rq->rt; in init_tg_rt_entry()
182 rt_se->rt_rq = parent->my_q; in init_tg_rt_entry()
184 rt_se->my_q = rt_rq; in init_tg_rt_entry()
191 struct rt_rq *rt_rq; in alloc_rt_sched_group() local
195 tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL); in alloc_rt_sched_group()
196 if (!tg->rt_rq) in alloc_rt_sched_group()
206 rt_rq = kzalloc_node(sizeof(struct rt_rq), in alloc_rt_sched_group()
208 if (!rt_rq) in alloc_rt_sched_group()
216 init_rt_rq(rt_rq); in alloc_rt_sched_group()
217 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime; in alloc_rt_sched_group()
218 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]); in alloc_rt_sched_group()
224 kfree(rt_rq); in alloc_rt_sched_group()
238 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq() argument
240 return container_of(rt_rq, struct rq, rt); in rq_of_rt_rq()
250 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se) in rt_rq_of_se()
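
Note: the two rq_of_rt_rq() definitions (lines 123 and 238) show the group-scheduling and non-group variants. With CONFIG_RT_GROUP_SCHED each rt_rq carries an explicit back-pointer to its rq; without it, the rt_rq is embedded in struct rq and container_of() recovers the owner. A small stand-alone illustration of the container_of pattern, using invented demo struct names:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rt_rq_demo { int rt_nr_running; };
    struct rq_demo    { int cpu; struct rt_rq_demo rt; };

    /* Same shape as the !CONFIG_RT_GROUP_SCHED rq_of_rt_rq() at line 238. */
    static struct rq_demo *rq_of_rt_rq_demo(struct rt_rq_demo *rt_rq)
    {
        return container_of(rt_rq, struct rq_demo, rt);
    }

    int main(void)
    {
        struct rq_demo rq = { .cpu = 3 };
        printf("cpu=%d\n", rq_of_rt_rq_demo(&rq.rt)->cpu);   /* prints cpu=3 */
        return 0;
    }
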
311 static void update_rt_migration(struct rt_rq *rt_rq) in update_rt_migration() argument
313 if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) { in update_rt_migration()
314 if (!rt_rq->overloaded) { in update_rt_migration()
315 rt_set_overload(rq_of_rt_rq(rt_rq)); in update_rt_migration()
316 rt_rq->overloaded = 1; in update_rt_migration()
318 } else if (rt_rq->overloaded) { in update_rt_migration()
319 rt_clear_overload(rq_of_rt_rq(rt_rq)); in update_rt_migration()
320 rt_rq->overloaded = 0; in update_rt_migration()
324 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in inc_rt_migration() argument
332 rt_rq = &rq_of_rt_rq(rt_rq)->rt; in inc_rt_migration()
334 rt_rq->rt_nr_total++; in inc_rt_migration()
336 rt_rq->rt_nr_migratory++; in inc_rt_migration()
338 update_rt_migration(rt_rq); in inc_rt_migration()
341 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in dec_rt_migration() argument
349 rt_rq = &rq_of_rt_rq(rt_rq)->rt; in dec_rt_migration()
351 rt_rq->rt_nr_total--; in dec_rt_migration()
353 rt_rq->rt_nr_migratory--; in dec_rt_migration()
355 update_rt_migration(rt_rq); in dec_rt_migration()
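
Note: lines 311-355 together implement the runqueue overload bookkeeping: a CPU is flagged overloaded once it has more than one runnable RT task and at least one of them may migrate, which is what makes pushing tasks to other CPUs worthwhile. A hedged, self-contained model of that condition; the nr_cpus_allowed test is an assumption drawn from the surrounding kernel code rather than from this listing, and rt_set_overload()/rt_clear_overload() are replaced by a plain flag.

    struct rt_migration_state {
        unsigned int rt_nr_total;      /* all queued RT tasks */
        unsigned int rt_nr_migratory;  /* those allowed on more than one CPU */
        int overloaded;
    };

    /* Mirrors update_rt_migration() at line 311: overloaded iff a task could be pushed away. */
    static void update_rt_migration_sketch(struct rt_migration_state *s)
    {
        if (s->rt_nr_migratory && s->rt_nr_total > 1)
            s->overloaded = 1;    /* the kernel also calls rt_set_overload() here */
        else
            s->overloaded = 0;    /* and rt_clear_overload() here */
    }

    static void inc_rt_migration_sketch(struct rt_migration_state *s, int nr_cpus_allowed)
    {
        s->rt_nr_total++;
        if (nr_cpus_allowed > 1)            /* assumed condition, not shown in the listing */
            s->rt_nr_migratory++;
        update_rt_migration_sketch(s);
    }
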
418 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in inc_rt_migration() argument
423 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in dec_rt_migration() argument
441 static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
442 static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
490 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq) in sched_rt_runtime() argument
492 if (!rt_rq->tg) in sched_rt_runtime()
495 return rt_rq->rt_runtime; in sched_rt_runtime()
498 static inline u64 sched_rt_period(struct rt_rq *rt_rq) in sched_rt_period() argument
500 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period); in sched_rt_period()
518 #define for_each_rt_rq(rt_rq, iter, rq) \ argument
521 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
526 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se) in group_rt_rq()
534 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq) in sched_rt_rq_enqueue() argument
536 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr; in sched_rt_rq_enqueue()
537 struct rq *rq = rq_of_rt_rq(rt_rq); in sched_rt_rq_enqueue()
542 rt_se = rt_rq->tg->rt_se[cpu]; in sched_rt_rq_enqueue()
544 if (rt_rq->rt_nr_running) { in sched_rt_rq_enqueue()
546 enqueue_top_rt_rq(rt_rq); in sched_rt_rq_enqueue()
550 if (rt_rq->highest_prio.curr < curr->prio) in sched_rt_rq_enqueue()
555 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq) in sched_rt_rq_dequeue() argument
558 int cpu = cpu_of(rq_of_rt_rq(rt_rq)); in sched_rt_rq_dequeue()
560 rt_se = rt_rq->tg->rt_se[cpu]; in sched_rt_rq_dequeue()
563 dequeue_top_rt_rq(rt_rq); in sched_rt_rq_dequeue()
565 cpufreq_update_util(rq_of_rt_rq(rt_rq), 0); in sched_rt_rq_dequeue()
571 static inline int rt_rq_throttled(struct rt_rq *rt_rq) in rt_rq_throttled() argument
573 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted; in rt_rq_throttled()
578 struct rt_rq *rt_rq = group_rt_rq(rt_se); in rt_se_boosted() local
581 if (rt_rq) in rt_se_boosted()
582 return !!rt_rq->rt_nr_boosted; in rt_se_boosted()
601 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu) in sched_rt_period_rt_rq()
603 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu]; in sched_rt_period_rt_rq()
606 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) in sched_rt_bandwidth() argument
608 return &rt_rq->tg->rt_bandwidth; in sched_rt_bandwidth()
613 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq) in sched_rt_runtime() argument
615 return rt_rq->rt_runtime; in sched_rt_runtime()
618 static inline u64 sched_rt_period(struct rt_rq *rt_rq) in sched_rt_period() argument
623 typedef struct rt_rq *rt_rq_iter_t;
625 #define for_each_rt_rq(rt_rq, iter, rq) \ argument
626 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
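
Note: the two for_each_rt_rq() definitions (lines 518-521 and 625-626) differ in the same way: with group scheduling the walk visits each task group's rt_rq for this CPU, while the non-group form degenerates to a single iteration over &rq->rt; the (void) iter cast presumably just silences an unused-variable warning. The degenerate form, restated as a self-contained sketch with invented demo types:

    struct rt_rq_s { int rt_nr_running; };
    struct rq_s    { struct rt_rq_s rt; };
    typedef struct rt_rq_s *rt_rq_iter_s;

    /* Non-group variant: visit exactly one queue, then terminate. */
    #define for_each_rt_rq_sketch(rt_rq, iter, rq) \
        for ((void)(iter), (rt_rq) = &(rq)->rt; (rt_rq); (rt_rq) = NULL)
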
631 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se) in group_rt_rq()
636 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq) in sched_rt_rq_enqueue() argument
638 struct rq *rq = rq_of_rt_rq(rt_rq); in sched_rt_rq_enqueue()
640 if (!rt_rq->rt_nr_running) in sched_rt_rq_enqueue()
643 enqueue_top_rt_rq(rt_rq); in sched_rt_rq_enqueue()
647 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq) in sched_rt_rq_dequeue() argument
649 dequeue_top_rt_rq(rt_rq); in sched_rt_rq_dequeue()
652 static inline int rt_rq_throttled(struct rt_rq *rt_rq) in rt_rq_throttled() argument
654 return rt_rq->rt_throttled; in rt_rq_throttled()
663 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu) in sched_rt_period_rt_rq()
668 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) in sched_rt_bandwidth() argument
675 bool sched_rt_bandwidth_account(struct rt_rq *rt_rq) in sched_rt_bandwidth_account() argument
677 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); in sched_rt_bandwidth_account()
680 rt_rq->rt_time < rt_b->rt_runtime); in sched_rt_bandwidth_account()
687 static void do_balance_runtime(struct rt_rq *rt_rq) in do_balance_runtime() argument
689 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); in do_balance_runtime()
690 struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd; in do_balance_runtime()
699 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); in do_balance_runtime()
702 if (iter == rt_rq) in do_balance_runtime()
721 if (rt_rq->rt_runtime + diff > rt_period) in do_balance_runtime()
722 diff = rt_period - rt_rq->rt_runtime; in do_balance_runtime()
724 rt_rq->rt_runtime += diff; in do_balance_runtime()
725 if (rt_rq->rt_runtime == rt_period) { in do_balance_runtime()
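
Note: do_balance_runtime() (lines 687-725) lets a starved rt_rq borrow unused budget from the other CPUs' queues in the same bandwidth domain, never letting its runtime exceed the full period. The clamping step at lines 721-724 reduces to the following stand-alone helper; the function name and signature are invented for illustration.

    #include <stdint.h>

    /*
     * Borrow up to 'diff' ns from a peer, but never let this queue's
     * runtime grow past the period length (mirrors lines 721-724).
     */
    static uint64_t borrow_runtime_sketch(uint64_t rt_runtime, uint64_t diff,
                                          uint64_t rt_period)
    {
        if (rt_runtime + diff > rt_period)
            diff = rt_period - rt_runtime;
        return rt_runtime + diff;
    }
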
743 struct rt_rq *rt_rq; in __disable_runtime() local
748 for_each_rt_rq(rt_rq, iter, rq) { in __disable_runtime()
749 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); in __disable_runtime()
754 raw_spin_lock(&rt_rq->rt_runtime_lock); in __disable_runtime()
760 if (rt_rq->rt_runtime == RUNTIME_INF || in __disable_runtime()
761 rt_rq->rt_runtime == rt_b->rt_runtime) in __disable_runtime()
763 raw_spin_unlock(&rt_rq->rt_runtime_lock); in __disable_runtime()
770 want = rt_b->rt_runtime - rt_rq->rt_runtime; in __disable_runtime()
776 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); in __disable_runtime()
782 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF) in __disable_runtime()
800 raw_spin_lock(&rt_rq->rt_runtime_lock); in __disable_runtime()
811 rt_rq->rt_runtime = RUNTIME_INF; in __disable_runtime()
812 rt_rq->rt_throttled = 0; in __disable_runtime()
813 raw_spin_unlock(&rt_rq->rt_runtime_lock); in __disable_runtime()
817 sched_rt_rq_enqueue(rt_rq); in __disable_runtime()
824 struct rt_rq *rt_rq; in __enable_runtime() local
832 for_each_rt_rq(rt_rq, iter, rq) { in __enable_runtime()
833 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); in __enable_runtime()
836 raw_spin_lock(&rt_rq->rt_runtime_lock); in __enable_runtime()
837 rt_rq->rt_runtime = rt_b->rt_runtime; in __enable_runtime()
838 rt_rq->rt_time = 0; in __enable_runtime()
839 rt_rq->rt_throttled = 0; in __enable_runtime()
840 raw_spin_unlock(&rt_rq->rt_runtime_lock); in __enable_runtime()
845 static void balance_runtime(struct rt_rq *rt_rq) in balance_runtime() argument
850 if (rt_rq->rt_time > rt_rq->rt_runtime) { in balance_runtime()
851 raw_spin_unlock(&rt_rq->rt_runtime_lock); in balance_runtime()
852 do_balance_runtime(rt_rq); in balance_runtime()
853 raw_spin_lock(&rt_rq->rt_runtime_lock); in balance_runtime()
857 static inline void balance_runtime(struct rt_rq *rt_rq) {} in balance_runtime() argument
881 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i); in do_sched_rt_period_timer() local
882 struct rq *rq = rq_of_rt_rq(rt_rq); in do_sched_rt_period_timer()
889 raw_spin_lock(&rt_rq->rt_runtime_lock); in do_sched_rt_period_timer()
890 if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF) in do_sched_rt_period_timer()
891 rt_rq->rt_runtime = rt_b->rt_runtime; in do_sched_rt_period_timer()
892 skip = !rt_rq->rt_time && !rt_rq->rt_nr_running; in do_sched_rt_period_timer()
893 raw_spin_unlock(&rt_rq->rt_runtime_lock); in do_sched_rt_period_timer()
900 if (rt_rq->rt_time) { in do_sched_rt_period_timer()
903 raw_spin_lock(&rt_rq->rt_runtime_lock); in do_sched_rt_period_timer()
904 if (rt_rq->rt_throttled) in do_sched_rt_period_timer()
905 balance_runtime(rt_rq); in do_sched_rt_period_timer()
906 runtime = rt_rq->rt_runtime; in do_sched_rt_period_timer()
907 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime); in do_sched_rt_period_timer()
908 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) { in do_sched_rt_period_timer()
909 rt_rq->rt_throttled = 0; in do_sched_rt_period_timer()
919 if (rt_rq->rt_nr_running && rq->curr == rq->idle) in do_sched_rt_period_timer()
922 if (rt_rq->rt_time || rt_rq->rt_nr_running) in do_sched_rt_period_timer()
924 raw_spin_unlock(&rt_rq->rt_runtime_lock); in do_sched_rt_period_timer()
925 } else if (rt_rq->rt_nr_running) { in do_sched_rt_period_timer()
927 if (!rt_rq_throttled(rt_rq)) in do_sched_rt_period_timer()
930 if (rt_rq->rt_throttled) in do_sched_rt_period_timer()
934 sched_rt_rq_enqueue(rt_rq); in do_sched_rt_period_timer()
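
Note: the do_sched_rt_period_timer() fragments (lines 881-934) show the per-period replenishment: accumulated rt_time is reduced by up to 'overrun' periods' worth of runtime, and a throttled queue is unthrottled once its debt drops below the budget. The arithmetic at lines 906-909, as a small hedged model (names invented; locking and group-boost details omitted):

    #include <stdint.h>

    /* Returns 1 if the queue should be unthrottled after replenishment. */
    static int replenish_sketch(uint64_t *rt_time, uint64_t runtime,
                                uint64_t overrun, int throttled)
    {
        uint64_t credit = overrun * runtime;

        *rt_time -= (*rt_time < credit) ? *rt_time : credit;  /* min(rt_time, overrun*runtime) */
        return throttled && *rt_time < runtime;
    }
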
947 struct rt_rq *rt_rq = group_rt_rq(rt_se); in rt_se_prio() local
949 if (rt_rq) in rt_se_prio()
950 return rt_rq->highest_prio.curr; in rt_se_prio()
956 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq) in sched_rt_runtime_exceeded() argument
958 u64 runtime = sched_rt_runtime(rt_rq); in sched_rt_runtime_exceeded()
960 if (rt_rq->rt_throttled) in sched_rt_runtime_exceeded()
961 return rt_rq_throttled(rt_rq); in sched_rt_runtime_exceeded()
963 if (runtime >= sched_rt_period(rt_rq)) in sched_rt_runtime_exceeded()
966 balance_runtime(rt_rq); in sched_rt_runtime_exceeded()
967 runtime = sched_rt_runtime(rt_rq); in sched_rt_runtime_exceeded()
971 if (rt_rq->rt_time > runtime) { in sched_rt_runtime_exceeded()
972 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); in sched_rt_runtime_exceeded()
979 rt_rq->rt_throttled = 1; in sched_rt_runtime_exceeded()
987 rt_rq->rt_time = 0; in sched_rt_runtime_exceeded()
990 if (rt_rq_throttled(rt_rq)) { in sched_rt_runtime_exceeded()
991 sched_rt_rq_dequeue(rt_rq); in sched_rt_runtime_exceeded()
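
Note: sched_rt_runtime_exceeded() (lines 956-991) is the throttling decision: once the rt_time consumed in the current period exceeds the (possibly rebalanced) runtime budget, the queue is marked throttled and dequeued from the top-level rq. A reduced model of the decision; the priority-boost exemption behind rt_rq_throttled() and the RUNTIME_INF case are left out, and the names are invented.

    #include <stdint.h>

    /* 1 = caller should throttle/resched, 0 = keep running (simplified model). */
    static int rt_runtime_exceeded_sketch(uint64_t rt_time, uint64_t runtime,
                                          uint64_t period, int *throttled)
    {
        if (*throttled)
            return 1;
        if (runtime >= period)        /* budget covers the whole period: never throttle */
            return 0;
        if (rt_time > runtime) {      /* budget consumed: throttle until replenished */
            *throttled = 1;
            return 1;
        }
        return 0;
    }
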
1033 struct rt_rq *rt_rq = rt_rq_of_se(rt_se); in update_curr_rt() local
1035 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) { in update_curr_rt()
1036 raw_spin_lock(&rt_rq->rt_runtime_lock); in update_curr_rt()
1037 rt_rq->rt_time += delta_exec; in update_curr_rt()
1038 if (sched_rt_runtime_exceeded(rt_rq)) in update_curr_rt()
1040 raw_spin_unlock(&rt_rq->rt_runtime_lock); in update_curr_rt()
1046 dequeue_top_rt_rq(struct rt_rq *rt_rq) in dequeue_top_rt_rq() argument
1048 struct rq *rq = rq_of_rt_rq(rt_rq); in dequeue_top_rt_rq()
1050 BUG_ON(&rq->rt != rt_rq); in dequeue_top_rt_rq()
1052 if (!rt_rq->rt_queued) in dequeue_top_rt_rq()
1057 sub_nr_running(rq, rt_rq->rt_nr_running); in dequeue_top_rt_rq()
1058 rt_rq->rt_queued = 0; in dequeue_top_rt_rq()
1063 enqueue_top_rt_rq(struct rt_rq *rt_rq) in enqueue_top_rt_rq() argument
1065 struct rq *rq = rq_of_rt_rq(rt_rq); in enqueue_top_rt_rq()
1067 BUG_ON(&rq->rt != rt_rq); in enqueue_top_rt_rq()
1069 if (rt_rq->rt_queued) in enqueue_top_rt_rq()
1072 if (rt_rq_throttled(rt_rq)) in enqueue_top_rt_rq()
1075 if (rt_rq->rt_nr_running) { in enqueue_top_rt_rq()
1076 add_nr_running(rq, rt_rq->rt_nr_running); in enqueue_top_rt_rq()
1077 rt_rq->rt_queued = 1; in enqueue_top_rt_rq()
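
Note: enqueue_top_rt_rq()/dequeue_top_rt_rq() (lines 1046-1077) keep the rq's global nr_running count in sync with the top-level RT queue, guarded by the rt_queued flag so the adjustment happens at most once per state change. A compact model of that guard, with add_nr_running()/sub_nr_running() reduced to a plain counter:

    struct top_rt_sketch {
        unsigned int rt_nr_running;
        int rt_queued;
        int throttled;
    };

    static void enqueue_top_sketch(struct top_rt_sketch *rt, unsigned int *rq_nr_running)
    {
        if (rt->rt_queued || rt->throttled || !rt->rt_nr_running)
            return;                               /* nothing to do, or already accounted */
        *rq_nr_running += rt->rt_nr_running;      /* add_nr_running() in the kernel */
        rt->rt_queued = 1;
    }

    static void dequeue_top_sketch(struct top_rt_sketch *rt, unsigned int *rq_nr_running)
    {
        if (!rt->rt_queued)
            return;
        *rq_nr_running -= rt->rt_nr_running;      /* sub_nr_running() in the kernel */
        rt->rt_queued = 0;
    }
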
1087 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) in inc_rt_prio_smp() argument
1089 struct rq *rq = rq_of_rt_rq(rt_rq); in inc_rt_prio_smp()
1095 if (&rq->rt != rt_rq) in inc_rt_prio_smp()
1103 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) in dec_rt_prio_smp() argument
1105 struct rq *rq = rq_of_rt_rq(rt_rq); in dec_rt_prio_smp()
1111 if (&rq->rt != rt_rq) in dec_rt_prio_smp()
1114 if (rq->online && rt_rq->highest_prio.curr != prev_prio) in dec_rt_prio_smp()
1115 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr); in dec_rt_prio_smp()
1121 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {} in inc_rt_prio_smp() argument
1123 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {} in dec_rt_prio_smp() argument
1129 inc_rt_prio(struct rt_rq *rt_rq, int prio) in inc_rt_prio() argument
1131 int prev_prio = rt_rq->highest_prio.curr; in inc_rt_prio()
1134 rt_rq->highest_prio.curr = prio; in inc_rt_prio()
1136 inc_rt_prio_smp(rt_rq, prio, prev_prio); in inc_rt_prio()
1140 dec_rt_prio(struct rt_rq *rt_rq, int prio) in dec_rt_prio() argument
1142 int prev_prio = rt_rq->highest_prio.curr; in dec_rt_prio()
1144 if (rt_rq->rt_nr_running) { in dec_rt_prio()
1153 struct rt_prio_array *array = &rt_rq->active; in dec_rt_prio()
1155 rt_rq->highest_prio.curr = in dec_rt_prio()
1160 rt_rq->highest_prio.curr = MAX_RT_PRIO-1; in dec_rt_prio()
1163 dec_rt_prio_smp(rt_rq, prio, prev_prio); in dec_rt_prio()
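
Note: inc_rt_prio()/dec_rt_prio() (lines 1129-1163) cache the highest queued priority: enqueue only needs a comparison, while dequeue may have to rescan (the kernel consults the rt_prio_array bitmap at line 1153) before telling cpupri about the change. A hedged sketch of the recompute-on-dequeue idea, using a plain per-priority counter array instead of the bitmap; lower numeric value means higher priority.

    #define MAX_RT_PRIO 100

    struct prio_track {
        unsigned int nr_queued[MAX_RT_PRIO];  /* tasks queued at each priority */
        unsigned int nr_running;
        int highest;
    };

    static void inc_rt_prio_sketch(struct prio_track *t, int prio)
    {
        t->nr_queued[prio]++;
        t->nr_running++;
        if (prio < t->highest)
            t->highest = prio;
    }

    static void dec_rt_prio_sketch(struct prio_track *t, int prio)
    {
        t->nr_queued[prio]--;
        t->nr_running--;
        if (t->nr_running) {
            if (prio == t->highest) {
                int p;
                for (p = prio; p < MAX_RT_PRIO; p++)   /* stand-in for sched_find_first_bit() */
                    if (t->nr_queued[p])
                        break;
                t->highest = p;
            }
        } else {
            t->highest = MAX_RT_PRIO - 1;              /* empty-queue value, as at line 1160 */
        }
    }
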
1168 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {} in inc_rt_prio() argument
1169 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {} in dec_rt_prio() argument
1176 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in inc_rt_group() argument
1179 rt_rq->rt_nr_boosted++; in inc_rt_group()
1181 if (rt_rq->tg) in inc_rt_group()
1182 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth); in inc_rt_group()
1186 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in dec_rt_group() argument
1189 rt_rq->rt_nr_boosted--; in dec_rt_group()
1191 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted); in dec_rt_group()
1197 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in inc_rt_group() argument
1203 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {} in dec_rt_group() argument
1210 struct rt_rq *group_rq = group_rt_rq(rt_se); in rt_se_nr_running()
1221 struct rt_rq *group_rq = group_rt_rq(rt_se); in rt_se_rr_nr_running()
1233 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in inc_rt_tasks() argument
1238 rt_rq->rt_nr_running += rt_se_nr_running(rt_se); in inc_rt_tasks()
1239 rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se); in inc_rt_tasks()
1241 inc_rt_prio(rt_rq, prio); in inc_rt_tasks()
1242 inc_rt_migration(rt_se, rt_rq); in inc_rt_tasks()
1243 inc_rt_group(rt_se, rt_rq); in inc_rt_tasks()
1247 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in dec_rt_tasks() argument
1250 WARN_ON(!rt_rq->rt_nr_running); in dec_rt_tasks()
1251 rt_rq->rt_nr_running -= rt_se_nr_running(rt_se); in dec_rt_tasks()
1252 rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se); in dec_rt_tasks()
1254 dec_rt_prio(rt_rq, rt_se_prio(rt_se)); in dec_rt_tasks()
1255 dec_rt_migration(rt_se, rt_rq); in dec_rt_tasks()
1256 dec_rt_group(rt_se, rt_rq); in dec_rt_tasks()
1295 update_stats_wait_start_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se) in update_stats_wait_start_rt() argument
1310 __update_stats_wait_start(rq_of_rt_rq(rt_rq), p, stats); in update_stats_wait_start_rt()
1314 update_stats_enqueue_sleeper_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se) in update_stats_enqueue_sleeper_rt() argument
1329 __update_stats_enqueue_sleeper(rq_of_rt_rq(rt_rq), p, stats); in update_stats_enqueue_sleeper_rt()
1333 update_stats_enqueue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, in update_stats_enqueue_rt() argument
1340 update_stats_enqueue_sleeper_rt(rt_rq, rt_se); in update_stats_enqueue_rt()
1344 update_stats_wait_end_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se) in update_stats_wait_end_rt() argument
1359 __update_stats_wait_end(rq_of_rt_rq(rt_rq), p, stats); in update_stats_wait_end_rt()
1363 update_stats_dequeue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, in update_stats_dequeue_rt() argument
1380 rq_clock(rq_of_rt_rq(rt_rq))); in update_stats_dequeue_rt()
1384 rq_clock(rq_of_rt_rq(rt_rq))); in update_stats_dequeue_rt()
1390 struct rt_rq *rt_rq = rt_rq_of_se(rt_se); in __enqueue_rt_entity() local
1391 struct rt_prio_array *array = &rt_rq->active; in __enqueue_rt_entity()
1392 struct rt_rq *group_rq = group_rt_rq(rt_se); in __enqueue_rt_entity()
1419 inc_rt_tasks(rt_se, rt_rq); in __enqueue_rt_entity()
1424 struct rt_rq *rt_rq = rt_rq_of_se(rt_se); in __dequeue_rt_entity() local
1425 struct rt_prio_array *array = &rt_rq->active; in __dequeue_rt_entity()
1433 dec_rt_tasks(rt_se, rt_rq); in __dequeue_rt_entity()
1478 struct rt_rq *rt_rq = group_rt_rq(rt_se); in dequeue_rt_entity() local
1480 if (rt_rq && rt_rq->rt_nr_running) in dequeue_rt_entity()
1521 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head) in requeue_rt_entity() argument
1524 struct rt_prio_array *array = &rt_rq->active; in requeue_rt_entity()
1537 struct rt_rq *rt_rq; in requeue_task_rt() local
1540 rt_rq = rt_rq_of_se(rt_se); in requeue_task_rt()
1541 requeue_rt_entity(rt_rq, rt_se, head); in requeue_task_rt()
1701 struct rt_rq *rt_rq = &rq->rt; in set_next_task_rt() local
1705 update_stats_wait_end_rt(rt_rq, rt_se); in set_next_task_rt()
1725 struct rt_rq *rt_rq) in pick_next_rt_entity() argument
1727 struct rt_prio_array *array = &rt_rq->active; in pick_next_rt_entity()
1744 struct rt_rq *rt_rq = &rq->rt; in _pick_next_task_rt() local
1747 rt_se = pick_next_rt_entity(rq, rt_rq); in _pick_next_task_rt()
1749 rt_rq = group_rt_rq(rt_se); in _pick_next_task_rt()
1750 } while (rt_rq); in _pick_next_task_rt()
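
Note: the _pick_next_task_rt() fragments (lines 1744-1750) descend the group hierarchy: pick the best entity on the current rt_rq, and if that entity is itself a group (group_rt_rq() returns its child queue), repeat inside that queue until a task-level entity is reached. The loop shape, as a hedged sketch over invented types:

    struct rt_entity_sketch;
    struct rt_rq_level {
        struct rt_entity_sketch *best;   /* highest-priority queued entity */
    };
    struct rt_entity_sketch {
        struct rt_rq_level *my_q;        /* non-NULL iff this entity is a group */
    };

    /* Walk down the hierarchy until a task entity (my_q == NULL) is found. */
    static struct rt_entity_sketch *pick_sketch(struct rt_rq_level *rt_rq)
    {
        struct rt_entity_sketch *rt_se;

        do {
            rt_se = rt_rq->best;         /* pick_next_rt_entity() in the kernel */
            rt_rq = rt_se->my_q;         /* group_rt_rq() in the kernel */
        } while (rt_rq);

        return rt_se;
    }
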
1780 struct rt_rq *rt_rq = &rq->rt; in put_prev_task_rt() local
1783 update_stats_wait_start_rt(rt_rq, rt_se); in put_prev_task_rt()
2797 struct rt_rq *rt_rq = tg->rt_rq[i]; in tg_set_rt_bandwidth() local
2799 raw_spin_lock(&rt_rq->rt_runtime_lock); in tg_set_rt_bandwidth()
2800 rt_rq->rt_runtime = rt_runtime; in tg_set_rt_bandwidth()
2801 raw_spin_unlock(&rt_rq->rt_runtime_lock); in tg_set_rt_bandwidth()
2886 struct rt_rq *rt_rq = &cpu_rq(i)->rt; in sched_rt_global_constraints() local
2888 raw_spin_lock(&rt_rq->rt_runtime_lock); in sched_rt_global_constraints()
2889 rt_rq->rt_runtime = global_rt_runtime(); in sched_rt_global_constraints()
2890 raw_spin_unlock(&rt_rq->rt_runtime_lock); in sched_rt_global_constraints()
2983 struct rt_rq *rt_rq; in print_rt_stats() local
2986 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu)) in print_rt_stats()
2987 print_rt_rq(m, cpu, rt_rq); in print_rt_stats()