Lines Matching refs:dl_bw

69 static inline struct dl_bw *dl_bw_of(int i)  in dl_bw_of()
73 return &cpu_rq(i)->rd->dl_bw; in dl_bw_of()
134 static inline struct dl_bw *dl_bw_of(int i) in dl_bw_of()
136 return &cpu_rq(i)->dl.dl_bw; in dl_bw_of()
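The two hits above are the CONFIG_SMP and !CONFIG_SMP variants of dl_bw_of() (all of these matches appear to come from the deadline scheduler, kernel/sched/deadline.c, plus its helpers in kernel/sched/sched.h). On SMP the admission-control state lives in the root domain, on UP it sits directly in the per-runqueue struct dl_rq. A minimal reconstruction from the matched lines; the RCU/lockdep annotation the SMP variant normally carries is reduced to a comment:

    #ifdef CONFIG_SMP
    static inline struct dl_bw *dl_bw_of(int i)
    {
            /* rq->rd is RCU-protected: callers hold rcu_read_lock_sched(). */
            return &cpu_rq(i)->rd->dl_bw;
    }
    #else
    static inline struct dl_bw *dl_bw_of(int i)
    {
            return &cpu_rq(i)->dl.dl_bw;
    }
    #endif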
156 void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq) in __add_running_bw() argument
161 dl_rq->running_bw += dl_bw; in __add_running_bw()
169 void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq) in __sub_running_bw() argument
174 dl_rq->running_bw -= dl_bw; in __sub_running_bw()
183 void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq) in __add_rq_bw() argument
188 dl_rq->this_bw += dl_bw; in __add_rq_bw()
193 void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq) in __sub_rq_bw() argument
198 dl_rq->this_bw -= dl_bw; in __sub_rq_bw()
209 __add_rq_bw(dl_se->dl_bw, dl_rq); in add_rq_bw()
216 __sub_rq_bw(dl_se->dl_bw, dl_rq); in sub_rq_bw()
223 __add_running_bw(dl_se->dl_bw, dl_rq); in add_running_bw()
230 __sub_running_bw(dl_se->dl_bw, dl_rq); in sub_running_bw()
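Lines 156-230 are the four low-level accumulators for the two per-runqueue sums (running_bw, roughly the utilization of the currently active deadline tasks, and this_bw, the utilization of all deadline tasks assigned to the runqueue) plus the wrappers that feed them dl_se->dl_bw. One add-side pair, sketched under the assumption of the usual runqueue-lock assertion; the overflow warnings and the cpufreq kick present in the real helpers are left out:

    static inline void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
    {
            lockdep_assert_held(&rq_of_dl_rq(dl_rq)->lock);
            dl_rq->running_bw += dl_bw;
    }

    static inline void add_running_bw(struct sched_dl_entity *dl_se,
                                      struct dl_rq *dl_rq)
    {
            /* Special (SCHED_FLAG_SUGOV) entities carry no bandwidth. */
            if (!dl_entity_is_special(dl_se))
                    __add_running_bw(dl_se->dl_bw, dl_rq);
    }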
256 __sub_rq_bw(p->dl.dl_bw, &rq->dl); in dl_change_utilization()
352 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); in task_non_contending()
357 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in task_non_contending()
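task_non_contending() (352-357) hands a task's bandwidth back to the root domain's struct dl_bw when, by its 0-lag time, the task is dead or no longer SCHED_DEADLINE. The __dl_add()/__dl_sub() pair it uses lives in kernel/sched/sched.h; a sketch, where the __dl_update() side effect (which feeds GRUB's per-CPU extra_bw) is my reading of the code and should be treated as approximate:

    static inline void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
    {
            dl_b->total_bw -= tsk_bw;
            __dl_update(dl_b, (s32)tsk_bw / cpus);
    }

    static inline void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
    {
            dl_b->total_bw += tsk_bw;
            __dl_update(dl_b, -((s32)tsk_bw / cpus));
    }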
423 void init_dl_bw(struct dl_bw *dl_b) in init_dl_bw()
447 init_dl_bw(&dl_rq->dl_bw); in init_dl_rq()
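init_dl_bw() (423) initializes a struct dl_bw: its lock, the admission-control limit derived from the global RT bandwidth knobs, and total_bw = 0; init_dl_rq() (447) uses it for the per-runqueue instance on !CONFIG_SMP. A sketch with the locking around the global knobs omitted:

    void init_dl_bw(struct dl_bw *dl_b)
    {
            raw_spin_lock_init(&dl_b->lock);
            if (global_rt_runtime() == RUNTIME_INF)
                    dl_b->bw = -1;          /* -1 means: no limit */
            else
                    dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
            dl_b->total_bw = 0;
    }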
597 struct dl_bw *dl_b; in dl_task_offline_migration()
648 dl_b = &rq->rd->dl_bw; in dl_task_offline_migration()
650 __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span)); in dl_task_offline_migration()
653 dl_b = &later_rq->rd->dl_bw; in dl_task_offline_migration()
655 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span)); in dl_task_offline_migration()
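dl_task_offline_migration() (597-655) pushes a queued deadline task off a CPU that is going away; the two matched call sites are its bandwidth hand-off between root domains. In outline (an excerpt-style sketch, not the full function):

    /* Leave the outgoing CPU's root domain... */
    dl_b = &rq->rd->dl_bw;
    raw_spin_lock(&dl_b->lock);
    __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
    raw_spin_unlock(&dl_b->lock);

    /* ...and charge the destination root domain. */
    dl_b = &later_rq->rd->dl_bw;
    raw_spin_lock(&dl_b->lock);
    __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
    raw_spin_unlock(&dl_b->lock);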
1219 u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT; in grub_reclaim()
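grub_reclaim() (1219) scales the consumed runtime for SCHED_FLAG_RECLAIM tasks so they can reuse bandwidth that other deadline tasks leave inactive; u_act_min keeps a single task from reclaiming past its own bandwidth scaled by bw_ratio. A sketch of the whole computation in BW_SHIFT/RATIO_SHIFT fixed point (only the u_act_min line is verbatim from the match, the rest is reconstructed):

    static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
    {
            u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* inactive utilization */
            u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;
            u64 u_act;

            /*
             * Never charge less than u_act_min, so reclaiming stays within
             * the fraction of the CPU reserved for deadline tasks.
             */
            if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
                    u_act = u_act_min;
            else
                    u_act = BW_UNIT - u_inact - rq->dl.extra_bw;

            return (delta * u_act) >> BW_SHIFT;
    }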
1361 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); in inactive_task_timer()
1370 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in inactive_task_timer()
2446 struct dl_bw *src_dl_b; in set_cpus_allowed_dl()
2455 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in set_cpus_allowed_dl()
2496 struct dl_bw *dl_b; in dl_add_task_root_domain()
2506 dl_b = &rq->rd->dl_bw; in dl_add_task_root_domain()
2509 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span)); in dl_add_task_root_domain()
2520 raw_spin_lock_irqsave(&rd->dl_bw.lock, flags); in dl_clear_root_domain()
2521 rd->dl_bw.total_bw = 0; in dl_clear_root_domain()
2522 raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags); in dl_clear_root_domain()
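dl_add_task_root_domain() (2496-2509) and dl_clear_root_domain() (2520-2522) are the rebuild path used when root domains change (cpuset updates, partition rebuilds): total_bw is cleared, then every deadline task's bandwidth is re-added to whichever root domain its runqueue now belongs to. The add side, sketched:

    void dl_add_task_root_domain(struct task_struct *p)
    {
            struct rq_flags rf;
            struct rq *rq;
            struct dl_bw *dl_b;

            rq = task_rq_lock(p, &rf);

            dl_b = &rq->rd->dl_bw;
            raw_spin_lock(&dl_b->lock);
            __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
            raw_spin_unlock(&dl_b->lock);

            task_rq_unlock(rq, p, &rf);
    }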
2680 struct dl_bw *dl_b; in sched_dl_global_validate()
2730 struct dl_bw *dl_b; in sched_dl_do_global()
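sched_dl_global_validate() (2680) and sched_dl_do_global() (2730) back the sched_rt_runtime_us / sched_rt_period_us sysctls: the first rejects a new global limit that some root domain's total_bw already exceeds, the second then writes the new limit into every dl_b->bw. The validate loop, roughly (the per-CPU walk and locking pattern are inferred from the other call sites above):

    u64 new_bw = to_ratio(global_rt_period(), global_rt_runtime());
    int cpu, cpus, ret = 0;
    unsigned long flags;

    for_each_possible_cpu(cpu) {
            rcu_read_lock_sched();
            dl_b = dl_bw_of(cpu);
            cpus = dl_bw_cpus(cpu);

            raw_spin_lock_irqsave(&dl_b->lock, flags);
            if (new_bw * cpus < dl_b->total_bw)
                    ret = -EBUSY;           /* already over the new limit */
            raw_spin_unlock_irqrestore(&dl_b->lock, flags);

            rcu_read_unlock_sched();
            if (ret)
                    break;
    }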
2774 struct dl_bw *dl_b = dl_bw_of(cpu); in sched_dl_overflow()
2781 if (new_bw == p->dl.dl_bw && task_has_dl_policy(p)) in sched_dl_overflow()
2796 __dl_sub(dl_b, p->dl.dl_bw, cpus); in sched_dl_overflow()
2800 !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) { in sched_dl_overflow()
2808 __dl_sub(dl_b, p->dl.dl_bw, cpus); in sched_dl_overflow()
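The cluster at 2774-2808 is sched_dl_overflow(), the per-task admission test run from sched_setattr(): depending on whether the task enters, changes, or leaves SCHED_DEADLINE, its old bandwidth is subtracted and the new one admitted only if it fits. The fit test is __dl_overflow() from kernel/sched/sched.h; roughly as follows, where cap is the summed capacity of the domain's CPUs and cap_scale() the usual capacity shift:

    static inline bool __dl_overflow(struct dl_bw *dl_b, unsigned long cap,
                                     u64 old_bw, u64 new_bw)
    {
            return dl_b->bw != -1 &&
                   cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
    }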
2841 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime); in __setparam_dl()
2931 dl_se->dl_bw = 0; in __dl_clear_params()
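Line 2841 is where a task's dl_bw is actually set, in __setparam_dl(): the runtime/period ratio in BW_SHIFT-bit fixed point, with BW_UNIT standing for a utilization of 1.0; __dl_clear_params() (2931) zeroes it again when the task leaves the class. The conversion helper, sketched:

    u64 to_ratio(u64 period, u64 runtime)
    {
            if (runtime == RUNTIME_INF)
                    return BW_UNIT;
            if (period == 0)        /* callers treat this as "no bandwidth" */
                    return 0;
            return div64_u64(runtime << BW_SHIFT, period);
    }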
2962 struct dl_bw *dl_b; in dl_task_can_attach()
2972 overflow = __dl_overflow(dl_b, cap, 0, p->dl.dl_bw); in dl_task_can_attach()
2984 __dl_add(dl_b, p->dl.dl_bw, cpus); in dl_task_can_attach()
2997 struct dl_bw *cur_dl_b; in dl_cpuset_cpumask_can_shrink()
3017 struct dl_bw *dl_b; in dl_cpu_busy()
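The last group (2962-3017) is cpuset/hotplug admission: dl_task_can_attach() checks that a task's bandwidth fits in the destination cpuset's root domain and reserves it on success, while dl_cpuset_cpumask_can_shrink() and dl_cpu_busy() refuse cpuset shrinks and CPU offlines that would leave a domain overcommitted. The attach-side check, in outline; dest_cpu is an illustrative name, and the error handling and RCU/locking pattern are assumptions based on the call sites above:

    rcu_read_lock_sched();
    dl_b = dl_bw_of(dest_cpu);
    raw_spin_lock_irqsave(&dl_b->lock, flags);
    cpus = dl_bw_cpus(dest_cpu);
    cap = dl_bw_capacity(dest_cpu);
    overflow = __dl_overflow(dl_b, cap, 0, p->dl.dl_bw);
    if (!overflow)
            __dl_add(dl_b, p->dl.dl_bw, cpus);      /* reserve immediately */
    raw_spin_unlock_irqrestore(&dl_b->lock, flags);
    rcu_read_unlock_sched();

    ret = overflow ? -EBUSY : 0;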