Lines matching refs:dl in kernel/sched/deadline.c (the SCHED_DEADLINE scheduling class)

25 return container_of(dl_se, struct task_struct, dl); in dl_task_of()
30 return container_of(dl_rq, struct rq, dl); in rq_of_dl_rq()
38 return &rq->dl; in dl_rq_of_se()
136 return &cpu_rq(i)->dl.dl_bw; in dl_bw_of()
237 BUG_ON(p->dl.flags & SCHED_FLAG_SUGOV); in dl_change_utilization()
243 if (p->dl.dl_non_contending) { in dl_change_utilization()
244 sub_running_bw(&p->dl, &rq->dl); in dl_change_utilization()
245 p->dl.dl_non_contending = 0; in dl_change_utilization()
253 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1) in dl_change_utilization()
256 __sub_rq_bw(p->dl.dl_bw, &rq->dl); in dl_change_utilization()
257 __add_rq_bw(new_bw, &rq->dl); in dl_change_utilization()
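
The dl_change_utilization() lines above swap one fixed-point bandwidth value for another on the runqueue. A minimal sketch of how such a bandwidth (p->dl.dl_bw, new_bw) is derived from a task's runtime and period, assuming the BW_SHIFT = 20 fixed-point convention from kernel/sched/sched.h; the _sketch suffix marks it as illustrative, not the kernel's code:

    #include <stdint.h>

    #define BW_SHIFT 20
    #define BW_UNIT  (1ULL << BW_SHIFT)  /* fixed-point 1.0 */

    /* runtime/period as a fixed-point utilization, modeled on the
     * kernel's to_ratio() helper. */
    static uint64_t to_ratio_sketch(uint64_t period, uint64_t runtime)
    {
        if (period == 0)
            return 0;
        return (runtime << BW_SHIFT) / period;
    }

For example, runtime = 10 ms over period = 100 ms yields roughly BW_UNIT / 10, i.e. a 10% utilization, which is the unit that __sub_rq_bw()/__add_rq_bw() account in.
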
316 struct sched_dl_entity *dl_se = &p->dl; in task_non_contending()
355 sub_rq_bw(&p->dl, &rq->dl); in task_non_contending()
357 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in task_non_contending()
409 struct sched_dl_entity *dl_se = &p->dl; in is_leftmost()
525 return dl_entity_preempt(&__node_2_pdl(a)->dl, &__node_2_pdl(b)->dl); in __pushable_less()
539 &rq->dl.pushable_dl_tasks_root, in enqueue_pushable_dl_task()
542 rq->dl.earliest_dl.next = p->dl.deadline; in enqueue_pushable_dl_task()
547 struct dl_rq *dl_rq = &rq->dl; in dequeue_pushable_dl_task()
556 dl_rq->earliest_dl.next = __node_2_pdl(leftmost)->dl.deadline; in dequeue_pushable_dl_task()
563 return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root); in has_pushable_dl_tasks()
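
__pushable_less() keeps the pushable-task rbtree ordered by earliest deadline, so its leftmost node is the best migration candidate, cached in earliest_dl.next. The ordering reduces to a wraparound-safe u64 comparison; a sketch modeled on dl_time_before() from kernel/sched/sched.h:

    #include <stdint.h>
    #include <stdbool.h>

    /* True if deadline a is earlier than b, correct even if the
     * nanosecond clock has wrapped between the two timestamps. */
    static bool dl_time_before_sketch(uint64_t a, uint64_t b)
    {
        return (int64_t)(a - b) < 0;
    }

Reinterpreting the difference as signed is what lets every dl_time_before() call in this listing compare deadlines without worrying about u64 overflow.
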
626 if (p->dl.dl_non_contending || p->dl.dl_throttled) { in dl_task_offline_migration()
633 sub_running_bw(&p->dl, &rq->dl); in dl_task_offline_migration()
634 sub_rq_bw(&p->dl, &rq->dl); in dl_task_offline_migration()
636 add_rq_bw(&p->dl, &later_rq->dl); in dl_task_offline_migration()
637 add_running_bw(&p->dl, &later_rq->dl); in dl_task_offline_migration()
639 sub_rq_bw(&p->dl, &rq->dl); in dl_task_offline_migration()
640 add_rq_bw(&p->dl, &later_rq->dl); in dl_task_offline_migration()
650 __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span)); in dl_task_offline_migration()
655 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span)); in dl_task_offline_migration()
984 struct sched_dl_entity *dl_se = &p->dl; in start_dl_timer()
1217 u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */ in grub_reclaim()
1219 u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT; in grub_reclaim()
1229 if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min) in grub_reclaim()
1232 u_act = BW_UNIT - u_inact - rq->dl.extra_bw; in grub_reclaim()
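
The grub_reclaim() lines implement GRUB bandwidth reclaiming: the runtime a task consumes is scaled by the active utilization u_act, so bandwidth left inactive by other deadline tasks is handed back to running ones. A self-contained sketch of that arithmetic, assuming BW_SHIFT = 20 and RATIO_SHIFT = 8 as in kernel/sched/sched.h (bw_ratio = 1 << RATIO_SHIFT corresponds to RT throttling being disabled):

    #include <stdio.h>
    #include <stdint.h>

    #define BW_SHIFT    20
    #define BW_UNIT     (1ULL << BW_SHIFT)   /* fixed-point 1.0 */
    #define RATIO_SHIFT 8

    /* Scale the consumed runtime 'delta' by u_act, mirroring the
     * grub_reclaim() lines in the listing above. */
    static uint64_t grub_reclaim_sketch(uint64_t delta, uint64_t this_bw,
                                        uint64_t running_bw, uint64_t extra_bw,
                                        uint64_t dl_bw, uint64_t bw_ratio)
    {
        uint64_t u_inact = this_bw - running_bw;            /* Utot - Uact */
        uint64_t u_act_min = (dl_bw * bw_ratio) >> RATIO_SHIFT;
        uint64_t u_act;

        if (u_inact + extra_bw > BW_UNIT - u_act_min)
            u_act = u_act_min;
        else
            u_act = BW_UNIT - u_inact - extra_bw;

        return (delta * u_act) >> BW_SHIFT;
    }

    int main(void)
    {
        /* 1 ms of execution while a quarter of the bandwidth is inactive:
         * only 0.75 ms of budget is charged, reclaiming the rest. */
        uint64_t charged = grub_reclaim_sketch(1000000ULL,
                                               BW_UNIT / 2, BW_UNIT / 4, 0,
                                               BW_UNIT / 4, 1ULL << RATIO_SHIFT);
        printf("charged: %llu ns\n", (unsigned long long)charged);
        return 0;
    }

update_curr_dl() (line 1244 onward in the listing) subtracts this scaled value from dl_se->runtime instead of the raw delta when reclaiming is enabled.
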
1244 struct sched_dl_entity *dl_se = &curr->dl; in update_curr_dl()
1292 &curr->dl); in update_curr_dl()
1316 if (!is_leftmost(curr, &rq->dl)) in update_curr_dl()
1364 sub_running_bw(&p->dl, dl_rq_of_se(&p->dl)); in inactive_task_timer()
1365 sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl)); in inactive_task_timer()
1370 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in inactive_task_timer()
1379 sub_running_bw(dl_se, &rq->dl); in inactive_task_timer()
1611 if (is_dl_boosted(&p->dl)) { in enqueue_task_dl()
1624 if (p->dl.dl_throttled) { in enqueue_task_dl()
1630 hrtimer_try_to_cancel(&p->dl.dl_timer); in enqueue_task_dl()
1631 p->dl.dl_throttled = 0; in enqueue_task_dl()
1643 p->dl.dl_throttled = 0; in enqueue_task_dl()
1644 BUG_ON(!is_dl_boosted(&p->dl) || flags != ENQUEUE_REPLENISH); in enqueue_task_dl()
1654 if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl)) in enqueue_task_dl()
1655 dl_check_constrained_dl(&p->dl); in enqueue_task_dl()
1658 add_rq_bw(&p->dl, &rq->dl); in enqueue_task_dl()
1659 add_running_bw(&p->dl, &rq->dl); in enqueue_task_dl()
1674 if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) { in enqueue_task_dl()
1676 task_contending(&p->dl, flags); in enqueue_task_dl()
1682 update_stats_wait_start_dl(dl_rq_of_se(&p->dl), &p->dl); in enqueue_task_dl()
1684 enqueue_dl_entity(&p->dl, flags); in enqueue_task_dl()
1692 update_stats_dequeue_dl(&rq->dl, &p->dl, flags); in __dequeue_task_dl()
1693 dequeue_dl_entity(&p->dl); in __dequeue_task_dl()
1703 sub_running_bw(&p->dl, &rq->dl); in dequeue_task_dl()
1704 sub_rq_bw(&p->dl, &rq->dl); in dequeue_task_dl()
1738 rq->curr->dl.dl_yielded = 1; in yield_task_dl()
1780 !dl_entity_preempt(&p->dl, &curr->dl)) && in select_task_rq_dl()
1794 (dl_time_before(p->dl.deadline, in select_task_rq_dl()
1795 cpu_rq(target)->dl.earliest_dl.curr) || in select_task_rq_dl()
1796 (cpu_rq(target)->dl.dl_nr_running == 0))) in select_task_rq_dl()
1819 if (p->dl.dl_non_contending) { in migrate_task_rq_dl()
1821 sub_running_bw(&p->dl, &rq->dl); in migrate_task_rq_dl()
1822 p->dl.dl_non_contending = 0; in migrate_task_rq_dl()
1830 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1) in migrate_task_rq_dl()
1833 sub_rq_bw(&p->dl, &rq->dl); in migrate_task_rq_dl()
1860 if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) { in balance_dl()
1883 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) { in check_preempt_curr_dl()
1893 if ((p->dl.deadline == rq->curr->dl.deadline) && in check_preempt_curr_dl()
1902 hrtick_start(rq, p->dl.runtime); in start_hrtick_dl()
1912 struct sched_dl_entity *dl_se = &p->dl; in set_next_task_dl()
1913 struct dl_rq *dl_rq = &rq->dl; in set_next_task_dl()
1916 if (on_dl_rq(&p->dl)) in set_next_task_dl()
1948 struct dl_rq *dl_rq = &rq->dl; in pick_task_dl()
1974 struct sched_dl_entity *dl_se = &p->dl; in put_prev_task_dl()
1975 struct dl_rq *dl_rq = &rq->dl; in put_prev_task_dl()
1977 if (on_dl_rq(&p->dl)) in put_prev_task_dl()
1983 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1) in put_prev_task_dl()
2005 if (hrtick_enabled_dl(rq) && queued && p->dl.runtime > 0 && in task_tick_dl()
2006 is_leftmost(p, &rq->dl)) in task_tick_dl()
2037 struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost; in pick_earliest_pushable_dl_task()
2161 if (later_rq->dl.dl_nr_running && in find_lock_later_rq()
2162 !dl_time_before(task->dl.deadline, in find_lock_later_rq()
2163 later_rq->dl.earliest_dl.curr)) { in find_lock_later_rq()
2191 if (!later_rq->dl.dl_nr_running || in find_lock_later_rq()
2192 dl_time_before(task->dl.deadline, in find_lock_later_rq()
2193 later_rq->dl.earliest_dl.curr)) in find_lock_later_rq()
2211 p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost, in pick_next_pushable_dl_task()
2235 if (!rq->dl.overloaded) in push_dl_task()
2255 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) && in push_dl_task()
2347 if (this_rq->dl.dl_nr_running && in pull_dl_task()
2348 dl_time_before(this_rq->dl.earliest_dl.curr, in pull_dl_task()
2349 src_rq->dl.earliest_dl.next)) in pull_dl_task()
2360 if (src_rq->dl.dl_nr_running <= 1) in pull_dl_task()
2370 if (p && dl_time_before(p->dl.deadline, dmin) && in pull_dl_task()
2371 (!this_rq->dl.dl_nr_running || in pull_dl_task()
2372 dl_time_before(p->dl.deadline, in pull_dl_task()
2373 this_rq->dl.earliest_dl.curr))) { in pull_dl_task()
2381 if (dl_time_before(p->dl.deadline, in pull_dl_task()
2382 src_rq->curr->dl.deadline)) in pull_dl_task()
2391 dmin = p->dl.deadline; in pull_dl_task()
2423 !dl_entity_preempt(&p->dl, &rq->curr->dl))) { in task_woken_dl()
2455 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in set_cpus_allowed_dl()
2465 if (rq->dl.overloaded) in rq_online_dl()
2469 if (rq->dl.dl_nr_running > 0) in rq_online_dl()
2470 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr); in rq_online_dl()
2476 if (rq->dl.overloaded) in rq_offline_dl()
2509 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span)); in dl_add_task_root_domain()
2537 if (task_on_rq_queued(p) && p->dl.dl_runtime) in switched_from_dl()
2547 if (p->dl.dl_non_contending) in switched_from_dl()
2548 sub_running_bw(&p->dl, &rq->dl); in switched_from_dl()
2549 sub_rq_bw(&p->dl, &rq->dl); in switched_from_dl()
2557 if (p->dl.dl_non_contending) in switched_from_dl()
2558 p->dl.dl_non_contending = 0; in switched_from_dl()
2565 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running) in switched_from_dl()
2577 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1) in switched_to_dl()
2582 add_rq_bw(&p->dl, &rq->dl); in switched_to_dl()
2589 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded) in switched_to_dl()
2616 if (!rq->dl.overloaded) in prio_changed_dl()
2624 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline)) in prio_changed_dl()
2637 DEFINE_SCHED_CLASS(dl) = {
2755 init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl); in sched_dl_do_global()
2781 if (new_bw == p->dl.dl_bw && task_has_dl_policy(p)) in sched_dl_overflow()
2795 if (hrtimer_active(&p->dl.inactive_timer)) in sched_dl_overflow()
2796 __dl_sub(dl_b, p->dl.dl_bw, cpus); in sched_dl_overflow()
2800 !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) { in sched_dl_overflow()
2808 __dl_sub(dl_b, p->dl.dl_bw, cpus); in sched_dl_overflow()
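
sched_dl_overflow() performs admission control: the task's old bandwidth is released and the new one admitted only if the root domain stays within its limit. A simplified sketch of the __dl_overflow() test used above, ignoring CPU-capacity scaling; the field names follow struct dl_bw, and treating bw == (u64)-1 as "no limit" follows the kernel's convention:

    #include <stdint.h>
    #include <stdbool.h>

    struct dl_bw_sketch {
        uint64_t bw;        /* per-CPU bandwidth limit, (uint64_t)-1 = unlimited */
        uint64_t total_bw;  /* bandwidth already admitted in this root domain */
    };

    /* Would swapping old_bw for new_bw overflow the limit across 'cpus' CPUs?
     * Assumes old_bw <= total_bw, which the callers above guarantee. */
    static bool dl_overflow_sketch(struct dl_bw_sketch *dl_b, int cpus,
                                   uint64_t old_bw, uint64_t new_bw)
    {
        return dl_b->bw != (uint64_t)-1 &&
               (uint64_t)cpus * dl_b->bw < dl_b->total_bw - old_bw + new_bw;
    }

The same test gates dl_task_can_attach() further down (line 2972 in the listing) before it calls __dl_add().
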
2835 struct sched_dl_entity *dl_se = &p->dl; in __setparam_dl()
2847 struct sched_dl_entity *dl_se = &p->dl; in __getparam_dl()
2925 struct sched_dl_entity *dl_se = &p->dl; in __dl_clear_params()
2946 struct sched_dl_entity *dl_se = &p->dl; in dl_param_changed()
2972 overflow = __dl_overflow(dl_b, cap, 0, p->dl.dl_bw); in dl_task_can_attach()
2984 __dl_add(dl_b, p->dl.dl_bw, cpus); in dl_task_can_attach()
3035 print_dl_rq(m, cpu, &cpu_rq(cpu)->dl); in print_dl_stats()