Lines matching references to cs (struct cpuset pointer) in kernel/cgroup/cpuset.c
213 static inline struct cpuset *parent_cs(struct cpuset *cs) in parent_cs() argument
215 return css_cs(cs->css.parent); in parent_cs()
231 static inline bool is_cpuset_online(struct cpuset *cs) in is_cpuset_online() argument
233 return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css); in is_cpuset_online()
236 static inline int is_cpu_exclusive(const struct cpuset *cs) in is_cpu_exclusive() argument
238 return test_bit(CS_CPU_EXCLUSIVE, &cs->flags); in is_cpu_exclusive()
241 static inline int is_mem_exclusive(const struct cpuset *cs) in is_mem_exclusive() argument
243 return test_bit(CS_MEM_EXCLUSIVE, &cs->flags); in is_mem_exclusive()
246 static inline int is_mem_hardwall(const struct cpuset *cs) in is_mem_hardwall() argument
248 return test_bit(CS_MEM_HARDWALL, &cs->flags); in is_mem_hardwall()
251 static inline int is_sched_load_balance(const struct cpuset *cs) in is_sched_load_balance() argument
253 return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in is_sched_load_balance()
256 static inline int is_memory_migrate(const struct cpuset *cs) in is_memory_migrate() argument
258 return test_bit(CS_MEMORY_MIGRATE, &cs->flags); in is_memory_migrate()
261 static inline int is_spread_page(const struct cpuset *cs) in is_spread_page() argument
263 return test_bit(CS_SPREAD_PAGE, &cs->flags); in is_spread_page()
266 static inline int is_spread_slab(const struct cpuset *cs) in is_spread_slab() argument
268 return test_bit(CS_SPREAD_SLAB, &cs->flags); in is_spread_slab()
271 static inline int is_partition_root(const struct cpuset *cs) in is_partition_root() argument
273 return cs->partition_root_state > 0; in is_partition_root()
279 static inline void notify_partition_change(struct cpuset *cs, in notify_partition_change() argument
283 cgroup_file_notify(&cs->partition_file); in notify_partition_change()
422 struct cpuset *cs; in guarantee_online_cpus() local
428 cs = task_cs(tsk); in guarantee_online_cpus()
430 while (!cpumask_intersects(cs->effective_cpus, pmask)) { in guarantee_online_cpus()
431 cs = parent_cs(cs); in guarantee_online_cpus()
432 if (unlikely(!cs)) { in guarantee_online_cpus()
443 cpumask_and(pmask, pmask, cs->effective_cpus); in guarantee_online_cpus()
460 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask) in guarantee_online_mems() argument
462 while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY])) in guarantee_online_mems()
463 cs = parent_cs(cs); in guarantee_online_mems()
464 nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]); in guarantee_online_mems()
472 static void cpuset_update_task_spread_flag(struct cpuset *cs, in cpuset_update_task_spread_flag() argument
475 if (is_spread_page(cs)) in cpuset_update_task_spread_flag()
480 if (is_spread_slab(cs)) in cpuset_update_task_spread_flag()
510 static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) in alloc_cpumasks() argument
514 if (cs) { in alloc_cpumasks()
515 pmask1 = &cs->cpus_allowed; in alloc_cpumasks()
516 pmask2 = &cs->effective_cpus; in alloc_cpumasks()
517 pmask3 = &cs->subparts_cpus; in alloc_cpumasks()
547 static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) in free_cpumasks() argument
549 if (cs) { in free_cpumasks()
550 free_cpumask_var(cs->cpus_allowed); in free_cpumasks()
551 free_cpumask_var(cs->effective_cpus); in free_cpumasks()
552 free_cpumask_var(cs->subparts_cpus); in free_cpumasks()
565 static struct cpuset *alloc_trial_cpuset(struct cpuset *cs) in alloc_trial_cpuset() argument
569 trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL); in alloc_trial_cpuset()
578 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed); in alloc_trial_cpuset()
579 cpumask_copy(trial->effective_cpus, cs->effective_cpus); in alloc_trial_cpuset()
587 static inline void free_cpuset(struct cpuset *cs) in free_cpuset() argument
589 free_cpumasks(cs, NULL); in free_cpuset()
590 kfree(cs); in free_cpuset()
960 static void update_tasks_root_domain(struct cpuset *cs) in update_tasks_root_domain() argument
965 css_task_iter_start(&cs->css, 0, &it); in update_tasks_root_domain()
975 struct cpuset *cs = NULL; in rebuild_root_domains() local
990 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in rebuild_root_domains()
992 if (cpumask_empty(cs->effective_cpus)) { in rebuild_root_domains()
997 css_get(&cs->css); in rebuild_root_domains()
1001 update_tasks_root_domain(cs); in rebuild_root_domains()
1004 css_put(&cs->css); in rebuild_root_domains()
1035 struct cpuset *cs; in rebuild_sched_domains_locked() local
1061 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in rebuild_sched_domains_locked()
1062 if (!is_partition_root(cs)) { in rebuild_sched_domains_locked()
1066 if (!cpumask_subset(cs->effective_cpus, in rebuild_sched_domains_locked()
1104 static void update_tasks_cpumask(struct cpuset *cs) in update_tasks_cpumask() argument
1109 css_task_iter_start(&cs->css, 0, &it); in update_tasks_cpumask()
1111 set_cpus_allowed_ptr(task, cs->effective_cpus); in update_tasks_cpumask()
1127 struct cpuset *cs, struct cpuset *parent) in compute_effective_cpumask() argument
1132 cpumask_and(new_cpus, new_cpus, cs->cpus_allowed); in compute_effective_cpumask()
1135 cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus); in compute_effective_cpumask()
1372 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp) in update_cpumasks_hier() argument
1380 cpuset_for_each_descendant_pre(cp, pos_css, cs) { in update_cpumasks_hier()
1418 if ((cp != cs) && old_prs) { in update_cpumasks_hier()
1527 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, in update_sibling_cpumasks() argument
1540 if (sibling == cs) in update_sibling_cpumasks()
1556 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, in update_cpumask() argument
1563 if (cs == &top_cpuset) in update_cpumask()
1585 if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed)) in update_cpumask()
1588 retval = validate_change(cs, trialcs); in update_cpumask()
1602 if (cs->partition_root_state) { in update_cpumask()
1606 if (update_parent_subparts_cpumask(cs, partcmd_update, in update_cpumask()
1612 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); in update_cpumask()
1617 if (cs->nr_subparts_cpus) { in update_cpumask()
1618 cpumask_andnot(cs->subparts_cpus, cs->subparts_cpus, in update_cpumask()
1619 cs->cpus_allowed); in update_cpumask()
1620 cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus); in update_cpumask()
1624 update_cpumasks_hier(cs, &tmp); in update_cpumask()
1626 if (cs->partition_root_state) { in update_cpumask()
1627 struct cpuset *parent = parent_cs(cs); in update_cpumask()
1634 update_sibling_cpumasks(parent, cs, &tmp); in update_cpumask()
1730 static void update_tasks_nodemask(struct cpuset *cs) in update_tasks_nodemask() argument
1736 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ in update_tasks_nodemask()
1738 guarantee_online_mems(cs, &newmems); in update_tasks_nodemask()
1750 css_task_iter_start(&cs->css, 0, &it); in update_tasks_nodemask()
1761 migrate = is_memory_migrate(cs); in update_tasks_nodemask()
1763 mpol_rebind_mm(mm, &cs->mems_allowed); in update_tasks_nodemask()
1765 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); in update_tasks_nodemask()
1775 cs->old_mems_allowed = newmems; in update_tasks_nodemask()
1793 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) in update_nodemasks_hier() argument
1799 cpuset_for_each_descendant_pre(cp, pos_css, cs) { in update_nodemasks_hier()
1849 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, in update_nodemask() argument
1858 if (cs == &top_cpuset) { in update_nodemask()
1883 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) { in update_nodemask()
1887 retval = validate_change(cs, trialcs); in update_nodemask()
1894 cs->mems_allowed = trialcs->mems_allowed; in update_nodemask()
1898 update_nodemasks_hier(cs, &trialcs->mems_allowed); in update_nodemask()
1914 static int update_relax_domain_level(struct cpuset *cs, s64 val) in update_relax_domain_level() argument
1921 if (val != cs->relax_domain_level) { in update_relax_domain_level()
1922 cs->relax_domain_level = val; in update_relax_domain_level()
1923 if (!cpumask_empty(cs->cpus_allowed) && in update_relax_domain_level()
1924 is_sched_load_balance(cs)) in update_relax_domain_level()
1939 static void update_tasks_flags(struct cpuset *cs) in update_tasks_flags() argument
1944 css_task_iter_start(&cs->css, 0, &it); in update_tasks_flags()
1946 cpuset_update_task_spread_flag(cs, task); in update_tasks_flags()
1959 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, in update_flag() argument
1967 trialcs = alloc_trial_cpuset(cs); in update_flag()
1976 err = validate_change(cs, trialcs); in update_flag()
1980 balance_flag_changed = (is_sched_load_balance(cs) != in update_flag()
1983 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) in update_flag()
1984 || (is_spread_page(cs) != is_spread_page(trialcs))); in update_flag()
1987 cs->flags = trialcs->flags; in update_flag()
1994 update_tasks_flags(cs); in update_flag()
2007 static int update_prstate(struct cpuset *cs, int new_prs) in update_prstate() argument
2009 int err, old_prs = cs->partition_root_state; in update_prstate()
2010 struct cpuset *parent = parent_cs(cs); in update_prstate()
2033 if (cpumask_empty(cs->cpus_allowed)) in update_prstate()
2036 err = update_flag(CS_CPU_EXCLUSIVE, cs, 1); in update_prstate()
2040 err = update_parent_subparts_cpumask(cs, partcmd_enable, in update_prstate()
2043 update_flag(CS_CPU_EXCLUSIVE, cs, 0); in update_prstate()
2052 update_flag(CS_CPU_EXCLUSIVE, cs, 0); in update_prstate()
2057 err = update_parent_subparts_cpumask(cs, partcmd_disable, in update_prstate()
2063 update_flag(CS_CPU_EXCLUSIVE, cs, 0); in update_prstate()
2074 update_sibling_cpumasks(parent, cs, &tmpmask); in update_prstate()
2080 cs->partition_root_state = new_prs; in update_prstate()
2082 notify_partition_change(cs, old_prs, new_prs); in update_prstate()
2196 struct cpuset *cs; in cpuset_can_attach() local
2202 cs = css_cs(css); in cpuset_can_attach()
2209 (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) in cpuset_can_attach()
2213 ret = task_can_attach(task, cs->cpus_allowed); in cpuset_can_attach()
2225 cs->attach_in_progress++; in cpuset_can_attach()
2257 struct cpuset *cs; in cpuset_attach() local
2261 cs = css_cs(css); in cpuset_attach()
2265 guarantee_online_mems(cs, &cpuset_attach_nodemask_to); in cpuset_attach()
2268 if (cs != &top_cpuset) in cpuset_attach()
2279 cpuset_update_task_spread_flag(cs, task); in cpuset_attach()
2286 cpuset_attach_nodemask_to = cs->effective_mems; in cpuset_attach()
2301 if (is_memory_migrate(cs)) in cpuset_attach()
2309 cs->old_mems_allowed = cpuset_attach_nodemask_to; in cpuset_attach()
2311 cs->attach_in_progress--; in cpuset_attach()
2312 if (!cs->attach_in_progress) in cpuset_attach()
2342 struct cpuset *cs = css_cs(css); in cpuset_write_u64() local
2348 if (!is_cpuset_online(cs)) { in cpuset_write_u64()
2355 retval = update_flag(CS_CPU_EXCLUSIVE, cs, val); in cpuset_write_u64()
2358 retval = update_flag(CS_MEM_EXCLUSIVE, cs, val); in cpuset_write_u64()
2361 retval = update_flag(CS_MEM_HARDWALL, cs, val); in cpuset_write_u64()
2364 retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val); in cpuset_write_u64()
2367 retval = update_flag(CS_MEMORY_MIGRATE, cs, val); in cpuset_write_u64()
2373 retval = update_flag(CS_SPREAD_PAGE, cs, val); in cpuset_write_u64()
2376 retval = update_flag(CS_SPREAD_SLAB, cs, val); in cpuset_write_u64()
2391 struct cpuset *cs = css_cs(css); in cpuset_write_s64() local
2397 if (!is_cpuset_online(cs)) in cpuset_write_s64()
2402 retval = update_relax_domain_level(cs, val); in cpuset_write_s64()
2420 struct cpuset *cs = css_cs(of_css(of)); in cpuset_write_resmask() local
2445 css_get(&cs->css); in cpuset_write_resmask()
2451 if (!is_cpuset_online(cs)) in cpuset_write_resmask()
2454 trialcs = alloc_trial_cpuset(cs); in cpuset_write_resmask()
2462 retval = update_cpumask(cs, trialcs, buf); in cpuset_write_resmask()
2465 retval = update_nodemask(cs, trialcs, buf); in cpuset_write_resmask()
2477 css_put(&cs->css); in cpuset_write_resmask()
2492 struct cpuset *cs = css_cs(seq_css(sf)); in cpuset_common_seq_show() local
2500 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed)); in cpuset_common_seq_show()
2503 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed)); in cpuset_common_seq_show()
2506 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus)); in cpuset_common_seq_show()
2509 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems)); in cpuset_common_seq_show()
2512 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus)); in cpuset_common_seq_show()
2524 struct cpuset *cs = css_cs(css); in cpuset_read_u64() local
2528 return is_cpu_exclusive(cs); in cpuset_read_u64()
2530 return is_mem_exclusive(cs); in cpuset_read_u64()
2532 return is_mem_hardwall(cs); in cpuset_read_u64()
2534 return is_sched_load_balance(cs); in cpuset_read_u64()
2536 return is_memory_migrate(cs); in cpuset_read_u64()
2540 return fmeter_getrate(&cs->fmeter); in cpuset_read_u64()
2542 return is_spread_page(cs); in cpuset_read_u64()
2544 return is_spread_slab(cs); in cpuset_read_u64()
2555 struct cpuset *cs = css_cs(css); in cpuset_read_s64() local
2559 return cs->relax_domain_level; in cpuset_read_s64()
2570 struct cpuset *cs = css_cs(seq_css(seq)); in sched_partition_show() local
2572 switch (cs->partition_root_state) { in sched_partition_show()
2589 struct cpuset *cs = css_cs(of_css(of)); in sched_partition_write() local
2605 css_get(&cs->css); in sched_partition_write()
2608 if (!is_cpuset_online(cs)) in sched_partition_write()
2611 retval = update_prstate(cs, val); in sched_partition_write()
2615 css_put(&cs->css); in sched_partition_write()
2788 struct cpuset *cs; in cpuset_css_alloc() local
2793 cs = kzalloc(sizeof(*cs), GFP_KERNEL); in cpuset_css_alloc()
2794 if (!cs) in cpuset_css_alloc()
2797 if (alloc_cpumasks(cs, NULL)) { in cpuset_css_alloc()
2798 kfree(cs); in cpuset_css_alloc()
2802 __set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in cpuset_css_alloc()
2803 nodes_clear(cs->mems_allowed); in cpuset_css_alloc()
2804 nodes_clear(cs->effective_mems); in cpuset_css_alloc()
2805 fmeter_init(&cs->fmeter); in cpuset_css_alloc()
2806 cs->relax_domain_level = -1; in cpuset_css_alloc()
2810 __set_bit(CS_MEMORY_MIGRATE, &cs->flags); in cpuset_css_alloc()
2812 return &cs->css; in cpuset_css_alloc()
2817 struct cpuset *cs = css_cs(css); in cpuset_css_online() local
2818 struct cpuset *parent = parent_cs(cs); in cpuset_css_online()
2828 set_bit(CS_ONLINE, &cs->flags); in cpuset_css_online()
2830 set_bit(CS_SPREAD_PAGE, &cs->flags); in cpuset_css_online()
2832 set_bit(CS_SPREAD_SLAB, &cs->flags); in cpuset_css_online()
2838 cpumask_copy(cs->effective_cpus, parent->effective_cpus); in cpuset_css_online()
2839 cs->effective_mems = parent->effective_mems; in cpuset_css_online()
2840 cs->use_parent_ecpus = true; in cpuset_css_online()
2871 cs->mems_allowed = parent->mems_allowed; in cpuset_css_online()
2872 cs->effective_mems = parent->mems_allowed; in cpuset_css_online()
2873 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); in cpuset_css_online()
2874 cpumask_copy(cs->effective_cpus, parent->cpus_allowed); in cpuset_css_online()
2895 struct cpuset *cs = css_cs(css); in cpuset_css_offline() local
2900 if (is_partition_root(cs)) in cpuset_css_offline()
2901 update_prstate(cs, 0); in cpuset_css_offline()
2904 is_sched_load_balance(cs)) in cpuset_css_offline()
2905 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); in cpuset_css_offline()
2907 if (cs->use_parent_ecpus) { in cpuset_css_offline()
2908 struct cpuset *parent = parent_cs(cs); in cpuset_css_offline()
2910 cs->use_parent_ecpus = false; in cpuset_css_offline()
2915 clear_bit(CS_ONLINE, &cs->flags); in cpuset_css_offline()
2923 struct cpuset *cs = css_cs(css); in cpuset_css_free() local
2925 free_cpuset(cs); in cpuset_css_free()
3012 static void remove_tasks_in_empty_cpuset(struct cpuset *cs) in remove_tasks_in_empty_cpuset() argument
3020 parent = parent_cs(cs); in remove_tasks_in_empty_cpuset()
3025 if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) { in remove_tasks_in_empty_cpuset()
3027 pr_cont_cgroup_name(cs->css.cgroup); in remove_tasks_in_empty_cpuset()
3033 hotplug_update_tasks_legacy(struct cpuset *cs, in hotplug_update_tasks_legacy() argument
3040 cpumask_copy(cs->cpus_allowed, new_cpus); in hotplug_update_tasks_legacy()
3041 cpumask_copy(cs->effective_cpus, new_cpus); in hotplug_update_tasks_legacy()
3042 cs->mems_allowed = *new_mems; in hotplug_update_tasks_legacy()
3043 cs->effective_mems = *new_mems; in hotplug_update_tasks_legacy()
3050 if (cpus_updated && !cpumask_empty(cs->cpus_allowed)) in hotplug_update_tasks_legacy()
3051 update_tasks_cpumask(cs); in hotplug_update_tasks_legacy()
3052 if (mems_updated && !nodes_empty(cs->mems_allowed)) in hotplug_update_tasks_legacy()
3053 update_tasks_nodemask(cs); in hotplug_update_tasks_legacy()
3055 is_empty = cpumask_empty(cs->cpus_allowed) || in hotplug_update_tasks_legacy()
3056 nodes_empty(cs->mems_allowed); in hotplug_update_tasks_legacy()
3066 remove_tasks_in_empty_cpuset(cs); in hotplug_update_tasks_legacy()
3072 hotplug_update_tasks(struct cpuset *cs, in hotplug_update_tasks() argument
3077 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus); in hotplug_update_tasks()
3079 *new_mems = parent_cs(cs)->effective_mems; in hotplug_update_tasks()
3082 cpumask_copy(cs->effective_cpus, new_cpus); in hotplug_update_tasks()
3083 cs->effective_mems = *new_mems; in hotplug_update_tasks()
3087 update_tasks_cpumask(cs); in hotplug_update_tasks()
3089 update_tasks_nodemask(cs); in hotplug_update_tasks()
3108 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) in cpuset_hotplug_update_tasks() argument
3116 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0); in cpuset_hotplug_update_tasks()
3124 if (cs->attach_in_progress) { in cpuset_hotplug_update_tasks()
3129 parent = parent_cs(cs); in cpuset_hotplug_update_tasks()
3130 compute_effective_cpumask(&new_cpus, cs, parent); in cpuset_hotplug_update_tasks()
3131 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems); in cpuset_hotplug_update_tasks()
3133 if (cs->nr_subparts_cpus) in cpuset_hotplug_update_tasks()
3138 cpumask_andnot(&new_cpus, &new_cpus, cs->subparts_cpus); in cpuset_hotplug_update_tasks()
3140 if (!tmp || !cs->partition_root_state) in cpuset_hotplug_update_tasks()
3148 if (is_partition_root(cs) && (cpumask_empty(&new_cpus) || in cpuset_hotplug_update_tasks()
3150 if (cs->nr_subparts_cpus) { in cpuset_hotplug_update_tasks()
3152 cs->nr_subparts_cpus = 0; in cpuset_hotplug_update_tasks()
3153 cpumask_clear(cs->subparts_cpus); in cpuset_hotplug_update_tasks()
3155 compute_effective_cpumask(&new_cpus, cs, parent); in cpuset_hotplug_update_tasks()
3168 update_parent_subparts_cpumask(cs, partcmd_disable, in cpuset_hotplug_update_tasks()
3170 old_prs = cs->partition_root_state; in cpuset_hotplug_update_tasks()
3173 cs->partition_root_state = PRS_ERROR; in cpuset_hotplug_update_tasks()
3175 notify_partition_change(cs, old_prs, PRS_ERROR); in cpuset_hotplug_update_tasks()
3187 ((cs->partition_root_state == PRS_ERROR) || in cpuset_hotplug_update_tasks()
3189 update_parent_subparts_cpumask(cs, partcmd_update, NULL, tmp)) in cpuset_hotplug_update_tasks()
3193 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus); in cpuset_hotplug_update_tasks()
3194 mems_updated = !nodes_equal(new_mems, cs->effective_mems); in cpuset_hotplug_update_tasks()
3200 hotplug_update_tasks(cs, &new_cpus, &new_mems, in cpuset_hotplug_update_tasks()
3203 hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems, in cpuset_hotplug_update_tasks()
3297 struct cpuset *cs; in cpuset_hotplug_workfn() local
3301 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in cpuset_hotplug_workfn()
3302 if (cs == &top_cpuset || !css_tryget_online(&cs->css)) in cpuset_hotplug_workfn()
3306 cpuset_hotplug_update_tasks(cs, ptmp); in cpuset_hotplug_workfn()
3309 css_put(&cs->css); in cpuset_hotplug_workfn()
3489 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs) in nearest_hardwall_ancestor() argument
3491 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs)) in nearest_hardwall_ancestor()
3492 cs = parent_cs(cs); in nearest_hardwall_ancestor()
3493 return cs; in nearest_hardwall_ancestor()
3538 struct cpuset *cs; /* current cpuset ancestors */ in __cpuset_node_allowed() local
3562 cs = nearest_hardwall_ancestor(task_cs(current)); in __cpuset_node_allowed()
3563 allowed = node_isset(node, cs->mems_allowed); in __cpuset_node_allowed()
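A pattern that recurs in the references above (guarantee_online_cpus at 422-443, guarantee_online_mems at 460-464, nearest_hardwall_ancestor at 3489-3493) is the upward walk via parent_cs() to the nearest ancestor whose effective mask still intersects the online set. The following standalone sketch is not kernel code: it uses a hypothetical toy_cpuset type and a plain unsigned-long bitmask in place of struct cpuset and cpumask_var_t, purely to illustrate the shape of that fallback walk.

    #include <stdio.h>

    /* Toy stand-in for struct cpuset: only a parent link and an
     * "effective CPUs" bitmask are modelled (hypothetical names). */
    struct toy_cpuset {
            struct toy_cpuset *parent;
            unsigned long effective_cpus;   /* bit i set => CPU i usable */
    };

    /* Walk up until an ancestor's effective mask intersects 'online',
     * then return the intersection -- mirroring the loop shape of
     * guarantee_online_cpus()/guarantee_online_mems() listed above. */
    static unsigned long toy_guarantee_online(struct toy_cpuset *cs,
                                              unsigned long online)
    {
            while (cs->parent && !(cs->effective_cpus & online))
                    cs = cs->parent;
            return cs->effective_cpus & online;
    }

    int main(void)
    {
            struct toy_cpuset root  = { .parent = NULL,  .effective_cpus = 0xff };
            struct toy_cpuset child = { .parent = &root, .effective_cpus = 0x30 };

            /* The child's CPUs (bits 4-5) are all offline in 'online' (0x0f),
             * so the walk falls back to the root's mask and prints 0xf. */
            printf("%#lx\n", toy_guarantee_online(&child, 0x0f));
            return 0;
    }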