Cross-reference listing: lines referencing the identifier c (a struct cpupool pointer, passed as an argument or held in a local) in Xen's cpupool implementation, apparently common/sched/cpupool.c. Each entry shows the source line number, the matching line, the enclosing function, and whether c is an argument or a local there.

152 unsigned int cpupool_get_granularity(const struct cpupool *c)  in cpupool_get_granularity()  argument
154 return c ? sched_granularity : 1; in cpupool_get_granularity()
157 static void free_cpupool_struct(struct cpupool *c) in free_cpupool_struct() argument
159 if ( c ) in free_cpupool_struct()
161 free_cpumask_var(c->res_valid); in free_cpupool_struct()
162 free_cpumask_var(c->cpu_valid); in free_cpupool_struct()
164 xfree(c); in free_cpupool_struct()
169 struct cpupool *c = xzalloc(struct cpupool); in alloc_cpupool_struct() local
171 if ( !c ) in alloc_cpupool_struct()
174 if ( !zalloc_cpumask_var(&c->cpu_valid) || in alloc_cpupool_struct()
175 !zalloc_cpumask_var(&c->res_valid) ) in alloc_cpupool_struct()
177 free_cpupool_struct(c); in alloc_cpupool_struct()
178 c = NULL; in alloc_cpupool_struct()
181 return c; in alloc_cpupool_struct()
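
The pair above shows the usual alloc/free discipline: free_cpupool_struct() accepts NULL and partially initialised structures, so alloc_cpupool_struct() can use it as its single error path when either cpumask allocation fails. A minimal userspace sketch of the same pattern, with calloc/free standing in for xzalloc, zalloc_cpumask_var and their free counterparts (all names below are illustrative, not the Xen APIs):

#include <stdlib.h>

struct pool {
    unsigned long *cpu_valid;          /* stands in for cpumask_var_t */
    unsigned long *res_valid;
};

static void free_pool(struct pool *p)
{
    if ( p )                           /* tolerate NULL, like line 159 */
    {
        free(p->res_valid);            /* free(NULL) is a no-op, like   */
        free(p->cpu_valid);            /* free_cpumask_var() on a never */
        free(p);                       /* allocated mask                */
    }
}

static struct pool *alloc_pool(size_t mask_words)
{
    struct pool *p = calloc(1, sizeof(*p));   /* zeroed, like xzalloc */

    if ( !p )
        return NULL;

    p->cpu_valid = calloc(mask_words, sizeof(unsigned long));
    p->res_valid = calloc(mask_words, sizeof(unsigned long));
    if ( !p->cpu_valid || !p->res_valid )
    {
        free_pool(p);                  /* one cleanup path for all failures */
        p = NULL;
    }

    return p;
}
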
210 struct cpupool *c; in __cpupool_get_by_id() local
212 c = __cpupool_find_by_id(poolid, exact); in __cpupool_get_by_id()
213 if ( c != NULL ) in __cpupool_get_by_id()
214 atomic_inc(&c->refcnt); in __cpupool_get_by_id()
216 return c; in __cpupool_get_by_id()
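
__cpupool_get_by_id() couples the lookup with an atomic reference bump; the matching release is cpupool_put(), visible throughout the sysctl handler below. A hypothetical stand-alone sketch of the get side, using C11 atomics in place of Xen's atomic_t; in the real code the caller holds the cpupool lock across find-plus-inc, which is what makes the pairing safe:

#include <stdatomic.h>
#include <stddef.h>

struct pool {
    atomic_int refcnt;
    int id;
    struct pool *next;
};

/* Caller is assumed to hold the list lock: taking the reference before
 * the lock is dropped keeps the pool from being freed between lookup
 * and use. */
static struct pool *pool_get_by_id(struct pool *head, int id)
{
    struct pool *p;

    for ( p = head; p != NULL; p = p->next )
        if ( p->id == id )
        {
            atomic_fetch_add(&p->refcnt, 1);   /* cf. atomic_inc(&c->refcnt) */
            return p;
        }

    return NULL;
}
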
248 struct cpupool *c; in cpupool_create() local
253 if ( (c = alloc_cpupool_struct()) == NULL ) in cpupool_create()
257 atomic_set(&c->refcnt, 2); in cpupool_create()
276 c->next = *q; in cpupool_create()
279 c->cpupool_id = (poolid == CPUPOOLID_NONE) ? (last + 1) : poolid; in cpupool_create()
282 c->sched = scheduler_get_default(); in cpupool_create()
286 c->sched = scheduler_alloc(sched_id, perr); in cpupool_create()
287 if ( c->sched == NULL ) in cpupool_create()
290 c->sched->cpupool = c; in cpupool_create()
291 c->gran = opt_sched_granularity; in cpupool_create()
293 *q = c; in cpupool_create()
298 c->cpupool_id, c->sched->name, c->sched->opt_name); in cpupool_create()
301 return c; in cpupool_create()
305 free_cpupool_struct(c); in cpupool_create()
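
Two details in cpupool_create() are worth noting. First, atomic_set(&c->refcnt, 2) at line 257 accounts for two owners at once: the pool list the function inserts into, and the caller (the sysctl path drops its copy again with cpupool_put() at line 777). Second, lines 276-293 insert into the list through a pointer-to-pointer q, so the list head needs no special case. A hypothetical stand-alone sketch of that sorted insert with ID assignment (locking and error reporting elided):

#include <stddef.h>

struct pool {
    int id;
    struct pool *next;
};

#define POOLID_NONE (-1)               /* cf. CPUPOOLID_NONE */

static int pool_insert(struct pool **head, struct pool *c, int want_id)
{
    struct pool **q;
    int last = 0;

    for ( q = head; *q != NULL; q = &(*q)->next )
    {
        last = (*q)->id;
        if ( want_id != POOLID_NONE && (*q)->id >= want_id )
            break;
    }
    if ( *q != NULL && (*q)->id == want_id )
        return -1;                     /* ID taken (cf. -EEXIST) */

    c->id = (want_id == POOLID_NONE) ? last + 1 : want_id;  /* cf. line 279 */
    c->next = *q;                      /* cf. line 276 */
    *q = c;                            /* cf. line 293 */
    return 0;
}
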
316 static int cpupool_destroy(struct cpupool *c) in cpupool_destroy() argument
322 if ( *q == c ) in cpupool_destroy()
324 if ( *q != c ) in cpupool_destroy()
329 if ( (c->n_dom != 0) || cpumask_weight(c->cpu_valid) ) in cpupool_destroy()
334 *q = c->next; in cpupool_destroy()
337 cpupool_put(c); in cpupool_destroy()
339 debugtrace_printk("cpupool_destroy(pool=%d)\n", c->cpupool_id); in cpupool_destroy()
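
cpupool_destroy() is the mirror image: re-find the pool through the same pointer-to-pointer walk, refuse if it still holds domains or CPUs, unlink it, and drop the reference that cpupool_create() counted for the list; the actual free then happens inside cpupool_put() once the last reference is gone. A condensed sketch under those assumptions (hypothetical names, locking elided):

#include <stdatomic.h>
#include <stdlib.h>

struct pool {
    atomic_int refcnt;
    int n_dom;                         /* domains still in the pool */
    unsigned int n_cpus;               /* stands in for cpumask_weight(cpu_valid) */
    struct pool *next;
};

static void pool_put(struct pool *c)
{
    if ( atomic_fetch_sub(&c->refcnt, 1) == 1 )
        free(c);                       /* last reference: actually free */
}

static int pool_destroy(struct pool **head, struct pool *c)
{
    struct pool **q;

    for ( q = head; *q != NULL; q = &(*q)->next )
        if ( *q == c )                 /* cf. lines 322/324 */
            break;
    if ( *q != c )
        return -1;                     /* not in the list (cf. -ENOENT) */
    if ( c->n_dom != 0 || c->n_cpus != 0 )
        return -2;                     /* still busy (cf. line 329, -EBUSY) */

    *q = c->next;                      /* unlink, cf. line 334 */
    pool_put(c);                       /* drop the list's reference, cf. line 337 */
    return 0;
}
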
346 static int cpupool_move_domain_locked(struct domain *d, struct cpupool *c) in cpupool_move_domain_locked() argument
350 if ( unlikely(d->cpupool == c) ) in cpupool_move_domain_locked()
354 ret = sched_move_domain(d, c); in cpupool_move_domain_locked()
358 c->n_dom++; in cpupool_move_domain_locked()
362 int cpupool_move_domain(struct domain *d, struct cpupool *c) in cpupool_move_domain() argument
368 ret = cpupool_move_domain_locked(d, c); in cpupool_move_domain()
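
cpupool_move_domain() is a thin wrapper that takes the cpupool lock around cpupool_move_domain_locked(); the _locked variant exists so paths already holding the lock, such as the sysctl move at line 894, can reuse the logic. The generic shape of that idiom, sketched with a pthread mutex standing in for the cpupool lock:

#include <pthread.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

static int move_domain_locked(void)    /* caller must hold pool_lock */
{
    /* ... the actual work, cf. sched_move_domain() and the n_dom updates ... */
    return 0;
}

static int move_domain(void)
{
    int ret;

    pthread_mutex_lock(&pool_lock);
    ret = move_domain_locked();
    pthread_mutex_unlock(&pool_lock);

    return ret;
}
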
379 static int cpupool_assign_cpu_locked(struct cpupool *c, unsigned int cpu) in cpupool_assign_cpu_locked() argument
385 cpus = sched_get_opt_cpumask(c->gran, cpu); in cpupool_assign_cpu_locked()
387 if ( (cpupool_moving_cpu == cpu) && (c != cpupool_cpu_moving) ) in cpupool_assign_cpu_locked()
389 ret = schedule_cpu_add(cpumask_first(cpus), c); in cpupool_assign_cpu_locked()
402 cpumask_or(c->cpu_valid, c->cpu_valid, cpus); in cpupool_assign_cpu_locked()
403 cpumask_and(c->res_valid, c->cpu_valid, &sched_res_mask); in cpupool_assign_cpu_locked()
408 for_each_domain_in_cpupool(d, c) in cpupool_assign_cpu_locked()
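
Lines 402-403 capture the mask bookkeeping: cpu_valid accumulates the newly assigned CPUs, and res_valid is recomputed as the intersection with sched_res_mask (as I read it, the CPUs acting as scheduling-resource masters under core scheduling; treat that reading as an assumption). A 64-CPU toy version with plain integers in place of cpumasks:

#include <stdint.h>

static uint64_t cpu_valid, res_valid;

/* Assumption for illustration: every second CPU is the master of a
 * two-thread scheduling resource. */
static const uint64_t sched_res_mask = 0x5555555555555555ull;

static void assign_cpus(uint64_t cpus)
{
    cpu_valid |= cpus;                        /* cpumask_or(),  cf. line 402 */
    res_valid  = cpu_valid & sched_res_mask;  /* cpumask_and(), cf. line 403 */
}
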
417 static int cpupool_unassign_cpu_finish(struct cpupool *c) in cpupool_unassign_cpu_finish() argument
424 if ( c != cpupool_cpu_moving ) in cpupool_unassign_cpu_finish()
459 for_each_domain_in_cpupool(d, c) in cpupool_unassign_cpu_finish()
468 static int cpupool_unassign_cpu_start(struct cpupool *c, unsigned int cpu) in cpupool_unassign_cpu_start() argument
476 if ( ((cpupool_moving_cpu != -1) || !cpumask_test_cpu(cpu, c->cpu_valid)) in cpupool_unassign_cpu_start()
484 if ( (c->n_dom > 0) && in cpupool_unassign_cpu_start()
485 (cpumask_weight(c->cpu_valid) == cpumask_weight(cpus)) && in cpupool_unassign_cpu_start()
489 for_each_domain_in_cpupool(d, c) in cpupool_unassign_cpu_start()
505 atomic_inc(&c->refcnt); in cpupool_unassign_cpu_start()
506 cpupool_cpu_moving = c; in cpupool_unassign_cpu_start()
507 cpumask_andnot(c->cpu_valid, c->cpu_valid, cpus); in cpupool_unassign_cpu_start()
508 cpumask_and(c->res_valid, c->cpu_valid, &sched_res_mask); in cpupool_unassign_cpu_start()
520 struct cpupool *c = info; in cpupool_unassign_cpu_helper() local
527 ret = cpupool_unassign_cpu_finish(c); in cpupool_unassign_cpu_helper()
547 static int cpupool_unassign_cpu(struct cpupool *c, unsigned int cpu) in cpupool_unassign_cpu() argument
554 c->cpupool_id, cpu); in cpupool_unassign_cpu()
560 ret = cpupool_unassign_cpu_start(c, master_cpu); in cpupool_unassign_cpu()
564 c->cpupool_id, cpu, ret); in cpupool_unassign_cpu()
575 return continue_hypercall_on_cpu(work_cpu, cpupool_unassign_cpu_helper, c); in cpupool_unassign_cpu()
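
CPU removal is split into two phases. cpupool_unassign_cpu_start() marks the CPU as in flight (cpupool_moving_cpu / cpupool_cpu_moving, lines 505-507), takes an extra pool reference so the pool survives the handoff, and clears the CPU from the masks; line 575 then re-enters the hypercall on work_cpu via continue_hypercall_on_cpu() with cpupool_unassign_cpu_helper() as the continuation, which calls cpupool_unassign_cpu_finish() there. A hypothetical single-threaded sketch of the two phases, with the cross-CPU handoff itself elided:

#include <stdint.h>
#include <stddef.h>

struct pool {
    int refs;
    uint64_t cpu_valid;
};

static int moving_cpu = -1;            /* cf. cpupool_moving_cpu */
static struct pool *moving_pool;       /* cf. cpupool_cpu_moving */

static int unassign_start(struct pool *c, unsigned int cpu)
{
    if ( moving_cpu != -1 || !(c->cpu_valid & (1ull << cpu)) )
        return -1;                     /* move in flight, or CPU not in pool (cf. line 476) */

    c->refs++;                         /* keep c alive across the handoff, cf. line 505 */
    moving_cpu = (int)cpu;
    moving_pool = c;                   /* cf. line 506 */
    c->cpu_valid &= ~(1ull << cpu);    /* cf. lines 507-508; res_valid update elided */
    return 0;
}

static int unassign_finish(struct pool *c)
{
    if ( c != moving_pool )
        return -1;                     /* cf. the check at line 424 */

    /* ... the real code pulls the CPU out of the scheduler here ... */

    moving_cpu = -1;
    moving_pool = NULL;
    c->refs--;                         /* release the reference taken in _start */
    return 0;
}
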
586 struct cpupool *c; in cpupool_add_domain() local
593 c = cpupool_find_by_id(poolid); in cpupool_add_domain()
594 if ( c == NULL ) in cpupool_add_domain()
596 else if ( !cpumask_weight(c->cpu_valid) ) in cpupool_add_domain()
600 c->n_dom++; in cpupool_add_domain()
601 n_dom = c->n_dom; in cpupool_add_domain()
602 d->cpupool = c; in cpupool_add_domain()
734 struct cpupool **c; in cpupool_cpu_remove_forced() local
738 for_each_cpupool ( c ) in cpupool_cpu_remove_forced()
740 if ( cpumask_test_cpu(master_cpu, (*c)->cpu_valid) ) in cpupool_cpu_remove_forced()
742 ret = cpupool_unassign_cpu_start(*c, master_cpu); in cpupool_cpu_remove_forced()
744 ret = cpupool_unassign_cpu_finish(*c); in cpupool_cpu_remove_forced()
762 struct cpupool *c; in cpupool_do_sysctl() local
773 c = cpupool_create(poolid, op->sched_id, &ret); in cpupool_do_sysctl()
774 if ( c != NULL ) in cpupool_do_sysctl()
776 op->cpupool_id = c->cpupool_id; in cpupool_do_sysctl()
777 cpupool_put(c); in cpupool_do_sysctl()
784 c = cpupool_get_by_id(op->cpupool_id); in cpupool_do_sysctl()
786 if ( c == NULL ) in cpupool_do_sysctl()
788 ret = cpupool_destroy(c); in cpupool_do_sysctl()
789 cpupool_put(c); in cpupool_do_sysctl()
795 c = cpupool_get_next_by_id(op->cpupool_id); in cpupool_do_sysctl()
797 if ( c == NULL ) in cpupool_do_sysctl()
799 op->cpupool_id = c->cpupool_id; in cpupool_do_sysctl()
800 op->sched_id = c->sched->sched_id; in cpupool_do_sysctl()
801 op->n_dom = c->n_dom; in cpupool_do_sysctl()
802 ret = cpumask_to_xenctl_bitmap(&op->cpumap, c->cpu_valid); in cpupool_do_sysctl()
803 cpupool_put(c); in cpupool_do_sysctl()
818 c = cpupool_find_by_id(op->cpupool_id); in cpupool_do_sysctl()
820 if ( c == NULL ) in cpupool_do_sysctl()
826 cpus = sched_get_opt_cpumask(c->gran, cpu); in cpupool_do_sysctl()
838 cpus = sched_get_opt_cpumask(c->gran, cpu); in cpupool_do_sysctl()
842 ret = cpupool_assign_cpu_locked(c, cpu); in cpupool_do_sysctl()
856 c = cpupool_get_by_id(op->cpupool_id); in cpupool_do_sysctl()
858 if ( c == NULL ) in cpupool_do_sysctl()
862 cpu = cpumask_last(c->cpu_valid); in cpupool_do_sysctl()
863 ret = (cpu < nr_cpu_ids) ? cpupool_unassign_cpu(c, cpu) : -EINVAL; in cpupool_do_sysctl()
864 cpupool_put(c); in cpupool_do_sysctl()
892 c = cpupool_find_by_id(op->cpupool_id); in cpupool_do_sysctl()
893 if ( (c != NULL) && cpumask_weight(c->cpu_valid) ) in cpupool_do_sysctl()
894 ret = cpupool_move_domain_locked(d, c); in cpupool_do_sysctl()
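
Every case in cpupool_do_sysctl() follows the same lifecycle: look the pool up (taking a reference where the get variant is used), operate on it, and cpupool_put() it before returning, so a concurrent destroy cannot free the pool mid-operation. A condensed sketch of that shape, with hypothetical op codes and stub helpers:

#include <stddef.h>

enum pool_op { OP_INFO, OP_RMCPU };    /* hypothetical op codes */

struct pool { int refcnt; };

static struct pool the_pool;           /* stub backing store */

static struct pool *pool_get_by_id(int poolid)
{
    if ( poolid != 0 )
        return NULL;                   /* stub: only pool 0 exists */
    the_pool.refcnt++;                 /* lookup takes a reference */
    return &the_pool;
}

static void pool_put(struct pool *c)
{
    c->refcnt--;                       /* stub release */
}

static int pool_sysctl(enum pool_op op, int poolid)
{
    struct pool *c = pool_get_by_id(poolid);
    int ret = 0;

    if ( c == NULL )
        return -1;                     /* cf. -ENOENT */

    switch ( op )
    {
    case OP_INFO:
        /* ... copy pool state out, cf. lines 799-802 ... */
        break;
    case OP_RMCPU:
        /* ... pick cpumask_last(cpu_valid) and start the unassign,
         *     cf. lines 862-863 ... */
        break;
    }

    pool_put(c);                       /* always drop the lookup reference */
    return ret;
}
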
931 struct cpupool **c; in dump_runq() local
946 for_each_cpupool(c) in dump_runq()
948 printk("Cpupool %d:\n", (*c)->cpupool_id); in dump_runq()
949 printk("Cpus: %*pbl\n", CPUMASK_PR((*c)->cpu_valid)); in dump_runq()
950 sched_gran_print((*c)->gran, cpupool_get_granularity(*c)); in dump_runq()
951 schedule_dump(*c); in dump_runq()