Lines Matching refs:sgc — references to the sgc field (struct sched_group_capacity) in kernel/sched/topology.c, grouped by function.

93 group->sgc->id, in sched_domain_debug_one()
102 if (group->sgc->capacity != SCHED_CAPACITY_SCALE) in sched_domain_debug_one()
103 printk(KERN_CONT " cap=%lu", group->sgc->capacity); in sched_domain_debug_one()
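
The two hits in sched_domain_debug_one() show that the capacity is only printed when it deviates from the default. A minimal userspace sketch of that pattern, assuming SCHED_CAPACITY_SCALE is 1024 (1 << 10) as in the kernel, with the struct pared down to the two referenced fields:

    #include <stdio.h>

    #define SCHED_CAPACITY_SCALE 1024UL

    /* Pared-down stand-in for struct sched_group_capacity: only the
     * two fields the debug output references. */
    struct sched_group_capacity {
        int id;
        unsigned long capacity;
    };

    static void debug_one(const struct sched_group_capacity *sgc)
    {
        printf("id=%d", sgc->id);
        /* Print the capacity only when it differs from the default,
         * mirroring the conditional on line 102. */
        if (sgc->capacity != SCHED_CAPACITY_SCALE)
            printf(" cap=%lu", sgc->capacity);
        printf("\n");
    }

    int main(void)
    {
        struct sched_group_capacity a = { .id = 0, .capacity = 1024 };
        struct sched_group_capacity b = { .id = 1, .capacity = 760 };
        debug_one(&a);   /* prints "id=0"         */
        debug_one(&b);   /* prints "id=1 cap=760" */
        return 0;
    }
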
595 if (free_sgc && atomic_dec_and_test(&sg->sgc->ref)) in free_sched_groups()
596 kfree(sg->sgc); in free_sched_groups()
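
Lines 595-596 are the release side of the sgc reference count: a sched_group_capacity can be shared by several groups, so it is only kfree()d when the last atomic_dec_and_test() brings the count to zero. A sketch of that last-reference-frees pattern using C11 atomics (names here are illustrative, not the kernel's):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct shared_cap {
        atomic_int ref;            /* counterpart of sgc->ref */
        unsigned long capacity;
    };

    static void put_cap(struct shared_cap *cap)
    {
        /* atomic_fetch_sub() returns the old value, so 1 means this
         * call dropped the last reference — the userspace analogue of
         * atomic_dec_and_test() returning true on line 595. */
        if (atomic_fetch_sub(&cap->ref, 1) == 1)
            free(cap);
    }

    int main(void)
    {
        struct shared_cap *cap = calloc(1, sizeof(*cap));
        if (!cap)
            return 1;
        atomic_store(&cap->ref, 2);   /* two sched_groups sharing one sgc */
        put_cap(cap);                 /* 2 -> 1: object stays alive       */
        put_cap(cap);                 /* 1 -> 0: freed here               */
        return 0;
    }
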
949 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); in init_overlap_sched_group()
950 if (atomic_inc_return(&sg->sgc->ref) == 1) in init_overlap_sched_group()
961 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span); in init_overlap_sched_group()
962 sg->sgc->min_capacity = SCHED_CAPACITY_SCALE; in init_overlap_sched_group()
963 sg->sgc->max_capacity = SCHED_CAPACITY_SCALE; in init_overlap_sched_group()
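
Lines 949-963 are the acquire side: each overlapping group points at a per-CPU sched_group_capacity, and only the first taker (atomic_inc_return() == 1) fills in the capacity fields with the SCHED_CAPACITY_SCALE * weight estimate. A sketch of that initialize-on-first-reference idiom (helper names are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    #define SCHED_CAPACITY_SCALE 1024UL

    struct cap {
        atomic_int ref;
        unsigned long capacity, min_capacity, max_capacity;
    };

    static void attach_cap(struct cap *c, unsigned int weight)
    {
        /* fetch_add returning 0 is the analogue of
         * atomic_inc_return(&sg->sgc->ref) == 1 on line 950: only the
         * first group to reference the shared object initializes it;
         * 'weight' stands in for cpumask_weight(sg_span). */
        if (atomic_fetch_add(&c->ref, 1) == 0) {
            c->capacity     = SCHED_CAPACITY_SCALE * weight;  /* line 961 */
            c->min_capacity = SCHED_CAPACITY_SCALE;           /* line 962 */
            c->max_capacity = SCHED_CAPACITY_SCALE;           /* line 963 */
        }
    }

    int main(void)
    {
        struct cap c = { 0 };
        attach_cap(&c, 4);   /* first taker: capacity becomes 4096 */
        attach_cap(&c, 8);   /* later takers skip initialization   */
        printf("ref=%d capacity=%lu\n", atomic_load(&c.ref), c.capacity);
        return 0;
    }
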
1168 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu); in get_group()
1173 WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1)); in get_group()
1188 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg)); in get_group()
1189 sg->sgc->min_capacity = SCHED_CAPACITY_SCALE; in get_group()
1190 sg->sgc->max_capacity = SCHED_CAPACITY_SCALE; in get_group()
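
get_group() (lines 1168-1190) repeats the same take-and-initialize pattern for non-overlapping domains, and the WARN_ON on line 1173 asserts the invariant that a group has been visited before exactly when its sgc already holds a reference. A sketch of that consistency check with a plain assert (the kernel's WARN_ON only logs, it does not abort):

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    static void check_visit(atomic_int *sgc_ref, bool already_visited)
    {
        /* Counterpart of line 1173's
         * WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1)):
         * in the kernel, already_visited is derived independently from the
         * sched_group's own refcount, and the sgc refcount must agree. */
        bool ref_says_visited = atomic_fetch_add(sgc_ref, 1) + 1 > 1;
        assert(already_visited == ref_says_visited);
    }

    int main(void)
    {
        atomic_int ref = 0;
        check_visit(&ref, false);   /* first visit: 0 -> 1, not yet seen */
        check_visit(&ref, true);    /* revisit: 1 -> 2, already seen     */
        return 0;
    }
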
1482 if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref)) in claim_allocations()
1483 *per_cpu_ptr(sdd->sgc, cpu) = NULL; in claim_allocations()
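
claim_allocations() (lines 1482-1483) keeps the two halves of the lifecycle consistent: any per-CPU sgc slot whose object picked up a reference is NULLed out, so the blanket kfree() loop in __sdt_free() skips everything now owned by a live sched_group. A sketch of that claim-by-NULLing handoff, with a plain array standing in for per_cpu_ptr():

    #include <stdatomic.h>
    #include <stddef.h>

    struct cap { atomic_int ref; };

    static void claim(struct cap **table, int nr)
    {
        /* Mirror of lines 1482-1483: any slot whose object is
         * referenced is forgotten by the table, so a later bulk free
         * pass leaves it to its new owner. */
        for (int i = 0; i < nr; i++)
            if (table[i] && atomic_load(&table[i]->ref))
                table[i] = NULL;
    }

    int main(void)
    {
        struct cap a = { 0 }, b = { 0 };
        atomic_store(&b.ref, 1);            /* b was taken by a group */
        struct cap *table[2] = { &a, &b };
        claim(table, 2);
        return (table[0] == &a && table[1] == NULL) ? 0 : 1;
    }
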
2036 sdd->sgc = alloc_percpu(struct sched_group_capacity *); in __sdt_alloc()
2037 if (!sdd->sgc) in __sdt_alloc()
2044 struct sched_group_capacity *sgc; in __sdt_alloc() local
2069 sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(), in __sdt_alloc()
2071 if (!sgc) in __sdt_alloc()
2075 sgc->id = j; in __sdt_alloc()
2078 *per_cpu_ptr(sdd->sgc, j) = sgc; in __sdt_alloc()
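
__sdt_alloc() (lines 2036-2078) sets up the storage: a per-CPU array of pointers, then one zeroed sched_group_capacity per CPU, allocated on that CPU's node with the group's cpumask tacked onto the end of the struct (the "+ cpumask_size()" on line 2069). A userspace sketch of that allocate-struct-plus-trailing-bitmap pattern, using a flexible array member for the trailing mask and a plain array for the percpu table (all names here are illustrative):

    #include <stdlib.h>

    #define NR_CPUS 4
    #define CPUMASK_BYTES ((NR_CPUS + 7) / 8)  /* stand-in for cpumask_size() */

    struct cap {
        int id;
        unsigned long capacity;
        unsigned char cpumask[];   /* trailing per-object mask, as implied
                                    * by "+ cpumask_size()" on line 2069  */
    };

    struct cap *table[NR_CPUS];    /* plain array standing in for the
                                    * alloc_percpu() table on line 2036 */

    static int sdt_alloc(void)
    {
        for (int j = 0; j < NR_CPUS; j++) {
            /* One zeroed object per CPU; kzalloc_node() additionally
             * places it on that CPU's NUMA node, which calloc() cannot. */
            struct cap *c = calloc(1, sizeof(*c) + CPUMASK_BYTES);
            if (!c)
                return -1;         /* caller unwinds through the free path */
            c->id = j;             /* mirrors sgc->id = j on line 2075     */
            table[j] = c;
        }
        return 0;
    }
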
2107 if (sdd->sgc) in __sdt_free()
2108 kfree(*per_cpu_ptr(sdd->sgc, j)); in __sdt_free()
2116 free_percpu(sdd->sgc); in __sdt_free()
2117 sdd->sgc = NULL; in __sdt_free()
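
__sdt_free() (lines 2107-2117) is the mirror image: free whatever claim_allocations() left behind, then release the pointer table itself and NULL the field so a repeated teardown is harmless. Continuing the allocation sketch above ('table' and NR_CPUS are the ones defined there):

    static void sdt_free(void)
    {
        for (int j = 0; j < NR_CPUS; j++) {
            free(table[j]);        /* NULL entries (slots claimed away)
                                    * are no-ops, as with kfree() on
                                    * line 2108                          */
            table[j] = NULL;
        }
        /* The kernel then frees the pointer table and clears the field
         * so teardown is idempotent: free_percpu(sdd->sgc);
         * sdd->sgc = NULL; (lines 2116-2117). The static table here
         * has no percpu array to release. */
    }

    int main(void)
    {
        if (sdt_alloc() != 0) {    /* on failure __sdt_alloc() likewise */
            sdt_free();            /* unwinds through __sdt_free()      */
            return 1;
        }
        sdt_free();
        return 0;
    }
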