#ifndef __XEN_CPUMASK_H
#define __XEN_CPUMASK_H

/*
 * Cpumasks provide a bitmap suitable for representing the
 * set of CPUs in a system, one bit position per CPU number.
 *
 * See detailed comments in the file xen/bitmap.h describing the
 * data type on which these cpumasks are based.
 *
 * The available cpumask operations are:
 *
 * void cpumask_set_cpu(cpu, mask)              turn on bit 'cpu' in mask
 * void cpumask_clear_cpu(cpu, mask)            turn off bit 'cpu' in mask
 * void cpumask_setall(mask)                    set all bits
 * void cpumask_clear(mask)                     clear all bits
 * bool cpumask_test_cpu(cpu, mask)             true iff bit 'cpu' set in mask
 * int cpumask_test_and_set_cpu(cpu, mask)      test and set bit 'cpu' in mask
 * int cpumask_test_and_clear_cpu(cpu, mask)    test and clear bit 'cpu' in mask
 *
 * void cpumask_and(dst, src1, src2)            dst = src1 & src2  [intersection]
 * void cpumask_or(dst, src1, src2)             dst = src1 | src2  [union]
 * void cpumask_xor(dst, src1, src2)            dst = src1 ^ src2
 * void cpumask_andnot(dst, src1, src2)         dst = src1 & ~src2
 * void cpumask_complement(dst, src)            dst = ~src
 *
 * int cpumask_equal(mask1, mask2)              Does mask1 == mask2?
 * int cpumask_intersects(mask1, mask2)         Do mask1 and mask2 intersect?
 * int cpumask_subset(mask1, mask2)             Is mask1 a subset of mask2?
 * int cpumask_empty(mask)                      Is mask empty (no bits set)?
 * int cpumask_full(mask)                       Is mask full (all bits set)?
 * int cpumask_weight(mask)                     Hamming weight - number of set bits
 *
 * int cpumask_first(mask)                      Number of lowest set bit, or NR_CPUS
 * int cpumask_next(cpu, mask)                  Next cpu past 'cpu', or NR_CPUS
 * int cpumask_last(mask)                       Number of highest set bit, or NR_CPUS
 * int cpumask_any(mask)                        Any cpu in mask, or NR_CPUS
 * int cpumask_cycle(cpu, mask)                 Next cpu cycling from 'cpu', or NR_CPUS
 *
 * const cpumask_t *cpumask_of(cpu)             Return cpumask with bit 'cpu' set
 * unsigned long *cpumask_bits(mask)            Array of unsigned longs in mask
 *
 * for_each_cpu(cpu, mask)                      for-loop cpu over mask
 *
 * int num_online_cpus()                        Number of online CPUs
 * int num_possible_cpus()                      Number of all possible CPUs
 * int num_present_cpus()                       Number of present CPUs
 *
 * int cpu_online(cpu)                          Is this cpu online?
 * int cpu_possible(cpu)                        Is this cpu possible?
 * int cpu_present(cpu)                         Is this cpu present (can schedule)?
 *
 * for_each_possible_cpu(cpu)                   for-loop cpu over cpu_possible_map
 * for_each_online_cpu(cpu)                     for-loop cpu over cpu_online_map
 * for_each_present_cpu(cpu)                    for-loop cpu over cpu_present_map
 */

#include <xen/bitmap.h>
#include <xen/kernel.h>
#include <xen/random.h>

typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;

/*
 * printf arguments for a cpumask. Shorthand for using '%*pb[l]' when
 * printing a cpumask.
 */
#define CPUMASK_PR(src) nr_cpu_ids, cpumask_bits(src)
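
/*
 * Example (illustrative only): printing the online CPUs as a ranged list
 * such as "0-3,8".  '%*pbl' consumes the (width, pointer) pair that
 * CPUMASK_PR() expands to:
 *
 *  printk("online: %*pbl\n", CPUMASK_PR(&cpu_online_map));
 */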

extern unsigned int nr_cpu_ids;

#if NR_CPUS > 4 * BITS_PER_LONG
/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also,
 * not all bits may be allocated. */
extern unsigned int nr_cpumask_bits;
#else
# define nr_cpumask_bits (BITS_TO_LONGS(NR_CPUS) * BITS_PER_LONG)
#endif

/* verify cpu argument to cpumask_* operators */
static inline unsigned int cpumask_check(unsigned int cpu)
{
    ASSERT(cpu < nr_cpu_ids);
    return cpu;
}

static inline void cpumask_set_cpu(int cpu, volatile cpumask_t *dstp)
{
    set_bit(cpumask_check(cpu), dstp->bits);
}

static inline void __cpumask_set_cpu(int cpu, cpumask_t *dstp)
{
    __set_bit(cpumask_check(cpu), dstp->bits);
}

static inline void cpumask_clear_cpu(int cpu, volatile cpumask_t *dstp)
{
    clear_bit(cpumask_check(cpu), dstp->bits);
}

static inline void __cpumask_clear_cpu(int cpu, cpumask_t *dstp)
{
    __clear_bit(cpumask_check(cpu), dstp->bits);
}
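
/*
 * Note on naming: as with the underlying bitops, the double-underscore
 * variants above (and below) are non-atomic.  They are cheaper, but are
 * safe only when the mask cannot be updated concurrently, e.g. a mask
 * local to the caller or one protected by a lock; hence only the atomic
 * variants take a volatile-qualified mask.
 */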

static inline void cpumask_setall(cpumask_t *dstp)
{
    bitmap_fill(dstp->bits, nr_cpumask_bits);
}

static inline void cpumask_clear(cpumask_t *dstp)
{
    bitmap_zero(dstp->bits, nr_cpumask_bits);
}

static inline bool cpumask_test_cpu(unsigned int cpu, const cpumask_t *src)
{
    return test_bit(cpumask_check(cpu), src->bits);
}

static inline int cpumask_test_and_set_cpu(int cpu, volatile cpumask_t *addr)
{
    return test_and_set_bit(cpumask_check(cpu), addr->bits);
}

static inline int __cpumask_test_and_set_cpu(int cpu, cpumask_t *addr)
{
    return __test_and_set_bit(cpumask_check(cpu), addr->bits);
}

static inline int cpumask_test_and_clear_cpu(int cpu, volatile cpumask_t *addr)
{
    return test_and_clear_bit(cpumask_check(cpu), addr->bits);
}

static inline int __cpumask_test_and_clear_cpu(int cpu, cpumask_t *addr)
{
    return __test_and_clear_bit(cpumask_check(cpu), addr->bits);
}

static inline void cpumask_and(cpumask_t *dstp, const cpumask_t *src1p,
                               const cpumask_t *src2p)
{
    bitmap_and(dstp->bits, src1p->bits, src2p->bits, nr_cpumask_bits);
}

static inline void cpumask_or(cpumask_t *dstp, const cpumask_t *src1p,
                              const cpumask_t *src2p)
{
    bitmap_or(dstp->bits, src1p->bits, src2p->bits, nr_cpumask_bits);
}

static inline void cpumask_xor(cpumask_t *dstp, const cpumask_t *src1p,
                               const cpumask_t *src2p)
{
    bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nr_cpumask_bits);
}

static inline void cpumask_andnot(cpumask_t *dstp, const cpumask_t *src1p,
                                  const cpumask_t *src2p)
{
    bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nr_cpumask_bits);
}

static inline void cpumask_complement(cpumask_t *dstp, const cpumask_t *srcp)
{
    bitmap_complement(dstp->bits, srcp->bits, nr_cpumask_bits);
}
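
/*
 * Example (illustrative only): intersecting a caller-provided affinity
 * mask with the online map, using a scratch mask.  'affinity' is a
 * hypothetical const cpumask_t * argument; for large NR_CPUS builds a
 * cpumask_var_t (see below) is preferable to a stack cpumask_t:
 *
 *  cpumask_t tmp;
 *
 *  cpumask_and(&tmp, affinity, &cpu_online_map);
 *  if ( cpumask_empty(&tmp) )
 *      return -ENODEV;   // no online CPU satisfies 'affinity'
 */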

static inline int cpumask_equal(const cpumask_t *src1p,
                                const cpumask_t *src2p)
{
    return bitmap_equal(src1p->bits, src2p->bits, nr_cpu_ids);
}

static inline int cpumask_intersects(const cpumask_t *src1p,
                                     const cpumask_t *src2p)
{
    return bitmap_intersects(src1p->bits, src2p->bits, nr_cpu_ids);
}

static inline int cpumask_subset(const cpumask_t *src1p,
                                 const cpumask_t *src2p)
{
    return bitmap_subset(src1p->bits, src2p->bits, nr_cpu_ids);
}

static inline int cpumask_empty(const cpumask_t *srcp)
{
    return bitmap_empty(srcp->bits, nr_cpu_ids);
}

static inline int cpumask_full(const cpumask_t *srcp)
{
    return bitmap_full(srcp->bits, nr_cpu_ids);
}

static inline int cpumask_weight(const cpumask_t *srcp)
{
    return bitmap_weight(srcp->bits, nr_cpu_ids);
}

static inline void cpumask_copy(cpumask_t *dstp, const cpumask_t *srcp)
{
    bitmap_copy(dstp->bits, srcp->bits, nr_cpumask_bits);
}

static inline int cpumask_first(const cpumask_t *srcp)
{
    return min_t(int, nr_cpu_ids, find_first_bit(srcp->bits, nr_cpu_ids));
}

static inline int cpumask_next(int n, const cpumask_t *srcp)
{
    /* -1 is a legal arg here. */
    if (n != -1)
        cpumask_check(n);

    return min_t(int, nr_cpu_ids,
                 find_next_bit(srcp->bits, nr_cpu_ids, n + 1));
}

static inline int cpumask_last(const cpumask_t *srcp)
{
    int cpu, pcpu = nr_cpu_ids;

    for (cpu = cpumask_first(srcp);
         cpu < nr_cpu_ids;
         cpu = cpumask_next(cpu, srcp))
        pcpu = cpu;
    return pcpu;
}

static inline int cpumask_cycle(int n, const cpumask_t *srcp)
{
    int nxt = cpumask_next(n, srcp);

    if (nxt == nr_cpu_ids)
        nxt = cpumask_first(srcp);
    return nxt;
}
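
/*
 * Example (illustrative only): round-robin work distribution.  Assuming a
 * hypothetical per-caller 'last_cpu' variable, each call picks the next
 * online CPU, wrapping back to the first one when the end is reached:
 *
 *  last_cpu = cpumask_cycle(last_cpu, &cpu_online_map);
 */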

static inline int cpumask_test_or_cycle(int n, const cpumask_t *srcp)
{
    if ( cpumask_test_cpu(n, srcp) )
        return n;

    return cpumask_cycle(n, srcp);
}

static inline unsigned int cpumask_any(const cpumask_t *srcp)
{
    unsigned int cpu = cpumask_first(srcp);
    unsigned int w = cpumask_weight(srcp);

    if ( w > 1 && cpu < nr_cpu_ids )
        for ( w = get_random() % w; w--; )
        {
            unsigned int next = cpumask_next(cpu, srcp);

            if ( next >= nr_cpu_ids )
                break;
            cpu = next;
        }

    return cpu;
}
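
/*
 * Despite the name, cpumask_any() is not merely "whichever bit is
 * cheapest": it advances a random number of steps (uniform over the
 * mask's weight, modulo any bias in get_random()) from the first set
 * bit, so each CPU in the mask is picked with near-equal probability.
 * Callers wanting the cheapest choice should use cpumask_first().
 */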

/*
 * Special-case data structure for "single bit set only" constant CPU masks.
 *
 * We pre-generate all the 64 (or 32) possible bit positions, with enough
 * padding to the left and the right, and return the constant pointer
 * appropriately offset.
 */
extern const unsigned long
    cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];

static inline const cpumask_t *cpumask_of(unsigned int cpu)
{
    const unsigned long *p = cpu_bit_bitmap[1 + cpumask_check(cpu) %
                                            BITS_PER_LONG];

    return (const cpumask_t *)(p - cpu / BITS_PER_LONG);
}
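
/*
 * Worked example of the offset trick, assuming the table is laid out as
 * in the usual implementation (row 'i' has only word 0 non-zero, holding
 * 1UL << (i-1)) and BITS_PER_LONG == 64: for cpu == 70, 'p' points at the
 * row whose word 0 has bit 6 set; stepping back 70 / 64 == 1 word makes
 * that word appear as word 1 of the returned mask, i.e. bit 64 + 6 == 70.
 * The all-zero neighbouring rows supply the padding for the other words.
 */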

#define cpumask_bits(maskp) ((maskp)->bits)

extern const cpumask_t cpumask_all;

/*
 * cpumask_var_t: struct cpumask for stack usage.
 *
 * Oh, the wicked games we play!  In order to make kernel coding a
 * little more difficult, we typedef cpumask_var_t to an array or a
 * pointer: doing &mask on an array is a no-op, so it still works.
 *
 * i.e.
 *  cpumask_var_t tmpmask;
 *  if (!alloc_cpumask_var(&tmpmask))
 *      return -ENOMEM;
 *
 *  ... use 'tmpmask' like a normal struct cpumask * ...
 *
 *  free_cpumask_var(tmpmask);
 */
#if NR_CPUS > 2 * BITS_PER_LONG
#include <xen/xmalloc.h>

typedef cpumask_t *cpumask_var_t;

static inline bool alloc_cpumask_var(cpumask_var_t *mask)
{
    *mask = _xmalloc(nr_cpumask_bits / 8, sizeof(long));
    return *mask != NULL;
}

static inline bool cond_alloc_cpumask_var(cpumask_var_t *mask)
{
    if (*mask == NULL)
        *mask = _xmalloc(nr_cpumask_bits / 8, sizeof(long));
    return *mask != NULL;
}

static inline bool zalloc_cpumask_var(cpumask_var_t *mask)
{
    *mask = _xzalloc(nr_cpumask_bits / 8, sizeof(long));
    return *mask != NULL;
}

static inline bool cond_zalloc_cpumask_var(cpumask_var_t *mask)
{
    if (*mask == NULL)
        *mask = _xzalloc(nr_cpumask_bits / 8, sizeof(long));
    else
        cpumask_clear(*mask);
    return *mask != NULL;
}

static inline void free_cpumask_var(cpumask_var_t mask)
{
    xfree(mask);
}

/* Free an allocated mask, and zero the pointer to it. */
#define FREE_CPUMASK_VAR(m) XFREE(m)
#else
typedef cpumask_t cpumask_var_t[1];

static inline bool alloc_cpumask_var(cpumask_var_t *mask)
{
    return true;
}
#define cond_alloc_cpumask_var alloc_cpumask_var

static inline bool zalloc_cpumask_var(cpumask_var_t *mask)
{
    cpumask_clear(*mask);
    return true;
}
#define cond_zalloc_cpumask_var zalloc_cpumask_var

static inline void free_cpumask_var(cpumask_var_t mask)
{
}

#define FREE_CPUMASK_VAR(m) free_cpumask_var(m)
#endif

#if NR_CPUS > 1
#define for_each_cpu(cpu, mask)                 \
    for ((cpu) = cpumask_first(mask);           \
         (cpu) < nr_cpu_ids;                    \
         (cpu) = cpumask_next(cpu, mask))
#else /* NR_CPUS == 1 */
#define for_each_cpu(cpu, mask)                 \
    for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)(mask))
#endif /* NR_CPUS */
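
/*
 * Example (illustrative only): counting the CPUs in a mask by iterating.
 * cpumask_weight() is the efficient way; this just shows the loop shape:
 *
 *  unsigned int cpu, count = 0;
 *
 *  for_each_cpu(cpu, mask)
 *      count++;
 */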

/*
 * The following particular system cpumasks and operations manage
 * possible, present and online cpus.  Each of them is a fixed size
 * bitmap of size NR_CPUS.
 *
 *  #ifdef CONFIG_HOTPLUG_CPU
 *     cpu_possible_map - has bit 'cpu' set iff cpu is populatable
 *     cpu_present_map  - has bit 'cpu' set iff cpu is populated
 *     cpu_online_map   - has bit 'cpu' set iff cpu available to scheduler
 *  #else
 *     cpu_possible_map - has bit 'cpu' set iff cpu is populated
 *     cpu_present_map  - copy of cpu_possible_map
 *     cpu_online_map   - has bit 'cpu' set iff cpu available to scheduler
 *  #endif
 *
 *  In either case, NR_CPUS is fixed at compile time, as the static
 *  size of these bitmaps.  The cpu_possible_map is fixed at boot
 *  time, as the set of CPU ids that might ever be plugged in at
 *  any time during the life of that system boot.  The
 *  cpu_present_map is dynamic(*), representing which CPUs
 *  are currently plugged in.  And cpu_online_map is the dynamic
 *  subset of cpu_present_map, indicating those CPUs available
 *  for scheduling.
 *
 *  If HOTPLUG is enabled, then cpu_possible_map is forced to have
 *  all NR_CPUS bits set, otherwise it is just the set of CPUs that
 *  ACPI reports present at boot.
 *
 *  If HOTPLUG is enabled, then cpu_present_map varies dynamically,
 *  depending on what ACPI reports as currently plugged in, otherwise
 *  cpu_present_map is just a copy of cpu_possible_map.
 *
 *  (*) Well, cpu_present_map is dynamic in the hotplug case.  If not
 *      hotplug, it's a copy of cpu_possible_map, hence fixed at boot.
 *
 * Subtleties:
 * 1) UP arches (NR_CPUS == 1, CONFIG_SMP not defined) hardcode the
 *    assumption that their single CPU is online.  The UP
 *    cpu_{online,possible,present}_maps are placebos.  Changing them
 *    will have no useful effect on the following num_*_cpus()
 *    and cpu_*() macros in the UP case.  This ugliness is a UP
 *    optimization - don't waste any instructions or memory references
 *    asking if you're online or how many CPUs there are if there is
 *    only one CPU.
 * 2) Most SMP arches #define some of these maps to be some
 *    other map specific to that arch.  Therefore, the following
 *    must be #define macros, not inlines.  To see why, examine
 *    the assembly code produced by the following.  Note that
 *    set1() writes phys_x_map, but set2() writes x_map:
 *        int x_map, phys_x_map;
 *        #define set1(a) x_map = a
 *        inline void set2(int a) { x_map = a; }
 *        #define x_map phys_x_map
 *        main(){ set1(3); set2(5); }
 */

extern cpumask_t cpu_possible_map;
extern cpumask_t cpu_online_map;
extern cpumask_t cpu_present_map;

#if NR_CPUS > 1
#define num_online_cpus()   cpumask_weight(&cpu_online_map)
#define num_possible_cpus() cpumask_weight(&cpu_possible_map)
#define num_present_cpus()  cpumask_weight(&cpu_present_map)
#define cpu_online(cpu)     cpumask_test_cpu(cpu, &cpu_online_map)
#define cpu_possible(cpu)   cpumask_test_cpu(cpu, &cpu_possible_map)
#define cpu_present(cpu)    cpumask_test_cpu(cpu, &cpu_present_map)
#else
#define num_online_cpus()   1
#define num_possible_cpus() 1
#define num_present_cpus()  1
#define cpu_online(cpu)     ((cpu) == 0)
#define cpu_possible(cpu)   ((cpu) == 0)
#define cpu_present(cpu)    ((cpu) == 0)
#endif

#define for_each_possible_cpu(cpu) for_each_cpu(cpu, &cpu_possible_map)
#define for_each_online_cpu(cpu)   for_each_cpu(cpu, &cpu_online_map)
#define for_each_present_cpu(cpu)  for_each_cpu(cpu, &cpu_present_map)
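
/*
 * Example (illustrative only): guarding cross-CPU work with an online
 * check before touching another CPU's state:
 *
 *  if ( cpu_online(cpu) )
 *      process_pending_work(cpu);   // hypothetical helper
 */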

/* Copy to/from cpumap provided by control tools. */
struct xenctl_bitmap;
int cpumask_to_xenctl_bitmap(struct xenctl_bitmap *, const cpumask_t *);
int xenctl_bitmap_to_cpumask(cpumask_var_t *, const struct xenctl_bitmap *);

#endif /* __XEN_CPUMASK_H */