/*
 * Copyright 2009-2017 Citrix Ltd and other contributors
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; version 2.1 only. with the special
 * exception on linking described in file LICENSE.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 */

#include "libxl_osdeps.h"

#include "libxl_internal.h"

/* Returns:
 *   0 - success
 *   ERROR_FAIL + errno == ENOENT - no entry found
 *   ERROR_$FOO + errno != ENOENT - other failure
 */
static int cpupool_info(libxl__gc *gc,
                        libxl_cpupoolinfo *info,
                        uint32_t poolid,
                        bool exact /* exactly poolid or >= poolid */)
{
    xc_cpupoolinfo_t *xcinfo;
    int rc = ERROR_FAIL;

    xcinfo = xc_cpupool_getinfo(CTX->xch, poolid);
    if (xcinfo == NULL)
    {
        if (exact || errno != ENOENT)
            LOGE(ERROR, "failed to get info for cpupool%d", poolid);
        return ERROR_FAIL;
    }

    if (exact && xcinfo->cpupool_id != poolid)
    {
        LOG(ERROR, "got info for cpupool%d, wanted cpupool%d",
            xcinfo->cpupool_id, poolid);
        goto out;
    }

    info->poolid = xcinfo->cpupool_id;
    info->pool_name = libxl_cpupoolid_to_name(CTX, info->poolid);
    if (!info->pool_name) {
        rc = ERROR_FAIL;
        goto out;
    }
    info->sched = xcinfo->sched_id;
    info->n_dom = xcinfo->n_dom;
    rc = libxl_cpu_bitmap_alloc(CTX, &info->cpumap, 0);
    if (rc)
        goto out;

    memcpy(info->cpumap.map, xcinfo->cpumap, info->cpumap.size);

    rc = 0;
out:
    xc_cpupool_infofree(CTX->xch, xcinfo);
    return rc;
}

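/*
 * Public wrapper around cpupool_info(): fill in 'info' for exactly
 * the given poolid (exact match required).
 */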
int libxl_cpupool_info(libxl_ctx *ctx,
                       libxl_cpupoolinfo *info, uint32_t poolid)
{
    GC_INIT(ctx);
    int rc = cpupool_info(gc, info, poolid, true);
    GC_FREE;
    return rc;
}

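/*
 * Return an array of all existing cpupools, found by walking pool ids
 * upwards with non-exact lookups until ENOENT.  The number of entries
 * is returned in *nb_pool_out and the array is owned by the caller;
 * NULL is returned on error.
 */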
libxl_cpupoolinfo * libxl_list_cpupool(libxl_ctx *ctx, int *nb_pool_out)
{
    GC_INIT(ctx);
    libxl_cpupoolinfo info, *ptr;

    int i;
    uint32_t poolid;

    ptr = NULL;

    poolid = 0;
    for (i = 0;; i++) {
        libxl_cpupoolinfo_init(&info);
        if (cpupool_info(gc, &info, poolid, false)) {
            libxl_cpupoolinfo_dispose(&info);
            if (errno != ENOENT) goto out;
            break;
        }

        ptr = libxl__realloc(NOGC, ptr, (i+1) * sizeof(libxl_cpupoolinfo));
        ptr[i] = info;
        poolid = info.poolid + 1;
        /* Don't dispose of info because it will be returned to caller */
    }

    *nb_pool_out = i;

    GC_FREE;
    return ptr;

out:
    libxl_cpupoolinfo_list_free(ptr, i);
    *nb_pool_out = 0;
    GC_FREE;
    return NULL;
}

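/*
 * Fill 'cpumap' with the set of physical cpus not assigned to any
 * cpupool.  The bitmap buffer comes from xc_cpupool_freeinfo() and is
 * sized to cover libxl_get_max_cpus() bits.
 */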
int libxl_get_freecpus(libxl_ctx *ctx, libxl_bitmap *cpumap)
{
    int ncpus;

    ncpus = libxl_get_max_cpus(ctx);
    if (ncpus < 0)
        return ncpus;

    cpumap->map = xc_cpupool_freeinfo(ctx->xch);
    if (cpumap->map == NULL)
        return ERROR_FAIL;

    cpumap->size = (ncpus + 7) / 8;

    return 0;
}

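/*
 * Create a new cpupool with the given name, scheduler and initial set
 * of cpus.  On entry *poolid may be LIBXL_CPUPOOL_POOLID_ANY (or 0)
 * to let the hypervisor pick an id; on success it holds the id
 * actually assigned.  The pool's uuid and name are recorded under
 * /local/pool/<poolid> in xenstore.
 */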
int libxl_cpupool_create(libxl_ctx *ctx, const char *name,
                         libxl_scheduler sched,
                         libxl_bitmap cpumap, libxl_uuid *uuid,
                         uint32_t *poolid)
{
    GC_INIT(ctx);
    int rc;
    int i;
    xs_transaction_t t;
    char *uuid_string;
    uint32_t xcpoolid;

    /* Accept '0' as 'any poolid' for backwards compatibility */
    if ( *poolid == LIBXL_CPUPOOL_POOLID_ANY
         || *poolid == 0 )
        xcpoolid = XC_CPUPOOL_POOLID_ANY;
    else
        xcpoolid = *poolid;

    uuid_string = libxl__uuid2string(gc, *uuid);
    if (!uuid_string) {
        GC_FREE;
        return ERROR_NOMEM;
    }

    rc = xc_cpupool_create(ctx->xch, &xcpoolid, sched);
    if (rc) {
        LOGEV(ERROR, rc, "Could not create cpupool");
        GC_FREE;
        return ERROR_FAIL;
    }
    *poolid = xcpoolid;

    libxl_for_each_bit(i, cpumap)
        if (libxl_bitmap_test(&cpumap, i)) {
            rc = xc_cpupool_addcpu(ctx->xch, *poolid, i);
            if (rc) {
                LOGEV(ERROR, rc, "Error moving cpu to cpupool");
                libxl_cpupool_destroy(ctx, *poolid);
                GC_FREE;
                return ERROR_FAIL;
            }
        }

    for (;;) {
        t = xs_transaction_start(ctx->xsh);

        xs_mkdir(ctx->xsh, t, GCSPRINTF("/local/pool/%d", *poolid));
        libxl__xs_printf(gc, t,
                         GCSPRINTF("/local/pool/%d/uuid", *poolid),
                         "%s", uuid_string);
        libxl__xs_printf(gc, t,
                         GCSPRINTF("/local/pool/%d/name", *poolid),
                         "%s", name);

        if (xs_transaction_end(ctx->xsh, t, 0) || (errno != EAGAIN)) {
            GC_FREE;
            return 0;
        }
    }
}

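/*
 * Destroy an existing, empty cpupool: refuse if any domain is still
 * assigned to it, remove all of its cpus, destroy it in the
 * hypervisor, and delete its /local/pool/<poolid> xenstore node.
 */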
int libxl_cpupool_destroy(libxl_ctx *ctx, uint32_t poolid)
{
    GC_INIT(ctx);
    int rc, i;
    xc_cpupoolinfo_t *info;
    xs_transaction_t t;
    libxl_bitmap cpumap;

    info = xc_cpupool_getinfo(ctx->xch, poolid);
    if (info == NULL) {
        GC_FREE;
        return ERROR_NOMEM;
    }

    rc = ERROR_INVAL;
    if ((info->cpupool_id != poolid) || (info->n_dom))
        goto out;

    rc = libxl_cpu_bitmap_alloc(ctx, &cpumap, 0);
    if (rc)
        goto out;

    memcpy(cpumap.map, info->cpumap, cpumap.size);
    libxl_for_each_bit(i, cpumap)
        if (libxl_bitmap_test(&cpumap, i)) {
            rc = xc_cpupool_removecpu(ctx->xch, poolid, i);
            if (rc) {
                LOGEV(ERROR, rc, "Error removing cpu from cpupool");
                rc = ERROR_FAIL;
                goto out1;
            }
        }

    rc = xc_cpupool_destroy(ctx->xch, poolid);
    if (rc) {
        LOGEV(ERROR, rc, "Could not destroy cpupool");
        rc = ERROR_FAIL;
        goto out1;
    }

    for (;;) {
        t = xs_transaction_start(ctx->xsh);

        xs_rm(ctx->xsh, XBT_NULL, GCSPRINTF("/local/pool/%d", poolid));

        if (xs_transaction_end(ctx->xsh, t, 0) || (errno != EAGAIN))
            break;
    }

    rc = 0;

out1:
    libxl_bitmap_dispose(&cpumap);
out:
    xc_cpupool_infofree(ctx->xch, info);
    GC_FREE;

    return rc;
}

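/*
 * Rename an existing cpupool by rewriting its
 * /local/pool/<poolid>/name node in xenstore; nothing is changed in
 * the hypervisor.
 */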
int libxl_cpupool_rename(libxl_ctx *ctx, const char *name, uint32_t poolid)
{
    GC_INIT(ctx);
    xs_transaction_t t;
    xc_cpupoolinfo_t *info;
    int rc;

    info = xc_cpupool_getinfo(ctx->xch, poolid);
    if (info == NULL) {
        GC_FREE;
        return ERROR_NOMEM;
    }

    rc = ERROR_INVAL;
    if (info->cpupool_id != poolid)
        goto out;

    rc = 0;

    for (;;) {
        t = xs_transaction_start(ctx->xsh);

        libxl__xs_printf(gc, t,
                         GCSPRINTF("/local/pool/%d/name", poolid),
                         "%s", name);

        if (xs_transaction_end(ctx->xsh, t, 0))
            break;

        if (errno == EAGAIN)
            continue;

        rc = ERROR_FAIL;
        break;
    }

out:
    xc_cpupool_infofree(ctx->xch, info);
    GC_FREE;

    return rc;
}

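/* Add a single physical cpu to the given cpupool. */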
int libxl_cpupool_cpuadd(libxl_ctx *ctx, uint32_t poolid, int cpu)
{
    GC_INIT(ctx);
    int rc = 0;

    rc = xc_cpupool_addcpu(ctx->xch, poolid, cpu);
    if (rc) {
        LOGE(ERROR, "Error moving cpu %d to cpupool", cpu);
        rc = ERROR_FAIL;
    }

    GC_FREE;
    return rc;
}

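/*
 * Add every cpu set in 'cpumap' to the cpupool; fails unless all of
 * them could be added.
 */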
int libxl_cpupool_cpuadd_cpumap(libxl_ctx *ctx, uint32_t poolid,
                                const libxl_bitmap *cpumap)
{
    int c, ncpus = 0, rc = 0;

    libxl_for_each_set_bit(c, *cpumap) {
        if (!libxl_cpupool_cpuadd(ctx, poolid, c))
            ncpus++;
    }

    if (ncpus != libxl_bitmap_count_set(cpumap))
        rc = ERROR_FAIL;

    return rc;
}

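/*
 * Add all currently free cpus belonging to NUMA node 'node' to the
 * cpupool; the number of cpus actually added is returned in *cpus.
 */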
int libxl_cpupool_cpuadd_node(libxl_ctx *ctx, uint32_t poolid, int node, int *cpus)
{
    int rc = 0;
    int cpu, nr;
    libxl_bitmap freemap;
    libxl_cputopology *topology;

    if (libxl_get_freecpus(ctx, &freemap)) {
        return ERROR_FAIL;
    }

    topology = libxl_get_cpu_topology(ctx, &nr);
    if (!topology) {
        rc = ERROR_FAIL;
        goto out;
    }

    *cpus = 0;
    for (cpu = 0; cpu < nr; cpu++) {
        if (libxl_bitmap_test(&freemap, cpu) && (topology[cpu].node == node) &&
            !libxl_cpupool_cpuadd(ctx, poolid, cpu)) {
            (*cpus)++;
        }
        libxl_cputopology_dispose(&topology[cpu]);
    }

    free(topology);
out:
    libxl_bitmap_dispose(&freemap);
    return rc;
}

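/* Remove a single physical cpu from the given cpupool. */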
int libxl_cpupool_cpuremove(libxl_ctx *ctx, uint32_t poolid, int cpu)
{
    GC_INIT(ctx);
    int rc = 0;

    rc = xc_cpupool_removecpu(ctx->xch, poolid, cpu);
    if (rc) {
        LOGE(ERROR, "Error removing cpu %d from cpupool", cpu);
        rc = ERROR_FAIL;
    }

    GC_FREE;
    return rc;
}

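/*
 * Remove every cpu set in 'cpumap' from the cpupool; fails unless all
 * of them could be removed.
 */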
int libxl_cpupool_cpuremove_cpumap(libxl_ctx *ctx, uint32_t poolid,
                                   const libxl_bitmap *cpumap)
{
    int c, ncpus = 0, rc = 0;

    libxl_for_each_set_bit(c, *cpumap) {
        if (!libxl_cpupool_cpuremove(ctx, poolid, c))
            ncpus++;
    }

    if (ncpus != libxl_bitmap_count_set(cpumap))
        rc = ERROR_FAIL;

    return rc;
}

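/*
 * Remove from the cpupool all of its cpus that belong to NUMA node
 * 'node'; the number of cpus actually removed is returned in *cpus.
 */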
int libxl_cpupool_cpuremove_node(libxl_ctx *ctx, uint32_t poolid, int node, int *cpus)
{
    int ret = 0;
    int n_pools;
    int p;
    int cpu, nr_cpus;
    libxl_cputopology *topology;
    libxl_cpupoolinfo *poolinfo;

    poolinfo = libxl_list_cpupool(ctx, &n_pools);
    if (!poolinfo) {
        return ERROR_NOMEM;
    }

    topology = libxl_get_cpu_topology(ctx, &nr_cpus);
    if (!topology) {
        ret = ERROR_FAIL;
        goto out;
    }

    *cpus = 0;
    for (p = 0; p < n_pools; p++) {
        if (poolinfo[p].poolid == poolid) {
            for (cpu = 0; cpu < nr_cpus; cpu++) {
                if ((topology[cpu].node == node) &&
                    libxl_bitmap_test(&poolinfo[p].cpumap, cpu) &&
                    !libxl_cpupool_cpuremove(ctx, poolid, cpu)) {
                    (*cpus)++;
                }
            }
        }
    }

    libxl_cputopology_list_free(topology, nr_cpus);

out:
    libxl_cpupoolinfo_list_free(poolinfo, n_pools);

    return ret;
}

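/* Move an existing domain into the given cpupool. */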
int libxl_cpupool_movedomain(libxl_ctx *ctx, uint32_t poolid, uint32_t domid)
{
    GC_INIT(ctx);
    int rc;

    rc = xc_cpupool_movedomain(ctx->xch, poolid, domid);
    if (rc) {
        LOGEVD(ERROR, rc, domid, "Error moving domain to cpupool");
        GC_FREE;
        return ERROR_FAIL;
    }

    GC_FREE;
    return 0;
}

/*
 * Local variables:
 * mode: C
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */