/******************************************************************************
 * xc_cpuid_x86.c
 *
 * Compute cpuid of a domain.
 *
 * Copyright (c) 2008, Citrix Systems, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; If not, see <http://www.gnu.org/licenses/>.
 */

#include <stdlib.h>
#include <stdbool.h>
#include <limits.h>
#include "xc_private.h"
#include "xc_bitops.h"
#include <xen/hvm/params.h>
#include <xen-tools/libs.h>

30 enum {
31 #define XEN_CPUFEATURE(name, value) X86_FEATURE_##name = value,
32 #include <xen/arch-x86/cpufeatureset.h>
33 };
34 
#include <xen/asm/x86-vendors.h>

#include <xen/lib/x86/cpu-policy.h>

/* Bit mask for feature bit @idx within its 32-bit featureset word. */
#define bitmaskof(idx)      (1u << ((idx) & 31))
/* Index of the featureset word containing feature bit @idx. */
#define featureword_of(idx) ((idx) >> 5)

/*
 * Query the CPU levelling capabilities Xen has for the host.
 *
 * On success, writes the capability bitmap into @caps.  Returns 0 on
 * success, or a do_sysctl() error (errno set) on failure, in which case
 * @caps is not written.
 */
int xc_get_cpu_levelling_caps(xc_interface *xch, uint32_t *caps)
{
    DECLARE_SYSCTL;
    int ret;

    sysctl.cmd = XEN_SYSCTL_get_cpu_levelling_caps;
    ret = do_sysctl(xch, &sysctl);

    if ( !ret )
        *caps = sysctl.u.cpu_levelling_caps.caps;

    return ret;
}

/*
 * Retrieve one of Xen's system featuresets, selected by @index
 * (XEN_SYSCTL_cpu_featureset_*).
 *
 * @nr_features is an IN/OUT parameter: on entry, the capacity of
 * @featureset in uint32_t words; on success, updated to the number of
 * words Xen reports.  Returns 0 on success, -1/errno on failure.
 */
int xc_get_cpu_featureset(xc_interface *xch, uint32_t index,
                          uint32_t *nr_features, uint32_t *featureset)
{
    DECLARE_SYSCTL;
    DECLARE_HYPERCALL_BOUNCE(featureset,
                             *nr_features * sizeof(*featureset),
                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    int ret;

    if ( xc_hypercall_bounce_pre(xch, featureset) )
        return -1;

    sysctl.cmd = XEN_SYSCTL_get_cpu_featureset;
    sysctl.u.cpu_featureset.index = index;
    sysctl.u.cpu_featureset.nr_features = *nr_features;
    set_xen_guest_handle(sysctl.u.cpu_featureset.features, featureset);

    ret = do_sysctl(xch, &sysctl);

    xc_hypercall_bounce_post(xch, featureset);

    if ( !ret )
        *nr_features = sysctl.u.cpu_featureset.nr_features;

    return ret;
}

xc_get_cpu_featureset_size(void)83 uint32_t xc_get_cpu_featureset_size(void)
84 {
85     return FEATURESET_NR_ENTRIES;
86 }
87 
xc_get_static_cpu_featuremask(enum xc_static_cpu_featuremask mask)88 const uint32_t *xc_get_static_cpu_featuremask(
89     enum xc_static_cpu_featuremask mask)
90 {
91     static const uint32_t masks[][FEATURESET_NR_ENTRIES] = {
92 #define MASK(x) [XC_FEATUREMASK_ ## x] = INIT_ ## x ## _FEATURES
93 
94         MASK(KNOWN),
95         MASK(SPECIAL),
96         MASK(PV_MAX),
97         MASK(PV_DEF),
98         MASK(HVM_SHADOW_MAX),
99         MASK(HVM_SHADOW_DEF),
100         MASK(HVM_HAP_MAX),
101         MASK(HVM_HAP_DEF),
102 
103 #undef MASK
104     };
105 
106     if ( (unsigned int)mask >= ARRAY_SIZE(masks) )
107         return NULL;
108 
109     return masks[mask];
110 }
111 
/*
 * Ask Xen for the maximum number of CPUID leaves and MSR entries a CPU
 * policy can contain, suitable for sizing caller-allocated buffers.
 *
 * Returns 0 on success with @nr_leaves/@nr_msrs filled, or a do_sysctl()
 * error (errno set) on failure.
 */
int xc_get_cpu_policy_size(xc_interface *xch, uint32_t *nr_leaves,
                           uint32_t *nr_msrs)
{
    struct xen_sysctl sysctl = {};
    int ret;

    /* With NULL guest handles, Xen reports the maximum sizes instead. */
    sysctl.cmd = XEN_SYSCTL_get_cpu_policy;

    ret = do_sysctl(xch, &sysctl);

    if ( !ret )
    {
        *nr_leaves = sysctl.u.cpu_policy.nr_leaves;
        *nr_msrs = sysctl.u.cpu_policy.nr_msrs;
    }

    return ret;
}

/*
 * Retrieve one of Xen's system CPU policies, selected by @index
 * (XEN_SYSCTL_cpu_policy_*).
 *
 * @nr_leaves/@nr_msrs are IN/OUT: on entry, the capacities of @leaves
 * and @msrs; on success, the counts Xen actually wrote.  Returns 0 on
 * success, -1/errno on failure.
 */
int xc_get_system_cpu_policy(xc_interface *xch, uint32_t index,
                             uint32_t *nr_leaves, xen_cpuid_leaf_t *leaves,
                             uint32_t *nr_msrs, xen_msr_entry_t *msrs)
{
    struct xen_sysctl sysctl = {};
    DECLARE_HYPERCALL_BOUNCE(leaves,
                             *nr_leaves * sizeof(*leaves),
                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    DECLARE_HYPERCALL_BOUNCE(msrs,
                             *nr_msrs * sizeof(*msrs),
                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    int ret;

    if ( xc_hypercall_bounce_pre(xch, leaves) ||
         xc_hypercall_bounce_pre(xch, msrs) )
        return -1;

    sysctl.cmd = XEN_SYSCTL_get_cpu_policy;
    sysctl.u.cpu_policy.index = index;
    sysctl.u.cpu_policy.nr_leaves = *nr_leaves;
    set_xen_guest_handle(sysctl.u.cpu_policy.cpuid_policy, leaves);
    sysctl.u.cpu_policy.nr_msrs = *nr_msrs;
    set_xen_guest_handle(sysctl.u.cpu_policy.msr_policy, msrs);

    ret = do_sysctl(xch, &sysctl);

    xc_hypercall_bounce_post(xch, leaves);
    xc_hypercall_bounce_post(xch, msrs);

    if ( !ret )
    {
        *nr_leaves = sysctl.u.cpu_policy.nr_leaves;
        *nr_msrs = sysctl.u.cpu_policy.nr_msrs;
    }

    return ret;
}

/*
 * Retrieve the CPU policy currently applied to domain @domid.
 *
 * @nr_leaves/@nr_msrs are IN/OUT: on entry, the capacities of @leaves
 * and @msrs; on success, the counts Xen actually wrote.  Returns 0 on
 * success, -1/errno on failure.
 */
int xc_get_domain_cpu_policy(xc_interface *xch, uint32_t domid,
                             uint32_t *nr_leaves, xen_cpuid_leaf_t *leaves,
                             uint32_t *nr_msrs, xen_msr_entry_t *msrs)
{
    DECLARE_DOMCTL;
    DECLARE_HYPERCALL_BOUNCE(leaves,
                             *nr_leaves * sizeof(*leaves),
                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    DECLARE_HYPERCALL_BOUNCE(msrs,
                             *nr_msrs * sizeof(*msrs),
                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    int ret;

    if ( xc_hypercall_bounce_pre(xch, leaves) ||
         xc_hypercall_bounce_pre(xch, msrs) )
        return -1;

    domctl.cmd = XEN_DOMCTL_get_cpu_policy;
    domctl.domain = domid;
    domctl.u.cpu_policy.nr_leaves = *nr_leaves;
    set_xen_guest_handle(domctl.u.cpu_policy.cpuid_policy, leaves);
    domctl.u.cpu_policy.nr_msrs = *nr_msrs;
    set_xen_guest_handle(domctl.u.cpu_policy.msr_policy, msrs);

    ret = do_domctl(xch, &domctl);

    xc_hypercall_bounce_post(xch, leaves);
    xc_hypercall_bounce_post(xch, msrs);

    if ( !ret )
    {
        *nr_leaves = domctl.u.cpu_policy.nr_leaves;
        *nr_msrs = domctl.u.cpu_policy.nr_msrs;
    }

    return ret;
}

/*
 * Apply a CPU policy (CPUID leaves and/or MSRs) to domain @domid.
 *
 * On failure, if Xen identifies the offending entry, its coordinates are
 * reported via the optional @err_leaf_p/@err_subleaf_p/@err_msr_p
 * pointers (set to -1 when not applicable).  Returns 0 on success,
 * -1/errno on failure.
 */
int xc_set_domain_cpu_policy(xc_interface *xch, uint32_t domid,
                             uint32_t nr_leaves, xen_cpuid_leaf_t *leaves,
                             uint32_t nr_msrs, xen_msr_entry_t *msrs,
                             uint32_t *err_leaf_p, uint32_t *err_subleaf_p,
                             uint32_t *err_msr_p)
{
    DECLARE_DOMCTL;
    DECLARE_HYPERCALL_BOUNCE(leaves,
                             nr_leaves * sizeof(*leaves),
                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
    DECLARE_HYPERCALL_BOUNCE(msrs,
                             nr_msrs * sizeof(*msrs),
                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
    int ret;

    /* Default the error reports to "no specific entry". */
    if ( err_leaf_p )
        *err_leaf_p = -1;
    if ( err_subleaf_p )
        *err_subleaf_p = -1;
    if ( err_msr_p )
        *err_msr_p = -1;

    if ( xc_hypercall_bounce_pre(xch, leaves) )
        return -1;

    if ( xc_hypercall_bounce_pre(xch, msrs) )
    {
        /* Don't leak the already-bounced leaves buffer. */
        xc_hypercall_bounce_post(xch, leaves);
        return -1;
    }

    domctl.cmd = XEN_DOMCTL_set_cpu_policy;
    domctl.domain = domid;
    domctl.u.cpu_policy.nr_leaves = nr_leaves;
    set_xen_guest_handle(domctl.u.cpu_policy.cpuid_policy, leaves);
    domctl.u.cpu_policy.nr_msrs = nr_msrs;
    set_xen_guest_handle(domctl.u.cpu_policy.msr_policy, msrs);
    domctl.u.cpu_policy.err_leaf = -1;
    domctl.u.cpu_policy.err_subleaf = -1;
    domctl.u.cpu_policy.err_msr = -1;

    ret = do_domctl(xch, &domctl);

    xc_hypercall_bounce_post(xch, leaves);
    xc_hypercall_bounce_post(xch, msrs);

    if ( err_leaf_p )
        *err_leaf_p = domctl.u.cpu_policy.err_leaf;
    if ( err_subleaf_p )
        *err_subleaf_p = domctl.u.cpu_policy.err_subleaf;
    if ( err_msr_p )
        *err_msr_p = domctl.u.cpu_policy.err_msr;

    return ret;
}

compare_leaves(const void * l,const void * r)260 static int compare_leaves(const void *l, const void *r)
261 {
262     const xen_cpuid_leaf_t *lhs = l;
263     const xen_cpuid_leaf_t *rhs = r;
264 
265     if ( lhs->leaf != rhs->leaf )
266         return lhs->leaf < rhs->leaf ? -1 : 1;
267 
268     if ( lhs->subleaf != rhs->subleaf )
269         return lhs->subleaf < rhs->subleaf ? -1 : 1;
270 
271     return 0;
272 }
273 
/*
 * Binary-search @leaves (must be sorted per compare_leaves()) for the
 * (leaf, subleaf) pair named by @xend.  Returns NULL if not present.
 */
static xen_cpuid_leaf_t *find_leaf(
    xen_cpuid_leaf_t *leaves, unsigned int nr_leaves,
    const struct xc_xend_cpuid *xend)
{
    const xen_cpuid_leaf_t key = { xend->leaf, xend->subleaf };

    return bsearch(&key, leaves, nr_leaves, sizeof(*leaves), compare_leaves);
}

/*
 * Apply a xend-style CPUID configuration to domain @domid.
 *
 * Each xc_xend_cpuid entry carries per-register 32-character policy
 * strings ('1'/'0' force a bit, 'x' takes the domain-max value, 'k'/'s'
 * take the host value).  The domain's current policy is transformed
 * accordingly and handed back to Xen.  Returns 0 on success or a
 * negative errno value on failure.
 */
static int xc_cpuid_xend_policy(
    xc_interface *xch, uint32_t domid, const struct xc_xend_cpuid *xend)
{
    int rc;
    xc_dominfo_t di;
    unsigned int nr_leaves, nr_msrs;
    uint32_t err_leaf = -1, err_subleaf = -1, err_msr = -1;
    /*
     * Three full policies.  The host, domain max, and domain current for the
     * domain type.
     */
    xen_cpuid_leaf_t *host = NULL, *max = NULL, *cur = NULL;
    unsigned int nr_host, nr_max, nr_cur;

    if ( xc_domain_getinfo(xch, domid, 1, &di) != 1 ||
         di.domid != domid )
    {
        ERROR("Failed to obtain d%d info", domid);
        rc = -ESRCH;
        goto fail;
    }

    rc = xc_get_cpu_policy_size(xch, &nr_leaves, &nr_msrs);
    if ( rc )
    {
        PERROR("Failed to obtain policy info size");
        rc = -errno;
        goto fail;
    }

    rc = -ENOMEM;
    if ( (host = calloc(nr_leaves, sizeof(*host))) == NULL ||
         (max  = calloc(nr_leaves, sizeof(*max)))  == NULL ||
         (cur  = calloc(nr_leaves, sizeof(*cur)))  == NULL )
    {
        ERROR("Unable to allocate memory for %u CPUID leaves", nr_leaves);
        goto fail;
    }

    /* Get the domain's current policy. */
    nr_msrs = 0;
    nr_cur = nr_leaves;
    rc = xc_get_domain_cpu_policy(xch, domid, &nr_cur, cur, &nr_msrs, NULL);
    if ( rc )
    {
        PERROR("Failed to obtain d%d current policy", domid);
        rc = -errno;
        goto fail;
    }

    /* Get the domain's max policy. */
    nr_msrs = 0;
    nr_max = nr_leaves;
    rc = xc_get_system_cpu_policy(xch, di.hvm ? XEN_SYSCTL_cpu_policy_hvm_max
                                              : XEN_SYSCTL_cpu_policy_pv_max,
                                  &nr_max, max, &nr_msrs, NULL);
    if ( rc )
    {
        PERROR("Failed to obtain %s max policy", di.hvm ? "hvm" : "pv");
        rc = -errno;
        goto fail;
    }

    /* Get the host policy. */
    nr_msrs = 0;
    nr_host = nr_leaves;
    rc = xc_get_system_cpu_policy(xch, XEN_SYSCTL_cpu_policy_host,
                                  &nr_host, host, &nr_msrs, NULL);
    if ( rc )
    {
        PERROR("Failed to obtain host policy");
        rc = -errno;
        goto fail;
    }

    rc = -EINVAL;
    for ( ; xend->leaf != XEN_CPUID_INPUT_UNUSED; ++xend )
    {
        xen_cpuid_leaf_t *cur_leaf = find_leaf(cur, nr_cur, xend);
        const xen_cpuid_leaf_t *max_leaf = find_leaf(max, nr_max, xend);
        const xen_cpuid_leaf_t *host_leaf = find_leaf(host, nr_host, xend);

        if ( cur_leaf == NULL || max_leaf == NULL || host_leaf == NULL )
        {
            ERROR("Missing leaf %#x, subleaf %#x", xend->leaf, xend->subleaf);
            goto fail;
        }

        for ( unsigned int i = 0; i < ARRAY_SIZE(xend->policy); i++ )
        {
            /* Registers a/b/c/d are adjacent uint32_t fields; index them. */
            uint32_t *cur_reg = &cur_leaf->a + i;
            const uint32_t *max_reg = &max_leaf->a + i;
            const uint32_t *host_reg = &host_leaf->a + i;

            if ( xend->policy[i] == NULL )
                continue;

            /* Policy strings are MSB-first: character j is bit (31 - j). */
            for ( unsigned int j = 0; j < 32; j++ )
            {
                bool val;

                if ( xend->policy[i][j] == '1' )
                    val = true;
                else if ( xend->policy[i][j] == '0' )
                    val = false;
                else if ( xend->policy[i][j] == 'x' )
                    val = test_bit(31 - j, max_reg);
                else if ( xend->policy[i][j] == 'k' ||
                          xend->policy[i][j] == 's' )
                    val = test_bit(31 - j, host_reg);
                else
                {
                    ERROR("Bad character '%c' in policy[%d] string '%s'",
                          xend->policy[i][j], i, xend->policy[i]);
                    goto fail;
                }

                clear_bit(31 - j, cur_reg);
                if ( val )
                    set_bit(31 - j, cur_reg);
            }
        }
    }

    /* Feed the transformed current policy back up to Xen. */
    rc = xc_set_domain_cpu_policy(xch, domid, nr_cur, cur, 0, NULL,
                                  &err_leaf, &err_subleaf, &err_msr);
    if ( rc )
    {
        PERROR("Failed to set d%d's policy (err leaf %#x, subleaf %#x, msr %#x)",
               domid, err_leaf, err_subleaf, err_msr);
        rc = -errno;
        goto fail;
    }

    /* Success! */

 fail:
    free(cur);
    free(max);
    free(host);

    return rc;
}

/*
 * Construct and apply a CPUID policy for domain @domid.
 *
 * Starts from the appropriate (PV/HVM) default policy, optionally
 * overlays a caller-supplied @featureset (of @nr_features words),
 * applies restore-time and topology adjustments, then uploads the
 * result.  If @xend is non-NULL, xend-style string overrides are
 * applied afterwards.  Returns 0 on success or a negative errno value.
 */
int xc_cpuid_apply_policy(xc_interface *xch, uint32_t domid, bool restore,
                          const uint32_t *featureset, unsigned int nr_features,
                          bool pae,
                          const struct xc_xend_cpuid *xend)
{
    int rc;
    xc_dominfo_t di;
    unsigned int i, nr_leaves, nr_msrs;
    xen_cpuid_leaf_t *leaves = NULL;
    struct cpuid_policy *p = NULL;
    uint32_t err_leaf = -1, err_subleaf = -1, err_msr = -1;
    uint32_t host_featureset[FEATURESET_NR_ENTRIES] = {};
    uint32_t len = ARRAY_SIZE(host_featureset);

    if ( xc_domain_getinfo(xch, domid, 1, &di) != 1 ||
         di.domid != domid )
    {
        ERROR("Failed to obtain d%d info", domid);
        rc = -ESRCH;
        goto out;
    }

    rc = xc_get_cpu_policy_size(xch, &nr_leaves, &nr_msrs);
    if ( rc )
    {
        PERROR("Failed to obtain policy info size");
        rc = -errno;
        goto out;
    }

    rc = -ENOMEM;
    if ( (leaves = calloc(nr_leaves, sizeof(*leaves))) == NULL ||
         (p = calloc(1, sizeof(*p))) == NULL )
        goto out;

    /* Get the host policy. */
    rc = xc_get_cpu_featureset(xch, XEN_SYSCTL_cpu_featureset_host,
                               &len, host_featureset);
    if ( rc )
    {
        /* Tolerate "buffer too small", as we've got the bits we need. */
        if ( errno == ENOBUFS )
            rc = 0;
        else
        {
            PERROR("Failed to obtain host featureset");
            rc = -errno;
            goto out;
        }
    }

    /* Get the domain's default policy. */
    nr_msrs = 0;
    rc = xc_get_system_cpu_policy(xch, di.hvm ? XEN_SYSCTL_cpu_policy_hvm_default
                                              : XEN_SYSCTL_cpu_policy_pv_default,
                                  &nr_leaves, leaves, &nr_msrs, NULL);
    if ( rc )
    {
        PERROR("Failed to obtain %s default policy", di.hvm ? "hvm" : "pv");
        rc = -errno;
        goto out;
    }

    rc = x86_cpuid_copy_from_buffer(p, leaves, nr_leaves,
                                    &err_leaf, &err_subleaf);
    if ( rc )
    {
        ERROR("Failed to deserialise CPUID (err leaf %#x, subleaf %#x) (%d = %s)",
              err_leaf, err_subleaf, -rc, strerror(-rc));
        goto out;
    }

    /*
     * Account for features which have been disabled by default since Xen 4.13,
     * so migrated-in VM's don't risk seeing features disappearing.
     */
    if ( restore )
    {
        p->basic.rdrand = test_bit(X86_FEATURE_RDRAND, host_featureset);

        if ( di.hvm )
        {
            p->feat.mpx = test_bit(X86_FEATURE_MPX, host_featureset);
        }
    }

    if ( featureset )
    {
        uint32_t disabled_features[FEATURESET_NR_ENTRIES],
            feat[FEATURESET_NR_ENTRIES] = {};
        static const uint32_t deep_features[] = INIT_DEEP_FEATURES;
        unsigned int i, b;

        /*
         * The user supplied featureset may be shorter or longer than
         * FEATURESET_NR_ENTRIES.  Shorter is fine, and we will zero-extend.
         * Longer is fine, so long as it only padded with zeros.
         */
        unsigned int user_len = min(FEATURESET_NR_ENTRIES + 0u, nr_features);

        /* Check for truncated set bits. */
        rc = -EOPNOTSUPP;
        for ( i = user_len; i < nr_features; ++i )
            if ( featureset[i] != 0 )
                goto out;

        memcpy(feat, featureset, sizeof(*featureset) * user_len);

        /* Disable deep dependencies of disabled features. */
        for ( i = 0; i < ARRAY_SIZE(disabled_features); ++i )
            disabled_features[i] = ~feat[i] & deep_features[i];

        for ( b = 0; b < sizeof(disabled_features) * CHAR_BIT; ++b )
        {
            const uint32_t *dfs;

            if ( !test_bit(b, disabled_features) ||
                 !(dfs = x86_cpuid_lookup_deep_deps(b)) )
                continue;

            for ( i = 0; i < ARRAY_SIZE(disabled_features); ++i )
            {
                feat[i] &= ~dfs[i];
                disabled_features[i] &= ~dfs[i];
            }
        }

        cpuid_featureset_to_policy(feat, p);
    }
    else
    {
        if ( di.hvm )
            p->basic.pae = pae;
    }

    if ( !di.hvm )
    {
        /*
         * On hardware without CPUID Faulting, PV guests see real topology.
         * As a consequence, they also need to see the host htt/cmp fields.
         */
        p->basic.htt       = test_bit(X86_FEATURE_HTT, host_featureset);
        p->extd.cmp_legacy = test_bit(X86_FEATURE_CMP_LEGACY, host_featureset);
    }
    else
    {
        /*
         * Topology for HVM guests is entirely controlled by Xen.  For now, we
         * hardcode APIC_ID = vcpu_id * 2 to give the illusion of no SMT.
         */
        p->basic.htt = true;
        p->extd.cmp_legacy = false;

        /*
         * Leaf 1 EBX[23:16] is Maximum Logical Processors Per Package.
         * Update to reflect vLAPIC_ID = vCPU_ID * 2, but make sure to avoid
         * overflow.
         */
        if ( !(p->basic.lppp & 0x80) )
            p->basic.lppp *= 2;

        switch ( p->x86_vendor )
        {
        case X86_VENDOR_INTEL:
            /* Bounds-check i before reading subleaf[i] to avoid overrun. */
            for ( i = 0; (i < ARRAY_SIZE(p->cache.raw) &&
                          p->cache.subleaf[i].type); ++i )
            {
                p->cache.subleaf[i].cores_per_package =
                    (p->cache.subleaf[i].cores_per_package << 1) | 1;
                p->cache.subleaf[i].threads_per_cache = 0;
            }
            break;

        case X86_VENDOR_AMD:
        case X86_VENDOR_HYGON:
            /*
             * Leaf 0x80000008 ECX[15:12] is ApicIdCoreSize.
             * Leaf 0x80000008 ECX[7:0] is NumberOfCores (minus one).
             * Update to reflect vLAPIC_ID = vCPU_ID * 2.  But avoid
             * - overflow,
             * - going out of sync with leaf 1 EBX[23:16],
             * - incrementing ApicIdCoreSize when it's zero (which changes the
             *   meaning of bits 7:0).
             *
             * UPDATE: In addition to avoiding overflow, some
             * proprietary operating systems have trouble with
             * apic_id_size values greater than 7.  Limit the value to
             * 7 for now.
             */
            if ( p->extd.nc < 0x7f )
            {
                if ( p->extd.apic_id_size != 0 && p->extd.apic_id_size < 0x7 )
                    p->extd.apic_id_size++;

                p->extd.nc = (p->extd.nc << 1) | 1;
            }
            break;
        }

        /*
         * These settings are necessary to cause earlier HVM_PARAM_NESTEDHVM /
         * XEN_DOMCTL_disable_migrate settings to be reflected correctly in
         * CPUID.  Xen will discard these bits if configuration hasn't been
         * set for the domain.
         */
        p->extd.itsc = true;
        p->basic.vmx = true;
        p->extd.svm = true;
    }

    rc = x86_cpuid_copy_to_buffer(p, leaves, &nr_leaves);
    if ( rc )
    {
        ERROR("Failed to serialise CPUID (%d = %s)", -rc, strerror(-rc));
        goto out;
    }

    rc = xc_set_domain_cpu_policy(xch, domid, nr_leaves, leaves, 0, NULL,
                                  &err_leaf, &err_subleaf, &err_msr);
    if ( rc )
    {
        PERROR("Failed to set d%d's policy (err leaf %#x, subleaf %#x, msr %#x)",
               domid, err_leaf, err_subleaf, err_msr);
        rc = -errno;
        goto out;
    }

    if ( xend && (rc = xc_cpuid_xend_policy(xch, domid, xend)) )
        goto out;

    rc = 0;

out:
    free(p);
    free(leaves);

    return rc;
}