#include "private.h"

#include <xen/lib/x86/cpuid.h>

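/* Zero leaves first..last inclusive, if the range is non-empty. */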
static void zero_leaves(struct cpuid_leaf *l,
                        unsigned int first, unsigned int last)
{
    if ( first <= last )
        memset(&l[first], 0, sizeof(*l) * (last - first + 1));
}

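/*
 * Map the CPUID leaf 0 vendor string, as returned in %ebx, %ecx and %edx,
 * to an X86_VENDOR_* constant, or X86_VENDOR_UNKNOWN if unrecognised.
 */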
unsigned int x86_cpuid_lookup_vendor(uint32_t ebx, uint32_t ecx, uint32_t edx)
{
    switch ( ebx )
    {
    case X86_VENDOR_INTEL_EBX:
        if ( ecx == X86_VENDOR_INTEL_ECX &&
             edx == X86_VENDOR_INTEL_EDX )
            return X86_VENDOR_INTEL;
        break;

    case X86_VENDOR_AMD_EBX:
        if ( ecx == X86_VENDOR_AMD_ECX &&
             edx == X86_VENDOR_AMD_EDX )
            return X86_VENDOR_AMD;
        break;

    case X86_VENDOR_CENTAUR_EBX:
        if ( ecx == X86_VENDOR_CENTAUR_ECX &&
             edx == X86_VENDOR_CENTAUR_EDX )
            return X86_VENDOR_CENTAUR;
        break;

    case X86_VENDOR_SHANGHAI_EBX:
        if ( ecx == X86_VENDOR_SHANGHAI_ECX &&
             edx == X86_VENDOR_SHANGHAI_EDX )
            return X86_VENDOR_SHANGHAI;
        break;

    case X86_VENDOR_HYGON_EBX:
        if ( ecx == X86_VENDOR_HYGON_ECX &&
             edx == X86_VENDOR_HYGON_EDX )
            return X86_VENDOR_HYGON;
        break;
    }

    return X86_VENDOR_UNKNOWN;
}

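/* Human-readable form of an X86_VENDOR_* constant. */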
const char *x86_cpuid_vendor_to_str(unsigned int vendor)
{
    switch ( vendor )
    {
    case X86_VENDOR_INTEL:    return "Intel";
    case X86_VENDOR_AMD:      return "AMD";
    case X86_VENDOR_CENTAUR:  return "Centaur";
    case X86_VENDOR_SHANGHAI: return "Shanghai";
    case X86_VENDOR_HYGON:    return "Hygon";
    default:                  return "Unknown";
    }
}

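/* Recalculate state in @p which is derived from the raw leaf data. */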
void x86_cpuid_policy_recalc_synth(struct cpuid_policy *p)
{
    p->x86_vendor = x86_cpuid_lookup_vendor(
        p->basic.vendor_ebx, p->basic.vendor_ecx, p->basic.vendor_edx);
}

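/*
 * Read this CPU's raw CPUID data into @p, including all implemented
 * subleaves of the multi-invocation leaves (4, 7, 0xb and 0xd) and the
 * extended leaves.
 */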
void x86_cpuid_policy_fill_native(struct cpuid_policy *p)
{
    unsigned int i;

    cpuid_leaf(0, &p->basic.raw[0]);
    for ( i = 1; i <= MIN(p->basic.max_leaf,
                          ARRAY_SIZE(p->basic.raw) - 1); ++i )
    {
        switch ( i )
        {
        case 0x4: case 0x7: case 0xb: case 0xd:
            /* Multi-invocation leaves.  Deferred. */
            continue;
        }

        cpuid_leaf(i, &p->basic.raw[i]);
    }

    if ( p->basic.max_leaf >= 4 )
    {
        for ( i = 0; i < ARRAY_SIZE(p->cache.raw); ++i )
        {
            union {
                struct cpuid_leaf l;
                struct cpuid_cache_leaf c;
            } u;

            cpuid_count_leaf(4, i, &u.l);

            if ( u.c.type == 0 )
                break;

            p->cache.subleaf[i] = u.c;
        }

        /*
         * The choice of CPUID_GUEST_NR_CACHE is arbitrary.  It is expected
         * that it will eventually need increasing for future hardware.
         */
#ifdef __XEN__
        if ( i == ARRAY_SIZE(p->cache.raw) )
            printk(XENLOG_WARNING
                   "CPUID: Insufficient Leaf 4 space for this hardware\n");
#endif
    }

    if ( p->basic.max_leaf >= 7 )
    {
        cpuid_count_leaf(7, 0, &p->feat.raw[0]);

        for ( i = 1; i <= MIN(p->feat.max_subleaf,
                              ARRAY_SIZE(p->feat.raw) - 1); ++i )
            cpuid_count_leaf(7, i, &p->feat.raw[i]);
    }

    if ( p->basic.max_leaf >= 0xb )
    {
        union {
            struct cpuid_leaf l;
            struct cpuid_topo_leaf t;
        } u;

        for ( i = 0; i < ARRAY_SIZE(p->topo.raw); ++i )
        {
            cpuid_count_leaf(0xb, i, &u.l);

            if ( u.t.type == 0 )
                break;

            p->topo.subleaf[i] = u.t;
        }

        /*
         * The choice of CPUID_GUEST_NR_TOPO is per the manual.  It may need
         * to grow for future hardware.
         */
#ifdef __XEN__
        if ( i == ARRAY_SIZE(p->topo.raw) &&
             (cpuid_count_leaf(0xb, i, &u.l), u.t.type != 0) )
            printk(XENLOG_WARNING
                   "CPUID: Insufficient Leaf 0xb space for this hardware\n");
#endif
    }

    if ( p->basic.max_leaf >= 0xd )
    {
        uint64_t xstates;

        cpuid_count_leaf(0xd, 0, &p->xstate.raw[0]);
        cpuid_count_leaf(0xd, 1, &p->xstate.raw[1]);

        xstates = cpuid_policy_xstates(p);

        /* This logic will probably need adjusting when XCR0[63] gets used. */
        BUILD_BUG_ON(ARRAY_SIZE(p->xstate.raw) > 63);

        for ( i = 2; i < min_t(unsigned int, 63,
                               ARRAY_SIZE(p->xstate.raw)); ++i )
        {
            if ( xstates & (1ul << i) )
                cpuid_count_leaf(0xd, i, &p->xstate.raw[i]);
        }
    }

    /* Extended leaves. */
    cpuid_leaf(0x80000000, &p->extd.raw[0]);
    for ( i = 1; i <= MIN(p->extd.max_leaf & 0xffffU,
                          ARRAY_SIZE(p->extd.raw) - 1); ++i )
        cpuid_leaf(0x80000000 + i, &p->extd.raw[i]);

    /* Don't report leaves from a possible lower-level hypervisor, for now. */
    p->hv_limit = 0;
    p->hv2_limit = 0;

    x86_cpuid_policy_recalc_synth(p);
}

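/*
 * Zero every leaf and subleaf in @p which lies above the limits the policy
 * itself advertises (max_leaf, max_subleaf, the enabled xstates, etc.).
 */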
void x86_cpuid_policy_clear_out_of_range_leaves(struct cpuid_policy *p)
{
    unsigned int i;

    zero_leaves(p->basic.raw, p->basic.max_leaf + 1,
                ARRAY_SIZE(p->basic.raw) - 1);

    if ( p->basic.max_leaf < 4 )
        memset(p->cache.raw, 0, sizeof(p->cache.raw));
    else
    {
        for ( i = 0; (i < ARRAY_SIZE(p->cache.raw) &&
                      p->cache.subleaf[i].type); ++i )
            ;

        zero_leaves(p->cache.raw, i, ARRAY_SIZE(p->cache.raw) - 1);
    }

    if ( p->basic.max_leaf < 7 )
        memset(p->feat.raw, 0, sizeof(p->feat.raw));
    else
        zero_leaves(p->feat.raw, p->feat.max_subleaf + 1,
                    ARRAY_SIZE(p->feat.raw) - 1);

    if ( p->basic.max_leaf < 0xb )
        memset(p->topo.raw, 0, sizeof(p->topo.raw));
    else
    {
        for ( i = 0; (i < ARRAY_SIZE(p->topo.raw) &&
                      p->topo.subleaf[i].type); ++i )
            ;

        zero_leaves(p->topo.raw, i, ARRAY_SIZE(p->topo.raw) - 1);
    }

    if ( p->basic.max_leaf < 0xd || !cpuid_policy_xstates(p) )
        memset(p->xstate.raw, 0, sizeof(p->xstate.raw));
    else
    {
        /* This logic will probably need adjusting when XCR0[63] gets used. */
        BUILD_BUG_ON(ARRAY_SIZE(p->xstate.raw) > 63);

        /* First two leaves always valid.  Rest depend on xstates. */
        i = max(2, 64 - __builtin_clzll(cpuid_policy_xstates(p)));

        zero_leaves(p->xstate.raw, i,
                    ARRAY_SIZE(p->xstate.raw) - 1);
    }

    zero_leaves(p->extd.raw, (p->extd.max_leaf & 0xffff) + 1,
                ARRAY_SIZE(p->extd.raw) - 1);
}

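/*
 * Look up the deep dependencies of @feature.  Returns the featureset of
 * features which depend on @feature, or NULL if @feature has no deep
 * dependencies.
 */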
const uint32_t *x86_cpuid_lookup_deep_deps(uint32_t feature)
{
    static const uint32_t deep_features[] = INIT_DEEP_FEATURES;
    static const struct {
        uint32_t feature;
        uint32_t fs[FEATURESET_NR_ENTRIES];
    } deep_deps[] = INIT_DEEP_DEPS;
    unsigned int start = 0, end = ARRAY_SIZE(deep_deps);

    BUILD_BUG_ON(ARRAY_SIZE(deep_deps) != NR_DEEP_DEPS);

    /* Fast early exit. */
    if ( !test_bit(feature, deep_features) )
        return NULL;

    /* deep_deps[] is sorted.  Perform a binary search. */
    while ( start < end )
    {
        unsigned int mid = start + ((end - start) / 2);

        if ( deep_deps[mid].feature > feature )
            end = mid;
        else if ( deep_deps[mid].feature < feature )
            start = mid + 1;
        else
            return deep_deps[mid].fs;
    }

    return NULL;
}

/*
 * Copy a single cpuid_leaf into a provided xen_cpuid_leaf_t buffer,
 * performing boundary checking against the buffer size.
 */
static int copy_leaf_to_buffer(uint32_t leaf, uint32_t subleaf,
                               const struct cpuid_leaf *data,
                               cpuid_leaf_buffer_t leaves,
                               uint32_t *curr_entry, const uint32_t nr_entries)
{
    const xen_cpuid_leaf_t val = {
        leaf, subleaf, data->a, data->b, data->c, data->d,
    };

    if ( *curr_entry == nr_entries )
        return -ENOBUFS;

    if ( copy_to_buffer_offset(leaves, *curr_entry, &val, 1) )
        return -EFAULT;

    ++*curr_entry;

    return 0;
}

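/*
 * Serialise @p into the caller-provided buffer @leaves.  On success,
 * @nr_entries_p is updated to the number of entries written.  Fails with
 * -ENOBUFS if the buffer is too small, or -EFAULT on a copying error.
 */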
int x86_cpuid_copy_to_buffer(const struct cpuid_policy *p,
                             cpuid_leaf_buffer_t leaves, uint32_t *nr_entries_p)
{
    const uint32_t nr_entries = *nr_entries_p;
    uint32_t curr_entry = 0, leaf, subleaf;

#define COPY_LEAF(l, s, data)                                       \
    ({                                                              \
        int ret;                                                    \
                                                                    \
        if ( (ret = copy_leaf_to_buffer(                            \
                  l, s, data, leaves, &curr_entry, nr_entries)) )   \
            return ret;                                             \
    })

    /* Basic leaves. */
    for ( leaf = 0; leaf <= MIN(p->basic.max_leaf,
                                ARRAY_SIZE(p->basic.raw) - 1); ++leaf )
    {
        switch ( leaf )
        {
        case 0x4:
            for ( subleaf = 0; subleaf < ARRAY_SIZE(p->cache.raw); ++subleaf )
            {
                COPY_LEAF(leaf, subleaf, &p->cache.raw[subleaf]);

                if ( p->cache.subleaf[subleaf].type == 0 )
                    break;
            }
            break;

        case 0x7:
            for ( subleaf = 0;
                  subleaf <= MIN(p->feat.max_subleaf,
                                 ARRAY_SIZE(p->feat.raw) - 1); ++subleaf )
                COPY_LEAF(leaf, subleaf, &p->feat.raw[subleaf]);
            break;

        case 0xb:
            for ( subleaf = 0; subleaf < ARRAY_SIZE(p->topo.raw); ++subleaf )
            {
                COPY_LEAF(leaf, subleaf, &p->topo.raw[subleaf]);

                if ( p->topo.subleaf[subleaf].type == 0 )
                    break;
            }
            break;

        case 0xd:
        {
            uint64_t xstates = cpuid_policy_xstates(p);

            COPY_LEAF(leaf, 0, &p->xstate.raw[0]);
            COPY_LEAF(leaf, 1, &p->xstate.raw[1]);

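            /*
             * Subleaves 2 and up are serialised while any higher bit in
             * xstates remains set, i.e. up to the highest enabled xstate
             * component.
             */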
            for ( xstates >>= 2, subleaf = 2;
                  xstates && subleaf < ARRAY_SIZE(p->xstate.raw);
                  xstates >>= 1, ++subleaf )
                COPY_LEAF(leaf, subleaf, &p->xstate.raw[subleaf]);
            break;
        }

        default:
            COPY_LEAF(leaf, XEN_CPUID_NO_SUBLEAF, &p->basic.raw[leaf]);
            break;
        }
    }

    /* TODO: Port Xen and Viridian leaves to the new CPUID infrastructure. */
    COPY_LEAF(0x40000000, XEN_CPUID_NO_SUBLEAF,
              &(struct cpuid_leaf){ p->hv_limit });
    COPY_LEAF(0x40000100, XEN_CPUID_NO_SUBLEAF,
              &(struct cpuid_leaf){ p->hv2_limit });

    /* Extended leaves. */
    for ( leaf = 0; leaf <= MIN(p->extd.max_leaf & 0xfffful,
                                ARRAY_SIZE(p->extd.raw) - 1); ++leaf )
        COPY_LEAF(0x80000000 | leaf, XEN_CPUID_NO_SUBLEAF, &p->extd.raw[leaf]);

#undef COPY_LEAF

    *nr_entries_p = curr_entry;

    return 0;
}

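/*
 * Deserialise the contents of @leaves into @p.  Returns -E2BIG if too many
 * entries are passed, -EFAULT on a copying error, or -ERANGE for an
 * unrecognised leaf/subleaf, in which case @err_leaf and @err_subleaf (when
 * non-NULL) identify the offending entry.
 */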
int x86_cpuid_copy_from_buffer(struct cpuid_policy *p,
                               const cpuid_leaf_buffer_t leaves,
                               uint32_t nr_entries, uint32_t *err_leaf,
                               uint32_t *err_subleaf)
{
    unsigned int i;
    xen_cpuid_leaf_t data;

    if ( err_leaf )
        *err_leaf = -1;
    if ( err_subleaf )
        *err_subleaf = -1;

    /*
     * A well-formed caller is expected to pass an array with leaves in
     * order, and without any repetitions.  However, due to per-vendor
     * differences, and in the case of upgrade or levelled scenarios, we
     * typically expect fewer than MAX leaves to be passed.
     *
     * Detecting repeated entries is prohibitively complicated, so we don't
     * bother.  That said, one way or another, if more than MAX leaves are
     * passed, something is wrong.
     */
    if ( nr_entries > CPUID_MAX_SERIALISED_LEAVES )
        return -E2BIG;

    for ( i = 0; i < nr_entries; ++i )
    {
        struct cpuid_leaf l;

        if ( copy_from_buffer_offset(&data, leaves, i, 1) )
            return -EFAULT;

        l = (struct cpuid_leaf){ data.a, data.b, data.c, data.d };

        switch ( data.leaf )
        {
        case 0 ... ARRAY_SIZE(p->basic.raw) - 1:
            switch ( data.leaf )
            {
            case 0x4:
                if ( data.subleaf >= ARRAY_SIZE(p->cache.raw) )
                    goto out_of_range;

                array_access_nospec(p->cache.raw, data.subleaf) = l;
                break;

            case 0x7:
                if ( data.subleaf >= ARRAY_SIZE(p->feat.raw) )
                    goto out_of_range;

                array_access_nospec(p->feat.raw, data.subleaf) = l;
                break;

            case 0xb:
                if ( data.subleaf >= ARRAY_SIZE(p->topo.raw) )
                    goto out_of_range;

                array_access_nospec(p->topo.raw, data.subleaf) = l;
                break;

            case 0xd:
                if ( data.subleaf >= ARRAY_SIZE(p->xstate.raw) )
                    goto out_of_range;

                array_access_nospec(p->xstate.raw, data.subleaf) = l;
                break;

            default:
                if ( data.subleaf != XEN_CPUID_NO_SUBLEAF )
                    goto out_of_range;

                array_access_nospec(p->basic.raw, data.leaf) = l;
                break;
            }
            break;

        case 0x40000000:
            if ( data.subleaf != XEN_CPUID_NO_SUBLEAF )
                goto out_of_range;

            p->hv_limit = l.a;
            break;

        case 0x40000100:
            if ( data.subleaf != XEN_CPUID_NO_SUBLEAF )
                goto out_of_range;

            p->hv2_limit = l.a;
            break;

        case 0x80000000 ... 0x80000000 + ARRAY_SIZE(p->extd.raw) - 1:
            if ( data.subleaf != XEN_CPUID_NO_SUBLEAF )
                goto out_of_range;

            array_access_nospec(p->extd.raw, data.leaf & 0xffff) = l;
            break;

        default:
            goto out_of_range;
        }
    }

    x86_cpuid_policy_recalc_synth(p);

    return 0;

 out_of_range:
    if ( err_leaf )
        *err_leaf = data.leaf;
    if ( err_subleaf )
        *err_subleaf = data.subleaf;

    return -ERANGE;
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */