/* Portions are: Copyright (c) 1994 Linus Torvalds */

#ifndef __ASM_X86_PROCESSOR_H
#define __ASM_X86_PROCESSOR_H

#ifndef __ASSEMBLY__
#include <xen/cache.h>
#include <xen/types.h>
#include <xen/smp.h>
#include <xen/percpu.h>
#include <asm/types.h>
#include <asm/cpufeature.h>
#include <asm/desc.h>
#endif

#include <asm/x86-defns.h>
#include <asm/x86-vendors.h>
/*
 * Trap/fault mnemonics.
 */
#define TRAP_divide_error      0
#define TRAP_debug             1
#define TRAP_nmi               2
#define TRAP_int3              3
#define TRAP_overflow          4
#define TRAP_bounds            5
#define TRAP_invalid_op        6
#define TRAP_no_device         7
#define TRAP_double_fault      8
#define TRAP_copro_seg         9
#define TRAP_invalid_tss      10
#define TRAP_no_segment       11
#define TRAP_stack_error      12
#define TRAP_gp_fault         13
#define TRAP_page_fault       14
#define TRAP_spurious_int     15
#define TRAP_copro_error      16
#define TRAP_alignment_check  17
#define TRAP_machine_check    18
#define TRAP_simd_error       19
#define TRAP_virtualisation   20
#define TRAP_nr               32

#define TRAP_HAVE_EC X86_EXC_HAVE_EC

/* Set for entry via SYSCALL. Informs return code to use SYSRETQ not IRETQ. */
/* NB. Same as VGCF_in_syscall. No bits in common with any other TRAP_ defn. */
#define TRAP_syscall         256

/* Boolean return code: the reason for a fault has been fixed. */
#define EXCRET_fault_fixed 1

/* 'trap_bounce' flags values */
#define TBF_EXCEPTION          1
#define TBF_EXCEPTION_ERRCODE  2
#define TBF_INTERRUPT          8

/* 'arch_vcpu' flags values */
#define _TF_kernel_mode        0
#define TF_kernel_mode         (1<<_TF_kernel_mode)

/* #PF error code values. */
#define PFEC_page_present   (_AC(1,U) << 0)
#define PFEC_write_access   (_AC(1,U) << 1)
#define PFEC_user_mode      (_AC(1,U) << 2)
#define PFEC_reserved_bit   (_AC(1,U) << 3)
#define PFEC_insn_fetch     (_AC(1,U) << 4)
#define PFEC_prot_key       (_AC(1,U) << 5)
#define PFEC_shstk          (_AC(1,U) << 6)
#define PFEC_arch_mask      (_AC(0xffff,U)) /* Architectural PFEC values. */
/* Internally used only flags. */
#define PFEC_page_paged     (1U<<16)
#define PFEC_page_shared    (1U<<17)
#define PFEC_implicit       (1U<<18) /* Pagewalk input for ldt/gdt/idt/tr accesses. */
#define PFEC_synth_mask     (~PFEC_arch_mask) /* Synthetic PFEC values. */
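
/*
 * Illustrative sketch (not an interface defined here): fault-handling code
 * classifies a #PF by combining these bits, e.g. a user-mode write to a
 * present page:
 *
 *     if ( (pfec & (PFEC_user_mode | PFEC_write_access | PFEC_page_present))
 *          == (PFEC_user_mode | PFEC_write_access | PFEC_page_present) )
 *         handle as a protection write fault;
 */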

/* Other exception error code values. */
#define X86_XEC_EXT         (_AC(1,U) << 0)
#define X86_XEC_IDT         (_AC(1,U) << 1)
#define X86_XEC_TI          (_AC(1,U) << 2)

#define XEN_MINIMAL_CR4 (X86_CR4_PGE | X86_CR4_PAE)

#define XEN_CR4_PV32_BITS (X86_CR4_SMEP|X86_CR4_SMAP)

/* Common SYSCALL parameters. */
#define XEN_MSR_STAR (((uint64_t)FLAT_RING3_CS32 << 48) |   \
                      ((uint64_t)__HYPERVISOR_CS << 32))
#define XEN_SYSCALL_MASK (X86_EFLAGS_AC|X86_EFLAGS_VM|X86_EFLAGS_RF|    \
                          X86_EFLAGS_NT|X86_EFLAGS_DF|X86_EFLAGS_IF|    \
                          X86_EFLAGS_TF)
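/*
 * NB: the CPU clears any EFLAGS bit set in the SYSCALL flag-mask MSR
 * (SFMASK) on SYSCALL entry, so all of the flags above are clear on entry
 * to Xen.
 */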

/*
 * Host IA32_CR_PAT value to cover all memory types.  This is not the default
 * MSR_PAT value, and is part of the ABI with PV guests.
 */
#define XEN_MSR_PAT _AC(0x050100070406, ULL)
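
/*
 * Decoded for illustration (one byte per entry, low byte = PAT0):
 *   00 00 05 01 00 07 04 06
 *   => PAT0=WB, PAT1=WT, PAT2=UC-, PAT3=UC, PAT4=WC, PAT5=WP, PAT6/7=UC
 * i.e. every architectural memory type is reachable via some PAT index.
 */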

#ifndef __ASSEMBLY__

struct domain;
struct vcpu;

struct x86_cpu_id {
    uint16_t vendor;
    uint16_t family;
    uint16_t model;
    uint16_t feature;   /* bit index */
    const void *driver_data;
};

struct cpuinfo_x86 {
    __u8 x86;            /* CPU family */
    __u8 x86_vendor;     /* CPU vendor */
    __u8 x86_model;
    __u8 x86_mask;
    int  cpuid_level;    /* Maximum supported CPUID level, -1=no CPUID */
    __u32 extended_cpuid_level; /* Maximum supported CPUID extended level */
    unsigned int x86_capability[NCAPINTS];
    char x86_vendor_id[16];
    char x86_model_id[64];
    int  x86_cache_size; /* in KB - valid for CPUs which support this call */
    int  x86_cache_alignment;    /* In bytes */
    __u32 x86_max_cores; /* cpuid returned max cores value */
    __u32 booted_cores;  /* number of cores as seen by OS */
    __u32 x86_num_siblings; /* cpuid logical cpus per chip value */
    __u32 apicid;
    __u32 phys_proc_id;    /* package ID of each logical CPU */
    __u32 cpu_core_id;     /* core ID of each logical CPU */
    __u32 compute_unit_id; /* AMD compute unit ID of each logical CPU */
    unsigned short x86_clflush_size;
} __cacheline_aligned;

/*
 * Capabilities of CPUs.
 */

extern struct cpuinfo_x86 boot_cpu_data;

extern struct cpuinfo_x86 cpu_data[];
#define current_cpu_data cpu_data[smp_processor_id()]

extern bool probe_cpuid_faulting(void);
extern void ctxt_switch_levelling(const struct vcpu *next);
extern void (*ctxt_switch_masking)(const struct vcpu *next);

extern bool_t opt_cpu_info;
extern u32 trampoline_efer;
extern u64 trampoline_misc_enable_off;

/* Maximum width of physical addresses supported by the hardware. */
extern unsigned int paddr_bits;
/* Max physical address width supported within HAP guests. */
extern unsigned int hap_paddr_bits;
/* Maximum width of virtual addresses supported by the hardware. */
extern unsigned int vaddr_bits;

extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id table[]);

extern void identify_cpu(struct cpuinfo_x86 *);
extern void setup_clear_cpu_cap(unsigned int);
extern void setup_force_cpu_cap(unsigned int);
extern bool is_forced_cpu_cap(unsigned int);
extern void print_cpu_info(unsigned int cpu);
extern void init_intel_cacheinfo(struct cpuinfo_x86 *c);

#define cpu_to_core(_cpu)   (cpu_data[_cpu].cpu_core_id)
#define cpu_to_socket(_cpu) (cpu_data[_cpu].phys_proc_id)

unsigned int apicid_to_socket(unsigned int);

static inline int cpu_nr_siblings(unsigned int cpu)
{
    return cpu_data[cpu].x86_num_siblings;
}

/*
 * Generic CPUID function.  Clear %ecx since some CPUs (Cyrix MII) do not
 * set or clear it, resulting in stale register contents being returned.
 */
#define cpuid(_op,_eax,_ebx,_ecx,_edx)          \
    asm volatile ( "cpuid"                      \
          : "=a" (*(int *)(_eax)),              \
            "=b" (*(int *)(_ebx)),              \
            "=c" (*(int *)(_ecx)),              \
            "=d" (*(int *)(_edx))               \
          : "0" (_op), "2" (0) )
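
/*
 * Usage sketch (illustrative only): fetch the basic vendor leaf.
 *
 *     unsigned int max_leaf, b, c, d;
 *
 *     cpuid(0, &max_leaf, &b, &c, &d);  b/d/c then hold the vendor string.
 */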

/* Some CPUID calls want 'count' to be placed in ecx. */
static inline void cpuid_count(
    unsigned int op,
    unsigned int count,
    unsigned int *eax,
    unsigned int *ebx,
    unsigned int *ecx,
    unsigned int *edx)
{
    asm volatile ( "cpuid"
          : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
          : "0" (op), "c" (count) );
}
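
/*
 * Usage sketch (illustrative only): the structured extended feature flags
 * live in leaf 7, subleaf 0.
 *
 *     unsigned int a, b, c, d;
 *
 *     cpuid_count(7, 0, &a, &b, &c, &d);  b now holds CPUID.7[0].EBX
 */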

/*
 * CPUID functions returning a single datum.
 */
static always_inline unsigned int cpuid_eax(unsigned int op)
{
    unsigned int eax;

    asm volatile ( "cpuid"
          : "=a" (eax)
          : "0" (op)
          : "bx", "cx", "dx" );
    return eax;
}

static always_inline unsigned int cpuid_ebx(unsigned int op)
{
    unsigned int eax, ebx;

    asm volatile ( "cpuid"
          : "=a" (eax), "=b" (ebx)
          : "0" (op)
          : "cx", "dx" );
    return ebx;
}

static always_inline unsigned int cpuid_ecx(unsigned int op)
{
    unsigned int eax, ecx;

    asm volatile ( "cpuid"
          : "=a" (eax), "=c" (ecx)
          : "0" (op)
          : "bx", "dx" );
    return ecx;
}

static always_inline unsigned int cpuid_edx(unsigned int op)
{
    unsigned int eax, edx;

    asm volatile ( "cpuid"
          : "=a" (eax), "=d" (edx)
          : "0" (op)
          : "bx", "cx" );
    return edx;
}

static always_inline unsigned int cpuid_count_ebx(
    unsigned int leaf, unsigned int subleaf)
{
    unsigned int ebx, tmp;

    cpuid_count(leaf, subleaf, &tmp, &ebx, &tmp, &tmp);

    return ebx;
}

static always_inline unsigned int cpuid_count_edx(
    unsigned int leaf, unsigned int subleaf)
{
    unsigned int edx, tmp;

    cpuid_count(leaf, subleaf, &tmp, &tmp, &tmp, &edx);

    return edx;
}
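
/*
 * Usage sketch (illustrative only): the single-datum helpers avoid dummy
 * locals at the call site, e.g.
 *
 *     unsigned int max_ext   = cpuid_eax(0x80000000);
 *     unsigned int leaf7_ebx = cpuid_count_ebx(7, 0);
 */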

static inline unsigned long read_cr0(void)
{
    unsigned long cr0;
    asm volatile ( "mov %%cr0,%0\n\t" : "=r" (cr0) );
    return cr0;
}

static inline void write_cr0(unsigned long val)
{
    asm volatile ( "mov %0,%%cr0" : : "r" (val) );
}

static inline unsigned long read_cr2(void)
{
    unsigned long cr2;
    asm volatile ( "mov %%cr2,%0\n\t" : "=r" (cr2) );
    return cr2;
}

static inline void write_cr3(unsigned long val)
{
    asm volatile ( "mov %0, %%cr3" : : "r" (val) : "memory" );
}

static inline unsigned long cr3_pa(unsigned long cr3)
{
    return cr3 & X86_CR3_ADDR_MASK;
}

static inline unsigned int cr3_pcid(unsigned long cr3)
{
    return IS_ENABLED(CONFIG_PV) ? cr3 & X86_CR3_PCID_MASK : 0;
}
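
/*
 * Illustrative decomposition (assuming CONFIG_PV and PCIDs in use): for a
 * CR3 value of 0x23f00005, cr3_pa() yields 0x23f00000 (the page table base)
 * and cr3_pcid() yields 5 (the low 12 bits).
 */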

static inline unsigned long read_cr4(void)
{
    return get_cpu_info()->cr4;
}

static inline void write_cr4(unsigned long val)
{
    struct cpu_info *info = get_cpu_info();

#ifdef CONFIG_PV
    /* No global pages in case of PCIDs enabled! */
    ASSERT(!(val & X86_CR4_PGE) || !(val & X86_CR4_PCIDE));
#else
    ASSERT(!(val & X86_CR4_PCIDE));
#endif

    /*
     * On hardware supporting FSGSBASE, the value in %cr4 is the kernel's
     * choice for 64bit PV guests, which impacts whether Xen can use the
     * instructions.
     *
     * The {rd,wr}{fs,gs}base() helpers use info->cr4 to work out whether it
     * is safe to execute the {RD,WR}{FS,GS}BASE instruction, falling back to
     * the MSR path if not.  Some users require interrupt safety.
     *
     * If FSGSBASE is currently or about to become clear, reflect this in
     * info->cr4 before updating %cr4, so an interrupt which hits in the
     * middle won't observe FSGSBASE set in info->cr4 but clear in %cr4.
     */
    info->cr4 = val & (info->cr4 | ~X86_CR4_FSGSBASE);

    asm volatile ( "mov %[val], %%cr4"
                   : "+m" (info->cr4) /* Force ordering without a barrier. */
                   : [val] "r" (val) );

    info->cr4 = val;
}
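
/*
 * Worked example of the masking above: info->cr4 keeps FSGSBASE set only if
 * it is set in both the old cached value and the new one.  So the cached
 * copy transitions 1->0 before the mov to %cr4, and 0->1 only after it,
 * while every other bit takes the new value immediately.
 */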

/* Clear and set 'TS' bit respectively. */
static inline void clts(void)
{
    asm volatile ( "clts" );
}

static inline void stts(void)
{
    write_cr0(X86_CR0_TS|read_cr0());
}

/*
 * Save the cr4 feature set we're using (i.e. Pentium 4MB enable and PPro
 * Global page enable), so that any CPUs that boot up after us can get the
 * correct flags.
 */
extern unsigned long mmu_cr4_features;

static always_inline void set_in_cr4(unsigned long mask)
{
    mmu_cr4_features |= mask;
    write_cr4(read_cr4() | mask);
}
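
/*
 * Usage sketch (illustrative only): enabling a feature for this CPU and
 * recording it for CPUs yet to boot, e.g.
 *
 *     set_in_cr4(X86_CR4_OSXSAVE);
 *
 * Later-booting CPUs pick the bit up from mmu_cr4_features.
 */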

static inline unsigned int read_pkru(void)
{
    unsigned int pkru;
    unsigned long cr4 = read_cr4();

    /*
     * _PAGE_PKEY_BITS conflict with _PAGE_GNTTAB used by PV guests, so
     * X86_CR4_PKE is kept disabled in the hypervisor.  To use RDPKRU,
     * CR4.PKE is temporarily enabled.
     */
    write_cr4(cr4 | X86_CR4_PKE);
    asm volatile ( ".byte 0x0f,0x01,0xee" /* rdpkru */
                   : "=a" (pkru) : "c" (0) : "dx" );
    write_cr4(cr4);

    return pkru;
}

/* Macros for PKRU domain */
#define PKRU_READ  (0)
#define PKRU_WRITE (1)
#define PKRU_ATTRS (2)

/*
 * PKRU is a 32-bit register holding 2 attribute bits for each of 16
 * protection-key domains.  A pte's pkey indexes one domain, so the value
 * pkey * PKRU_ATTRS + R/W is the bit offset of that domain's attribute.
 */
static inline bool_t read_pkru_ad(uint32_t pkru, unsigned int pkey)
{
    ASSERT(pkey < 16);
    return (pkru >> (pkey * PKRU_ATTRS + PKRU_READ)) & 1;
}

static inline bool_t read_pkru_wd(uint32_t pkru, unsigned int pkey)
{
    ASSERT(pkey < 16);
    return (pkru >> (pkey * PKRU_ATTRS + PKRU_WRITE)) & 1;
}
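
/*
 * Worked example: for pkey 1, the access-disable bit is bit 2
 * (1 * PKRU_ATTRS + PKRU_READ) and the write-disable bit is bit 3
 * (1 * PKRU_ATTRS + PKRU_WRITE).
 */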

static always_inline void __monitor(const void *eax, unsigned long ecx,
                                    unsigned long edx)
{
    /* "monitor %eax,%ecx,%edx;" */
    asm volatile (
        ".byte 0x0f,0x01,0xc8;"
        : : "a" (eax), "c" (ecx), "d" (edx) );
}

static always_inline void __mwait(unsigned long eax, unsigned long ecx)
{
    /* "mwait %eax,%ecx;" */
    asm volatile (
        ".byte 0x0f,0x01,0xc9;"
        : : "a" (eax), "c" (ecx) );
}
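
/*
 * Usage sketch (illustrative only): MONITOR arms the address watch, MWAIT
 * then idles until a write to the monitored line (or an interrupt):
 *
 *     __monitor(&flag, 0, 0);
 *     if ( !flag )
 *         __mwait(cstate_hint, 0);
 *
 * `flag' and `cstate_hint' are hypothetical names for this sketch.
 */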

#define IOBMP_BYTES             8192
#define IOBMP_INVALID_OFFSET    0x8000

struct __packed tss64 {
    uint32_t :32;
    uint64_t rsp0, rsp1, rsp2;
    uint64_t :64;
    /*
     * Interrupt Stack Table is 1-based so tss->ist[0] corresponds to an IST
     * value of 1 in an Interrupt Descriptor.
     */
    uint64_t ist[7];
    uint64_t :64;
    uint16_t :16, bitmap;
};
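
/*
 * Illustrative consequence of the 1-based IST numbering: the NMI stack
 * pointer (IST_NMI == 2, defined below) lives in tss.ist[IST_NMI - 1],
 * i.e. ist[1].
 */
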
struct tss_page {
    uint64_t __aligned(PAGE_SIZE) ist_ssp[8];
    struct tss64 tss;
};
DECLARE_PER_CPU(struct tss_page, tss_page);

#define IST_NONE 0UL
#define IST_MCE  1UL
#define IST_NMI  2UL
#define IST_DB   3UL
#define IST_DF   4UL
#define IST_MAX  4UL

/* Set the Interrupt Stack Table used by a particular IDT entry. */
static inline void set_ist(idt_entry_t *idt, unsigned int ist)
{
    /* IST is a 3-bit field, 32 bits into the IDT entry. */
    ASSERT(ist <= IST_MAX);

    /* Typically used on a live IDT.  Dissuade any clever optimisations. */
    ACCESS_ONCE(idt->ist) = ist;
}

static inline void enable_each_ist(idt_entry_t *idt)
{
    set_ist(&idt[TRAP_double_fault],  IST_DF);
    set_ist(&idt[TRAP_nmi],           IST_NMI);
    set_ist(&idt[TRAP_machine_check], IST_MCE);
    set_ist(&idt[TRAP_debug],         IST_DB);
}

static inline void disable_each_ist(idt_entry_t *idt)
{
    set_ist(&idt[TRAP_double_fault],  IST_NONE);
    set_ist(&idt[TRAP_nmi],           IST_NONE);
    set_ist(&idt[TRAP_machine_check], IST_NONE);
    set_ist(&idt[TRAP_debug],         IST_NONE);
}

#define IDT_ENTRIES 256
extern idt_entry_t idt_table[];
extern idt_entry_t *idt_tables[];

DECLARE_PER_CPU(root_pgentry_t *, root_pgt);

extern void write_ptbase(struct vcpu *v);

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static always_inline void rep_nop(void)
{
    asm volatile ( "rep;nop" : : : "memory" );
}

#define cpu_relax() rep_nop()
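
/*
 * Usage sketch (illustrative only): a polite spin-wait.
 *
 *     while ( !ACCESS_ONCE(ready) )
 *         cpu_relax();
 *
 * `ready' is a hypothetical flag; PAUSE reduces power and lets a sibling
 * hyperthread make progress while spinning.
 */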

void show_code(const struct cpu_user_regs *regs);
void show_stack(const struct cpu_user_regs *regs);
void show_stack_overflow(unsigned int cpu, const struct cpu_user_regs *regs);
void show_registers(const struct cpu_user_regs *regs);
void show_execution_state(const struct cpu_user_regs *regs);
#define dump_execution_state() run_in_exception_handler(show_execution_state)
void show_page_walk(unsigned long addr);
void noreturn fatal_trap(const struct cpu_user_regs *regs, bool_t show_remote);

extern void mtrr_ap_init(void);
extern void mtrr_bp_init(void);

void mcheck_init(struct cpuinfo_x86 *c, bool_t bsp);

/* Dispatch table for exceptions */
extern void (* const exception_table[TRAP_nr])(struct cpu_user_regs *regs);

#define DECLARE_TRAP_HANDLER(_name)                    \
    void _name(void);                                  \
    void do_ ## _name(struct cpu_user_regs *regs)
#define DECLARE_TRAP_HANDLER_CONST(_name)              \
    void _name(void);                                  \
    void do_ ## _name(const struct cpu_user_regs *regs)
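
/*
 * For illustration, DECLARE_TRAP_HANDLER(page_fault) expands to:
 *
 *     void page_fault(void);                            (asm entry point)
 *     void do_page_fault(struct cpu_user_regs *regs);   (C handler)
 */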

DECLARE_TRAP_HANDLER(divide_error);
DECLARE_TRAP_HANDLER(debug);
DECLARE_TRAP_HANDLER_CONST(nmi);
DECLARE_TRAP_HANDLER(int3);
DECLARE_TRAP_HANDLER(overflow);
DECLARE_TRAP_HANDLER(bounds);
DECLARE_TRAP_HANDLER(invalid_op);
DECLARE_TRAP_HANDLER(device_not_available);
DECLARE_TRAP_HANDLER(double_fault);
DECLARE_TRAP_HANDLER(invalid_TSS);
DECLARE_TRAP_HANDLER(segment_not_present);
DECLARE_TRAP_HANDLER(stack_segment);
DECLARE_TRAP_HANDLER(general_protection);
DECLARE_TRAP_HANDLER(page_fault);
DECLARE_TRAP_HANDLER(early_page_fault);
DECLARE_TRAP_HANDLER(coprocessor_error);
DECLARE_TRAP_HANDLER(simd_coprocessor_error);
DECLARE_TRAP_HANDLER_CONST(machine_check);
DECLARE_TRAP_HANDLER(alignment_check);
DECLARE_TRAP_HANDLER(entry_CP);

DECLARE_TRAP_HANDLER(entry_int82);

#undef DECLARE_TRAP_HANDLER_CONST
#undef DECLARE_TRAP_HANDLER

void trap_nop(void);
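
/*
 * Re-enable NMI delivery.  Hardware blocks further NMIs from the point an
 * NMI is taken until the next IRET; the function below constructs an IRET
 * frame back to its own caller context (fixing up the shadow stack first
 * when CET-SS is active) and executes IRETQ purely for that side effect.
 */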
static inline void enable_nmis(void)
{
    unsigned long tmp;

    asm volatile ( "mov     %%rsp, %[rsp]        \n\t"
                   "lea    .Ldone(%%rip), %[rip] \n\t"
#ifdef CONFIG_XEN_SHSTK
                   /* Check for CET-SS being active. */
                   "mov    $1, %k[ssp]           \n\t"
                   "rdsspq %[ssp]                \n\t"
                   "cmp    $1, %k[ssp]           \n\t"
                   "je     .Lshstk_done          \n\t"

                   /* Push 3 words on the shadow stack */
                   ".rept 3                      \n\t"
                   "call 1f; nop; 1:             \n\t"
                   ".endr                        \n\t"

                   /* Fixup to be an IRET shadow stack frame */
                   "wrssq  %q[cs], -1*8(%[ssp])  \n\t"
                   "wrssq  %[rip], -2*8(%[ssp])  \n\t"
                   "wrssq  %[ssp], -3*8(%[ssp])  \n\t"

                   ".Lshstk_done:"
#endif
                   /* Write an IRET regular frame */
                   "push   %[ss]                 \n\t"
                   "push   %[rsp]                \n\t"
                   "pushf                        \n\t"
                   "push   %q[cs]                \n\t"
                   "push   %[rip]                \n\t"
                   "iretq                        \n\t"
                   ".Ldone:                      \n\t"
                   : [rip] "=&r" (tmp),
                     [rsp] "=&r" (tmp),
                     [ssp] "=&r" (tmp)
                   : [ss] "i" (__HYPERVISOR_DS),
                     [cs] "r" (__HYPERVISOR_CS) );
}

void sysenter_entry(void);
void sysenter_eflags_saved(void);
void int80_direct_trap(void);

struct stubs {
    union {
        void (*func)(void);
        unsigned long addr;
    };
    unsigned long mfn;
};

DECLARE_PER_CPU(struct stubs, stubs);
unsigned long alloc_stub_page(unsigned int cpu, unsigned long *mfn);

void cpuid_hypervisor_leaves(const struct vcpu *v, uint32_t leaf,
                             uint32_t subleaf, struct cpuid_leaf *res);
int guest_rdmsr_xen(const struct vcpu *v, uint32_t idx, uint64_t *val);
int guest_wrmsr_xen(struct vcpu *v, uint32_t idx, uint64_t val);

static inline uint8_t get_cpu_family(uint32_t raw, uint8_t *model,
                                     uint8_t *stepping)
{
    uint8_t fam = (raw >> 8) & 0xf;

    if ( fam == 0xf )
        fam += (raw >> 20) & 0xff;

    if ( model )
    {
        uint8_t mod = (raw >> 4) & 0xf;

        if ( fam >= 0x6 )
            mod |= (raw >> 12) & 0xf0;

        *model = mod;
    }
    if ( stepping )
        *stepping = raw & 0xf;
    return fam;
}
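
/*
 * Worked examples (CPUID leaf 1 EAX values):
 *   0x000306c3 -> family 0x6, model 0x3c (ext 0x30 | base 0xc), stepping 3
 *   0x00800f12 -> family 0x17 (0xf + ext 0x8), model 0x01, stepping 2
 */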

extern int8_t opt_tsx, cpu_has_tsx_ctrl;
void tsx_init(void);

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_X86_PROCESSOR_H */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */