1 /*
2  * vmx.h: VMX Architecture related definitions
3  * Copyright (c) 2004, Intel Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program; If not, see <http://www.gnu.org/licenses/>.
16  *
17  */
18 #ifndef __ASM_X86_HVM_VMX_VMX_H__
19 #define __ASM_X86_HVM_VMX_VMX_H__
20 
21 #include <xen/sched.h>
22 #include <asm/types.h>
23 #include <asm/regs.h>
24 #include <asm/asm_defns.h>
25 #include <asm/processor.h>
26 #include <asm/i387.h>
27 #include <asm/hvm/support.h>
28 #include <asm/hvm/trace.h>
29 #include <asm/hvm/vmx/vmcs.h>
30 
31 extern int8_t opt_ept_exec_sp;
32 
/*
 * EPT page table entry.  The bitfield view mirrors the 64-bit hardware
 * EPT PTE layout; bits 10 and 57:52 are software-available and used by
 * Xen for p2m bookkeeping (recalc, sa_p2mt, access).
 */
typedef union {
    struct {
        u64 r       :   1,  /* bit 0 - Read permission */
        w           :   1,  /* bit 1 - Write permission */
        x           :   1,  /* bit 2 - Execute permission */
        emt         :   3,  /* bits 5:3 - EPT Memory type */
        ipat        :   1,  /* bit 6 - Ignore PAT memory type */
        sp          :   1,  /* bit 7 - Is this a superpage? */
        a           :   1,  /* bit 8 - Access bit */
        d           :   1,  /* bit 9 - Dirty bit */
        recalc      :   1,  /* bit 10 - Software available 1 */
        snp         :   1,  /* bit 11 - VT-d snoop control in shared
                               EPT/VT-d usage */
        mfn         :   40, /* bits 51:12 - Machine physical frame number */
        sa_p2mt     :   6,  /* bits 57:52 - Software available 2 */
        access      :   4,  /* bits 61:58 - p2m_access_t */
        tm          :   1,  /* bit 62 - VT-d transient-mapping hint in
                               shared EPT/VT-d usage */
        suppress_ve :   1;  /* bit 63 - suppress #VE */
    };
    u64 epte;               /* Raw view of the whole entry. */
} ept_entry_t;
55 
/* Result of a software walk of the EPT tables: one entry per level. */
typedef struct {
    /* lxe[0] is used to save the final (leaf) result. */
    ept_entry_t lxe[5];
} ept_walk_t;
60 
/*
 * EPT permission combinations.  The numeric values encode the R/W/X
 * permission bits directly: bit 0 = read, bit 1 = write, bit 2 = exec.
 */
typedef enum {
    ept_access_n     = 0, /* No access permissions allowed */
    ept_access_r     = 1, /* Read only */
    ept_access_w     = 2, /* Write only */
    ept_access_rw    = 3, /* Read & Write */
    ept_access_x     = 4, /* Exec Only */
    ept_access_rx    = 5, /* Read & Exec */
    ept_access_wx    = 6, /* Write & Exec*/
    ept_access_all   = 7, /* Full permissions */
} ept_access_t;
71 
/* Masks and shifts into the ept_entry_t layout above. */
#define EPT_TABLE_ORDER         9
#define EPTE_SUPER_PAGE_MASK    0x80
#define EPTE_MFN_MASK           0xffffffffff000ULL
#define EPTE_AVAIL1_MASK        0xF00
#define EPTE_EMT_MASK           0x38
#define EPTE_IGMT_MASK          0x40
#define EPTE_AVAIL1_SHIFT       8
#define EPTE_EMT_SHIFT          3
#define EPTE_IGMT_SHIFT         6
#define EPTE_RWX_MASK           0x7
#define EPTE_FLAG_MASK          0x7f

/* Encodings for the EMT (EPT memory type) field. */
#define EPT_EMT_UC              0
#define EPT_EMT_WC              1
#define EPT_EMT_RSV0            2
#define EPT_EMT_RSV1            3
#define EPT_EMT_WT              4
#define EPT_EMT_WP              5
#define EPT_EMT_WB              6
#define EPT_EMT_RSV2            7

/* Mask of the NDST (notification destination) field in xAPIC mode. */
#define PI_xAPIC_NDST_MASK      0xFF00
94 
/* VMX entry/exit and VMCS control-update hooks, defined outside this header. */
void vmx_asm_vmexit_handler(struct cpu_user_regs);
void vmx_asm_do_vmentry(void);
void vmx_intr_assist(void);
void noreturn vmx_do_resume(void);
void vmx_vlapic_msr_changed(struct vcpu *v);
struct hvm_emulate_ctxt;
void vmx_realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt);
void vmx_realmode(struct cpu_user_regs *regs);
void vmx_update_debug_state(struct vcpu *v);
void vmx_update_exception_bitmap(struct vcpu *v);
void vmx_update_cpu_exec_control(struct vcpu *v);
void vmx_update_secondary_exec_control(struct vcpu *v);

/* Bit positions within the posted-interrupt descriptor's control word. */
#define POSTED_INTR_ON  0  /* Outstanding Notification */
#define POSTED_INTR_SN  1  /* Suppress Notification */
/*
 * Atomically set @vector in the Posted Interrupt Requests bitmap.
 * Returns the previous value of that bit.
 */
static inline int pi_test_and_set_pir(uint8_t vector, struct pi_desc *pi_desc)
{
    return test_and_set_bit(vector, pi_desc->pir);
}
114 
/* Check whether @vector is currently set in the PIR (no modification). */
static inline int pi_test_pir(uint8_t vector, const struct pi_desc *pi_desc)
{
    return test_bit(vector, pi_desc->pir);
}
119 
/*
 * Atomically set the Outstanding Notification (ON) bit.
 * Returns the previous value of the bit.
 */
static inline int pi_test_and_set_on(struct pi_desc *pi_desc)
{
    return test_and_set_bit(POSTED_INTR_ON, &pi_desc->control);
}
124 
/* Atomically set the Outstanding Notification (ON) bit. */
static inline void pi_set_on(struct pi_desc *pi_desc)
{
    set_bit(POSTED_INTR_ON, &pi_desc->control);
}
129 
/*
 * Atomically clear the Outstanding Notification (ON) bit.
 * Returns the previous value of the bit.
 */
static inline int pi_test_and_clear_on(struct pi_desc *pi_desc)
{
    return test_and_clear_bit(POSTED_INTR_ON, &pi_desc->control);
}
134 
pi_test_on(struct pi_desc * pi_desc)135 static inline int pi_test_on(struct pi_desc *pi_desc)
136 {
137     return pi_desc->on;
138 }
139 
/*
 * Atomically fetch and zero one unsigned-long-sized group of the PIR.
 * Returns the bits that were pending in that group.
 */
static inline unsigned long pi_get_pir(struct pi_desc *pi_desc, int group)
{
    return xchg(&pi_desc->pir[group], 0);
}
144 
pi_test_sn(struct pi_desc * pi_desc)145 static inline int pi_test_sn(struct pi_desc *pi_desc)
146 {
147     return pi_desc->sn;
148 }
149 
/* Atomically set the Suppress Notification (SN) bit. */
static inline void pi_set_sn(struct pi_desc *pi_desc)
{
    set_bit(POSTED_INTR_SN, &pi_desc->control);
}
154 
/* Atomically clear the Suppress Notification (SN) bit. */
static inline void pi_clear_sn(struct pi_desc *pi_desc)
{
    clear_bit(POSTED_INTR_SN, &pi_desc->control);
}
159 
160 /*
161  * Exit Reasons
162  */
163 #define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000
164 
165 #define EXIT_REASON_EXCEPTION_NMI       0
166 #define EXIT_REASON_EXTERNAL_INTERRUPT  1
167 #define EXIT_REASON_TRIPLE_FAULT        2
168 #define EXIT_REASON_INIT                3
169 #define EXIT_REASON_SIPI                4
170 #define EXIT_REASON_IO_SMI              5
171 #define EXIT_REASON_OTHER_SMI           6
172 #define EXIT_REASON_PENDING_VIRT_INTR   7
173 #define EXIT_REASON_PENDING_VIRT_NMI    8
174 #define EXIT_REASON_TASK_SWITCH         9
175 #define EXIT_REASON_CPUID               10
176 #define EXIT_REASON_GETSEC              11
177 #define EXIT_REASON_HLT                 12
178 #define EXIT_REASON_INVD                13
179 #define EXIT_REASON_INVLPG              14
180 #define EXIT_REASON_RDPMC               15
181 #define EXIT_REASON_RDTSC               16
182 #define EXIT_REASON_RSM                 17
183 #define EXIT_REASON_VMCALL              18
184 #define EXIT_REASON_VMCLEAR             19
185 #define EXIT_REASON_VMLAUNCH            20
186 #define EXIT_REASON_VMPTRLD             21
187 #define EXIT_REASON_VMPTRST             22
188 #define EXIT_REASON_VMREAD              23
189 #define EXIT_REASON_VMRESUME            24
190 #define EXIT_REASON_VMWRITE             25
191 #define EXIT_REASON_VMXOFF              26
192 #define EXIT_REASON_VMXON               27
193 #define EXIT_REASON_CR_ACCESS           28
194 #define EXIT_REASON_DR_ACCESS           29
195 #define EXIT_REASON_IO_INSTRUCTION      30
196 #define EXIT_REASON_MSR_READ            31
197 #define EXIT_REASON_MSR_WRITE           32
198 #define EXIT_REASON_INVALID_GUEST_STATE 33
199 #define EXIT_REASON_MSR_LOADING         34
200 #define EXIT_REASON_MWAIT_INSTRUCTION   36
201 #define EXIT_REASON_MONITOR_TRAP_FLAG   37
202 #define EXIT_REASON_MONITOR_INSTRUCTION 39
203 #define EXIT_REASON_PAUSE_INSTRUCTION   40
204 #define EXIT_REASON_MCE_DURING_VMENTRY  41
205 #define EXIT_REASON_TPR_BELOW_THRESHOLD 43
206 #define EXIT_REASON_APIC_ACCESS         44
207 #define EXIT_REASON_EOI_INDUCED         45
208 #define EXIT_REASON_ACCESS_GDTR_OR_IDTR 46
209 #define EXIT_REASON_ACCESS_LDTR_OR_TR   47
210 #define EXIT_REASON_EPT_VIOLATION       48
211 #define EXIT_REASON_EPT_MISCONFIG       49
212 #define EXIT_REASON_INVEPT              50
213 #define EXIT_REASON_RDTSCP              51
214 #define EXIT_REASON_VMX_PREEMPTION_TIMER_EXPIRED 52
215 #define EXIT_REASON_INVVPID             53
216 #define EXIT_REASON_WBINVD              54
217 #define EXIT_REASON_XSETBV              55
218 #define EXIT_REASON_APIC_WRITE          56
219 #define EXIT_REASON_INVPCID             58
220 #define EXIT_REASON_VMFUNC              59
221 #define EXIT_REASON_PML_FULL            62
222 #define EXIT_REASON_XSAVES              63
223 #define EXIT_REASON_XRSTORS             64
224 
225 /*
226  * Interruption-information format
227  */
228 #define INTR_INFO_VECTOR_MASK           0xff            /* 7:0 */
229 #define INTR_INFO_INTR_TYPE_MASK        0x700           /* 10:8 */
230 #define INTR_INFO_DELIVER_CODE_MASK     0x800           /* 11 */
231 #define INTR_INFO_NMI_UNBLOCKED_BY_IRET 0x1000          /* 12 */
232 #define INTR_INFO_VALID_MASK            0x80000000      /* 31 */
233 #define INTR_INFO_RESVD_BITS_MASK       0x7ffff000
234 
235 /*
236  * Exit Qualifications for MOV for Control Register Access
237  */
238 enum {
239     VMX_CR_ACCESS_TYPE_MOV_TO_CR,
240     VMX_CR_ACCESS_TYPE_MOV_FROM_CR,
241     VMX_CR_ACCESS_TYPE_CLTS,
242     VMX_CR_ACCESS_TYPE_LMSW,
243 };
/* Decoded view of the CR-access exit qualification. */
typedef union cr_access_qual {
    unsigned long raw;
    struct {
        uint16_t cr:4,           /* Control register number. */
                 access_type:2,  /* VMX_CR_ACCESS_TYPE_* */
                 lmsw_op_type:1, /* 0 => reg, 1 => mem   */
                 :1,
                 gpr:4,          /* GPR operand for MOV to/from CR. */
                 :4;
        uint16_t lmsw_data;      /* Source operand of LMSW. */
        uint32_t :32;
    };
} __transparent__ cr_access_qual_t;
257 
258 /*
259  * Access Rights
260  */
261 #define X86_SEG_AR_SEG_TYPE     0xf        /* 3:0, segment type */
262 #define X86_SEG_AR_DESC_TYPE    (1u << 4)  /* 4, descriptor type */
263 #define X86_SEG_AR_DPL          0x60       /* 6:5, descriptor privilege level */
264 #define X86_SEG_AR_SEG_PRESENT  (1u << 7)  /* 7, segment present */
265 #define X86_SEG_AR_AVL          (1u << 12) /* 12, available for system software */
266 #define X86_SEG_AR_CS_LM_ACTIVE (1u << 13) /* 13, long mode active (CS only) */
267 #define X86_SEG_AR_DEF_OP_SIZE  (1u << 14) /* 14, default operation size */
268 #define X86_SEG_AR_GRANULARITY  (1u << 15) /* 15, granularity */
269 #define X86_SEG_AR_SEG_UNUSABLE (1u << 16) /* 16, segment unusable */
270 
271 #define VMCALL_OPCODE   ".byte 0x0f,0x01,0xc1\n"
272 #define VMCLEAR_OPCODE  ".byte 0x66,0x0f,0xc7\n"        /* reg/opcode: /6 */
273 #define VMLAUNCH_OPCODE ".byte 0x0f,0x01,0xc2\n"
274 #define VMPTRLD_OPCODE  ".byte 0x0f,0xc7\n"             /* reg/opcode: /6 */
275 #define VMPTRST_OPCODE  ".byte 0x0f,0xc7\n"             /* reg/opcode: /7 */
276 #define VMREAD_OPCODE   ".byte 0x0f,0x78\n"
277 #define VMRESUME_OPCODE ".byte 0x0f,0x01,0xc3\n"
278 #define VMWRITE_OPCODE  ".byte 0x0f,0x79\n"
279 #define INVEPT_OPCODE   ".byte 0x66,0x0f,0x38,0x80\n"   /* m128,r64/32 */
280 #define INVVPID_OPCODE  ".byte 0x66,0x0f,0x38,0x81\n"   /* m128,r64/32 */
281 #define VMXOFF_OPCODE   ".byte 0x0f,0x01,0xc4\n"
282 #define VMXON_OPCODE    ".byte 0xf3,0x0f,0xc7\n"
283 
284 #define MODRM_EAX_08    ".byte 0x08\n" /* ECX, [EAX] */
285 #define MODRM_EAX_06    ".byte 0x30\n" /* [EAX], with reg/opcode: /6 */
286 #define MODRM_EAX_07    ".byte 0x38\n" /* [EAX], with reg/opcode: /7 */
287 #define MODRM_EAX_ECX   ".byte 0xc1\n" /* EAX, ECX */
288 
289 extern uint8_t posted_intr_vector;
290 
291 #define cpu_has_vmx_ept_exec_only_supported        \
292     (vmx_ept_vpid_cap & VMX_EPT_EXEC_ONLY_SUPPORTED)
293 
294 #define cpu_has_vmx_ept_wl4_supported           \
295     (vmx_ept_vpid_cap & VMX_EPT_WALK_LENGTH_4_SUPPORTED)
296 #define cpu_has_vmx_ept_mt_uc (vmx_ept_vpid_cap & VMX_EPT_MEMORY_TYPE_UC)
297 #define cpu_has_vmx_ept_mt_wb (vmx_ept_vpid_cap & VMX_EPT_MEMORY_TYPE_WB)
298 #define cpu_has_vmx_ept_2mb   (vmx_ept_vpid_cap & VMX_EPT_SUPERPAGE_2MB)
299 #define cpu_has_vmx_ept_1gb   (vmx_ept_vpid_cap & VMX_EPT_SUPERPAGE_1GB)
300 #define cpu_has_vmx_ept_ad    (vmx_ept_vpid_cap & VMX_EPT_AD_BIT)
301 #define cpu_has_vmx_ept_invept_single_context   \
302     (vmx_ept_vpid_cap & VMX_EPT_INVEPT_SINGLE_CONTEXT)
303 
304 #define EPT_2MB_SHIFT     16
305 #define EPT_1GB_SHIFT     17
306 #define ept_has_2mb(c)    ((c >> EPT_2MB_SHIFT) & 1)
307 #define ept_has_1gb(c)    ((c >> EPT_1GB_SHIFT) & 1)
308 
309 #define INVEPT_SINGLE_CONTEXT   1
310 #define INVEPT_ALL_CONTEXT      2
311 
312 #define cpu_has_vmx_vpid_invvpid_individual_addr                    \
313     (vmx_ept_vpid_cap & VMX_VPID_INVVPID_INDIVIDUAL_ADDR)
314 #define cpu_has_vmx_vpid_invvpid_single_context                     \
315     (vmx_ept_vpid_cap & VMX_VPID_INVVPID_SINGLE_CONTEXT)
316 #define cpu_has_vmx_vpid_invvpid_single_context_retaining_global    \
317     (vmx_ept_vpid_cap & VMX_VPID_INVVPID_SINGLE_CONTEXT_RETAINING_GLOBAL)
318 
319 #define INVVPID_INDIVIDUAL_ADDR                 0
320 #define INVVPID_SINGLE_CONTEXT                  1
321 #define INVVPID_ALL_CONTEXT                     2
322 #define INVVPID_SINGLE_CONTEXT_RETAINING_GLOBAL 3
323 
324 #ifdef HAVE_AS_VMX
325 # define GAS_VMX_OP(yes, no) yes
326 #else
327 # define GAS_VMX_OP(yes, no) no
328 #endif
329 
/*
 * VMPTRLD: make the VMCS at machine physical address @addr current.
 * Any failure (CF or ZF set afterwards) is treated as fatal via BUG().
 */
static always_inline void __vmptrld(u64 addr)
{
    asm volatile (
#ifdef HAVE_AS_VMX
                   "vmptrld %0\n"
#else
                   /* Assembler without VMX support: raw opcode, operand via %eax. */
                   VMPTRLD_OPCODE MODRM_EAX_06
#endif
                   /* CF==1 or ZF==1 --> BUG() */
                   UNLIKELY_START(be, vmptrld)
                   _ASM_BUGFRAME_TEXT(0)
                   UNLIKELY_END_SECTION
                   :
#ifdef HAVE_AS_VMX
                   : "m" (addr),
#else
                   : "a" (&addr),
#endif
                     _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0)
                   : "memory");
}
351 
/*
 * VMCLEAR: flush the VMCS at machine physical address @addr to memory
 * and mark it clear (not current/active).  Failure is fatal: BUG().
 */
static always_inline void __vmpclear(u64 addr)
{
    asm volatile (
#ifdef HAVE_AS_VMX
                   "vmclear %0\n"
#else
                   /* Assembler without VMX support: raw opcode, operand via %eax. */
                   VMCLEAR_OPCODE MODRM_EAX_06
#endif
                   /* CF==1 or ZF==1 --> BUG() */
                   UNLIKELY_START(be, vmclear)
                   _ASM_BUGFRAME_TEXT(0)
                   UNLIKELY_END_SECTION
                   :
#ifdef HAVE_AS_VMX
                   : "m" (addr),
#else
                   : "a" (&addr),
#endif
                     _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0)
                   : "memory");
}
373 
/*
 * VMREAD: read VMCS @field of the current VMCS into *@value.
 * Failure (CF or ZF set) is fatal: BUG().  Use vmread_safe() when the
 * caller wants an error code instead.
 */
static always_inline void __vmread(unsigned long field, unsigned long *value)
{
    asm volatile (
#ifdef HAVE_AS_VMX
                   "vmread %1, %0\n\t"
#else
                   VMREAD_OPCODE MODRM_EAX_ECX
#endif
                   /* CF==1 or ZF==1 --> BUG() */
                   UNLIKELY_START(be, vmread)
                   _ASM_BUGFRAME_TEXT(0)
                   UNLIKELY_END_SECTION
#ifdef HAVE_AS_VMX
                   : "=rm" (*value)
                   : "r" (field),
#else
                   : "=c" (*value)
                   : "a" (field),
#endif
                     _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0)
        );
}
396 
/*
 * VMWRITE: write @value to VMCS @field of the current VMCS.
 * Failure (CF or ZF set) is fatal: BUG().  Use vmwrite_safe() when the
 * caller wants an error code instead.
 */
static always_inline void __vmwrite(unsigned long field, unsigned long value)
{
    asm volatile (
#ifdef HAVE_AS_VMX
                   "vmwrite %1, %0\n"
#else
                   VMWRITE_OPCODE MODRM_EAX_ECX
#endif
                   /* CF==1 or ZF==1 --> BUG() */
                   UNLIKELY_START(be, vmwrite)
                   _ASM_BUGFRAME_TEXT(0)
                   UNLIKELY_END_SECTION
                   :
#ifdef HAVE_AS_VMX
                   : "r" (field) , "rm" (value),
#else
                   : "a" (field) , "c" (value),
#endif
                     _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0)
        );
}
418 
/*
 * Fallible VMREAD: read VMCS @field into *@value without BUG()ing.
 *
 * Returns VMX_INSN_SUCCEED on success; VMX_INSN_FAIL_INVALID when the
 * instruction fails with CF set (no current VMCS); otherwise, on ZF
 * (VMfailValid), the error code read back from VM_INSTRUCTION_ERROR.
 */
static inline enum vmx_insn_errno vmread_safe(unsigned long field,
                                              unsigned long *value)
{
    unsigned long ret = VMX_INSN_SUCCEED;
    bool fail_invalid, fail_valid;

    asm volatile ( GAS_VMX_OP("vmread %[field], %[value]\n\t",
                              VMREAD_OPCODE MODRM_EAX_ECX)
                   ASM_FLAG_OUT(, "setc %[invalid]\n\t")
                   ASM_FLAG_OUT(, "setz %[valid]\n\t")
                   : ASM_FLAG_OUT("=@ccc", [invalid] "=rm") (fail_invalid),
                     ASM_FLAG_OUT("=@ccz", [valid] "=rm") (fail_valid),
                     [value] GAS_VMX_OP("=rm", "=c") (*value)
                   : [field] GAS_VMX_OP("r", "a") (field));

    if ( unlikely(fail_invalid) )
        ret = VMX_INSN_FAIL_INVALID;
    else if ( unlikely(fail_valid) )
        __vmread(VM_INSTRUCTION_ERROR, &ret);

    return ret;
}
441 
/*
 * Fallible VMWRITE: write @value to VMCS @field without BUG()ing.
 *
 * Returns VMX_INSN_SUCCEED on success; VMX_INSN_FAIL_INVALID when the
 * instruction fails with CF set (no current VMCS); otherwise, on ZF
 * (VMfailValid), the error code read back from VM_INSTRUCTION_ERROR.
 */
static inline enum vmx_insn_errno vmwrite_safe(unsigned long field,
                                               unsigned long value)
{
    unsigned long ret = VMX_INSN_SUCCEED;
    bool fail_invalid, fail_valid;

    asm volatile ( GAS_VMX_OP("vmwrite %[value], %[field]\n\t",
                              VMWRITE_OPCODE MODRM_EAX_ECX)
                   ASM_FLAG_OUT(, "setc %[invalid]\n\t")
                   ASM_FLAG_OUT(, "setz %[valid]\n\t")
                   : ASM_FLAG_OUT("=@ccc", [invalid] "=rm") (fail_invalid),
                     ASM_FLAG_OUT("=@ccz", [valid] "=rm") (fail_valid)
                   : [field] GAS_VMX_OP("r", "a") (field),
                     [value] GAS_VMX_OP("rm", "c") (value));

    if ( unlikely(fail_invalid) )
        ret = VMX_INSN_FAIL_INVALID;
    else if ( unlikely(fail_valid) )
        __vmread(VM_INSTRUCTION_ERROR, &ret);

    return ret;
}
464 
/*
 * INVEPT: invalidate EPT-derived TLB mappings.
 *
 * @type:  INVEPT_SINGLE_CONTEXT or INVEPT_ALL_CONTEXT.
 * @eptp:  EPT pointer selecting the context (single-context only).
 *
 * Failure (CF or ZF set) is fatal: BUG().
 */
static always_inline void __invept(unsigned long type, uint64_t eptp)
{
    /* 128-bit memory operand required by the instruction. */
    struct {
        uint64_t eptp, rsvd;
    } operand = { eptp };

    /*
     * If single context invalidation is not supported, we escalate to
     * use all context invalidation.
     */
    if ( (type == INVEPT_SINGLE_CONTEXT) &&
         !cpu_has_vmx_ept_invept_single_context )
        type = INVEPT_ALL_CONTEXT;

    asm volatile (
#ifdef HAVE_AS_EPT
                   "invept %0, %1\n"
#else
                   INVEPT_OPCODE MODRM_EAX_08
#endif
                   /* CF==1 or ZF==1 --> BUG() */
                   UNLIKELY_START(be, invept)
                   _ASM_BUGFRAME_TEXT(0)
                   UNLIKELY_END_SECTION
                   :
#ifdef HAVE_AS_EPT
                   : "m" (operand), "r" (type),
#else
                   : "a" (&operand), "c" (type),
#endif
                     _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0)
                   : "memory" );
}
498 
/*
 * INVVPID: invalidate VPID-tagged TLB mappings.
 *
 * @type: one of the INVVPID_* invalidation types.
 * @vpid: VPID selecting the context.
 * @gva:  guest virtual address (individual-address type only).
 *
 * Failure (CF or ZF set) is fatal via BUG(), except for the faulting
 * case handled by the exception-table fixup below.
 */
static always_inline void __invvpid(unsigned long type, u16 vpid, u64 gva)
{
    /* 128-bit memory operand required by the instruction. */
    struct __packed {
        u64 vpid:16;
        u64 rsvd:48;
        u64 gva;
    }  operand = {vpid, 0, gva};

    /* Fix up #UD exceptions which occur when TLBs are flushed before VMXON. */
    asm volatile ( "1: "
#ifdef HAVE_AS_EPT
                   "invvpid %0, %1\n"
#else
                   INVVPID_OPCODE MODRM_EAX_08
#endif
                   /* CF==1 or ZF==1 --> BUG() */
                   UNLIKELY_START(be, invvpid)
                   _ASM_BUGFRAME_TEXT(0)
                   UNLIKELY_END_SECTION "\n"
                   "2:"
                   _ASM_EXTABLE(1b, 2b)
                   :
#ifdef HAVE_AS_EPT
                   : "m" (operand), "r" (type),
#else
                   : "a" (&operand), "c" (type),
#endif
                     _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0)
                   : "memory" );
}
529 
/* Invalidate EPT-derived TLB mappings for all contexts. */
static inline void ept_sync_all(void)
{
    __invept(INVEPT_ALL_CONTEXT, 0);
}
534 
535 void ept_sync_domain(struct p2m_domain *p2m);
536 
vpid_sync_vcpu_gva(struct vcpu * v,unsigned long gva)537 static inline void vpid_sync_vcpu_gva(struct vcpu *v, unsigned long gva)
538 {
539     int type = INVVPID_INDIVIDUAL_ADDR;
540 
541     /*
542      * If individual address invalidation is not supported, we escalate to
543      * use single context invalidation.
544      */
545     if ( likely(cpu_has_vmx_vpid_invvpid_individual_addr) )
546         goto execute_invvpid;
547 
548     type = INVVPID_SINGLE_CONTEXT;
549 
550     /*
551      * If single context invalidation is not supported, we escalate to
552      * use all context invalidation.
553      */
554     if ( !cpu_has_vmx_vpid_invvpid_single_context )
555         type = INVVPID_ALL_CONTEXT;
556 
557 execute_invvpid:
558     __invvpid(type, v->arch.hvm.n1asid.asid, (u64)gva);
559 }
560 
/* Invalidate VPID-tagged TLB mappings for all contexts. */
static inline void vpid_sync_all(void)
{
    __invvpid(INVVPID_ALL_CONTEXT, 0, 0);
}
565 
/* VMXOFF: leave VMX operation on this CPU. */
static inline void __vmxoff(void)
{
    asm volatile (
        VMXOFF_OPCODE
        : : : "memory" );
}
572 
/*
 * VMXON: enter VMX operation using the VMXON region at machine physical
 * address @addr.
 *
 * Returns 0 on success, -1 if the instruction fails (CF or ZF set),
 * or -2 if it faults (#UD/#GP, recovered via the fixup section).
 */
static inline int __vmxon(u64 addr)
{
    int rc;

    asm volatile (
        "1: " VMXON_OPCODE MODRM_EAX_06 "\n"
        "   setna %b0 ; neg %0\n" /* CF==1 or ZF==1 --> rc = -1 */
        "2:\n"
        ".section .fixup,\"ax\"\n"
        "3: sub $2,%0 ; jmp 2b\n"    /* #UD or #GP --> rc = -2 */
        ".previous\n"
        _ASM_EXTABLE(1b, 3b)
        : "=q" (rc)
        : "0" (0), "a" (&addr)
        : "memory");

    return rc;
}
591 
592 int vmx_guest_x86_mode(struct vcpu *v);
593 unsigned int vmx_get_cpl(void);
594 
595 void vmx_inject_extint(int trap, uint8_t source);
596 void vmx_inject_nmi(void);
597 
598 int ept_p2m_init(struct p2m_domain *p2m);
599 void ept_p2m_uninit(struct p2m_domain *p2m);
600 
601 void ept_walk_table(struct domain *d, unsigned long gfn);
602 bool_t ept_handle_misconfig(uint64_t gpa);
603 void setup_ept_dump(void);
604 void p2m_init_altp2m_ept(struct domain *d, unsigned int i);
605 /* Locate an alternate p2m by its EPTP */
606 unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp);
607 
608 void update_guest_eip(void);
609 
610 void vmx_pi_per_cpu_init(unsigned int cpu);
611 void vmx_pi_desc_fixup(unsigned int cpu);
612 
613 void vmx_sync_exit_bitmap(struct vcpu *v);
614 
#ifdef CONFIG_HVM
void vmx_pi_hooks_assign(struct domain *d);
void vmx_pi_hooks_deassign(struct domain *d);
#else
/* Posted-interrupt hook management is a no-op without HVM support. */
static inline void vmx_pi_hooks_assign(struct domain *d) {}
static inline void vmx_pi_hooks_deassign(struct domain *d) {}
#endif
622 
623 #define APIC_INVALID_DEST           0xffffffff
624 
/*
 * EPT violation qualifications definitions: decoded view of the exit
 * qualification delivered with EXIT_REASON_EPT_VIOLATION.
 */
typedef union ept_qual {
    unsigned long raw;
    struct {
        bool read:1, write:1, fetch:1,           /* Attempted access type. */
            eff_read:1, eff_write:1, eff_exec:1, /* eff_user_exec */:1,
            gla_valid:1,                         /* Guest linear addr valid? */
            gla_fault:1; /* Valid iff gla_valid. */
        unsigned long /* pad */:55;
    };
} __transparent__ ept_qual_t;
636 
637 #define EPT_L4_PAGETABLE_SHIFT      39
638 #define EPT_PAGETABLE_ENTRIES       512
639 
/*
 * #VE information page: layout of the Virtualization Exception
 * information area referenced from the VMCS.
 */
typedef struct {
    u32 exit_reason;        /* Would-be exit reason of the #VE. */
    u32 semaphore;          /* Non-zero blocks further #VE delivery. */
    u64 exit_qualification;
    u64 gla;                /* Guest linear address. */
    u64 gpa;                /* Guest physical address. */
    u16 eptp_index;
} ve_info_t;
649 
/*
 * VM-Exit instruction info for LIDT, LGDT, SIDT, SGDT
 * (EXIT_REASON_ACCESS_GDTR_OR_IDTR).
 */
typedef union idt_or_gdt_instr_info {
    unsigned long raw;
    struct {
        unsigned long scaling   :2,  /* bits 0:1 - Scaling */
                                :5,  /* bits 6:2 - Undefined */
        addr_size               :3,  /* bits 9:7 - Address size */
                                :1,  /* bit 10 - Cleared to 0 */
        operand_size            :1,  /* bit 11 - Operand size */
                                :3,  /* bits 14:12 - Undefined */
        segment_reg             :3,  /* bits 17:15 - Segment register */
        index_reg               :4,  /* bits 21:18 - Index register */
        index_reg_invalid       :1,  /* bit 22 - Index register invalid */
        base_reg                :4,  /* bits 26:23 - Base register */
        base_reg_invalid        :1,  /* bit 27 - Base register invalid */
        instr_identity          :1,  /* bit 28 - 0:GDT, 1:IDT */
        instr_write             :1,  /* bit 29 - 0:store, 1:load */
                                :34; /* bits 30:63 - Undefined */
    };
} idt_or_gdt_instr_info_t;
670 
/*
 * VM-Exit instruction info for LLDT, LTR, SLDT, STR
 * (EXIT_REASON_ACCESS_LDTR_OR_TR).
 */
typedef union ldt_or_tr_instr_info {
    unsigned long raw;
    struct {
        unsigned long scaling   :2,  /* bits 0:1 - Scaling */
                                :1,  /* bit 2 - Undefined */
        reg1                    :4,  /* bits 6:3 - Reg1 */
        addr_size               :3,  /* bits 9:7 - Address size */
        mem_reg                 :1,  /* bit 10 - Mem/Reg */
                                :4,  /* bits 14:11 - Undefined */
        segment_reg             :3,  /* bits 17:15 - Segment register */
        index_reg               :4,  /* bits 21:18 - Index register */
        index_reg_invalid       :1,  /* bit 22 - Index register invalid */
        base_reg                :4,  /* bits 26:23 - Base register */
        base_reg_invalid        :1,  /* bit 27 - Base register invalid */
        instr_identity          :1,  /* bit 28 - 0:LDT, 1:TR */
        instr_write             :1,  /* bit 29 - 0:store, 1:load */
                                :34; /* bits 30:63 - Undefined */
    };
} ldt_or_tr_instr_info_t;
691 
692 #endif /* __ASM_X86_HVM_VMX_VMX_H__ */
693