/*
 * vmcs.h: VMCS related definitions
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 *
 */
#ifndef __ASM_X86_HVM_VMX_VMCS_H__
#define __ASM_X86_HVM_VMX_VMCS_H__

#include <xen/mm.h>

extern void vmcs_dump_vcpu(struct vcpu *v);
extern int vmx_vmcs_init(void);
extern int  vmx_cpu_up_prepare(unsigned int cpu);
extern void vmx_cpu_dead(unsigned int cpu);
extern int  vmx_cpu_up(void);
extern void vmx_cpu_down(void);

struct vmcs_struct {
    u32 vmcs_revision_id;
    unsigned char data [0]; /* vmcs size is read from MSR */
};

struct vmx_msr_entry {
    u32 index;
    u32 mbz;
    u64 data;
};

#define EPT_DEFAULT_MT      MTRR_TYPE_WRBACK

struct ept_data {
    union {
        struct {
            uint64_t mt:3,   /* Memory Type. */
                     wl:3,   /* Walk length -1. */
                     ad:1,   /* Enable EPT A/D bits. */
                     :5,     /* rsvd. */
                     mfn:52;
        };
        u64 eptp;
    };
    /* Set of PCPUs needing an INVEPT before a VMENTER. */
    cpumask_var_t invalidate;
};
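
/*
 * Illustrative sketch (not part of the interface): the value written to the
 * EPT_POINTER VMCS field is just the packed union above.  For a 4-level,
 * write-back walk with A/D bits disabled, roughly:
 *
 *     struct ept_data ept = {};
 *
 *     ept.mt  = EPT_DEFAULT_MT;   // memory type: write-back
 *     ept.wl  = 3;                // walk length minus one, i.e. 4 levels
 *     ept.mfn = mfn_x(top_mfn);   // MFN of the top-level EPT table
 *
 *     __vmwrite(EPT_POINTER, ept.eptp);
 *
 * "top_mfn" is a placeholder; __vmwrite() (declared in vmx.h) is shown only
 * to indicate where the composed value ends up.
 */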

#define _VMX_DOMAIN_PML_ENABLED    0
#define VMX_DOMAIN_PML_ENABLED     (1ul << _VMX_DOMAIN_PML_ENABLED)
struct vmx_domain {
    mfn_t apic_access_mfn;
    /* VMX_DOMAIN_* */
    unsigned int status;

    /*
     * Domain permitted to use Executable EPT Superpages?  Cleared to work
     * around CVE-2018-12207 as appropriate.
     */
    bool exec_sp;
};

/*
 * Layout of the MSR bitmap, as interpreted by hardware:
 *  - *_low  covers MSRs 0 to 0x1fff
 *  - *_high covers MSRs 0xc0000000 to 0xc0001fff
 */
struct vmx_msr_bitmap {
    unsigned long read_low  [0x2000 / BITS_PER_LONG];
    unsigned long read_high [0x2000 / BITS_PER_LONG];
    unsigned long write_low [0x2000 / BITS_PER_LONG];
    unsigned long write_high[0x2000 / BITS_PER_LONG];
};
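
/*
 * Worked example (illustrative): a set bit means the corresponding access
 * is intercepted.  Reads of MSR_EFER (0xc0000080) are governed by bit
 * (0xc0000080 & 0x1fff) == 0x80 of read_high, so with "bitmap" being a
 * struct vmx_msr_bitmap pointer:
 *
 *     bool intercepted = test_bit(MSR_EFER & 0x1fff, bitmap->read_high);
 *
 * vmx_msr_is_intercepted() below performs this lookup for both MSR ranges.
 */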

struct pi_desc {
    DECLARE_BITMAP(pir, X86_NR_VECTORS);
    union {
        struct {
            u16     on     : 1,  /* bit 256 - Outstanding Notification */
                    sn     : 1,  /* bit 257 - Suppress Notification */
                    rsvd_1 : 14; /* bit 271:258 - Reserved */
            u8      nv;          /* bit 279:272 - Notification Vector */
            u8      rsvd_2;      /* bit 287:280 - Reserved */
            u32     ndst;        /* bit 319:288 - Notification Destination */
        };
        u64 control;
    };
    u32 rsvd[6];
} __attribute__ ((aligned (64)));
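
/*
 * Usage sketch (illustrative only; the real pi_* accessors live in vmx.h).
 * To post a vector, software sets its bit in "pir", sets "on", and, if "sn"
 * is clear, sends the notification vector "nv" as a physical IPI to "ndst":
 *
 *     set_bit(vector, desc->pir);
 *     if ( !test_and_set_bit(0, &desc->control) && !desc->sn )
 *         ... send IPI with vector desc->nv to APIC ID desc->ndst ...
 *
 * If that IPI arrives while the guest is running, hardware ORs "pir" into
 * the virtual APIC IRR and clears "on" without causing a VM exit.
 */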

#define NR_PML_ENTRIES   512

struct pi_blocking_vcpu {
    struct list_head     list;
    spinlock_t           *lock;
};

struct vmx_vcpu {
    /* Physical address of VMCS. */
    paddr_t              vmcs_pa;
    /* VMCS shadow machine address. */
    paddr_t              vmcs_shadow_maddr;

    /* Protects remote usage of VMCS (VMPTRLD/VMCLEAR). */
    spinlock_t           vmcs_lock;

    /*
     * Activation and launch status of this VMCS.
     *  - Activated on a CPU by VMPTRLD. Deactivated by VMCLEAR.
     *  - Launched on active CPU by VMLAUNCH when current VMCS.
     */
    struct list_head     active_list;
    int                  active_cpu;
    int                  launched;

    /* Cache of cpu execution control. */
    u32                  exec_control;
    u32                  secondary_exec_control;
    u32                  exception_bitmap;

    uint64_t             shadow_gs;
    uint64_t             star;
    uint64_t             lstar;
    uint64_t             cstar;
    uint64_t             sfmask;

    struct vmx_msr_bitmap *msr_bitmap;

    /*
     * Most accesses to the MSR host/guest load/save lists are in current
     * context.  However, the data can be modified by toolstack/migration
     * actions.  Remote access is only permitted for paused vcpus, and is
     * protected under the domctl lock.
     */
    struct vmx_msr_entry *msr_area;
    struct vmx_msr_entry *host_msr_area;
    unsigned int         msr_load_count;
    unsigned int         msr_save_count;
    unsigned int         host_msr_count;

    unsigned long        eoi_exitmap_changed;
    DECLARE_BITMAP(eoi_exit_bitmap, X86_NR_VECTORS);
    struct pi_desc       pi_desc;

    unsigned long        host_cr0;

    /* Do we need to tolerate a spurious EPT_MISCONFIG VM exit? */
    bool_t               ept_spurious_misconfig;

    /* Is the guest in real mode? */
    uint8_t              vmx_realmode;
    /* Are we emulating rather than VMENTERing? */
    uint8_t              vmx_emulate;

    uint8_t              lbr_flags;

    /* Bitmask of segments that we can't safely use in virtual 8086 mode */
    uint16_t             vm86_segment_mask;
    /* Shadow CS, SS, DS, ES, FS, GS, TR while in virtual 8086 mode */
    struct segment_register vm86_saved_seg[x86_seg_tr + 1];
    /* Remember EFLAGS while in virtual 8086 mode */
    uint32_t             vm86_saved_eflags;
    int                  hostenv_migrated;

    /* Bitmap to control vmexit policy for Non-root VMREAD/VMWRITE */
    struct page_info     *vmread_bitmap;
    struct page_info     *vmwrite_bitmap;

    struct page_info     *pml_pg;

    /* Bitmask of trapped CR4 bits. */
    unsigned long        cr4_host_mask;

    /*
     * Before it blocks, the vCPU is added to a per-pCPU list, so that the
     * VT-d engine can send a wakeup notification event to that pCPU and
     * wake the related vCPU.
     */
    struct pi_blocking_vcpu pi_blocking;
};

int vmx_create_vmcs(struct vcpu *v);
void vmx_destroy_vmcs(struct vcpu *v);
void vmx_vmcs_enter(struct vcpu *v);
bool_t __must_check vmx_vmcs_try_enter(struct vcpu *v);
void vmx_vmcs_exit(struct vcpu *v);
void vmx_vmcs_reload(struct vcpu *v);
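
/*
 * Illustrative usage pattern (not a new interface): accesses to VMCS fields
 * of a vCPU which may be running remotely are bracketed with
 * vmx_vmcs_enter()/vmx_vmcs_exit(), which load the VMCS on this pCPU and
 * keep the owner from using it concurrently:
 *
 *     vmx_vmcs_enter(v);
 *     __vmwrite(EXCEPTION_BITMAP, v->arch.hvm.vmx.exception_bitmap);
 *     vmx_vmcs_exit(v);
 *
 * The field written above is only an example; when v == current and its
 * VMCS is already loaded, enter/exit are cheap.
 */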

#define CPU_BASED_VIRTUAL_INTR_PENDING        0x00000004
#define CPU_BASED_USE_TSC_OFFSETING           0x00000008
#define CPU_BASED_HLT_EXITING                 0x00000080
#define CPU_BASED_INVLPG_EXITING              0x00000200
#define CPU_BASED_MWAIT_EXITING               0x00000400
#define CPU_BASED_RDPMC_EXITING               0x00000800
#define CPU_BASED_RDTSC_EXITING               0x00001000
#define CPU_BASED_CR3_LOAD_EXITING            0x00008000
#define CPU_BASED_CR3_STORE_EXITING           0x00010000
#define CPU_BASED_CR8_LOAD_EXITING            0x00080000
#define CPU_BASED_CR8_STORE_EXITING           0x00100000
#define CPU_BASED_TPR_SHADOW                  0x00200000
#define CPU_BASED_VIRTUAL_NMI_PENDING         0x00400000
#define CPU_BASED_MOV_DR_EXITING              0x00800000
#define CPU_BASED_UNCOND_IO_EXITING           0x01000000
#define CPU_BASED_ACTIVATE_IO_BITMAP          0x02000000
#define CPU_BASED_MONITOR_TRAP_FLAG           0x08000000
#define CPU_BASED_ACTIVATE_MSR_BITMAP         0x10000000
#define CPU_BASED_MONITOR_EXITING             0x20000000
#define CPU_BASED_PAUSE_EXITING               0x40000000
#define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000
extern u32 vmx_cpu_based_exec_control;

#define PIN_BASED_EXT_INTR_MASK         0x00000001
#define PIN_BASED_NMI_EXITING           0x00000008
#define PIN_BASED_VIRTUAL_NMIS          0x00000020
#define PIN_BASED_PREEMPT_TIMER         0x00000040
#define PIN_BASED_POSTED_INTERRUPT      0x00000080
extern u32 vmx_pin_based_exec_control;

#define VM_EXIT_SAVE_DEBUG_CNTRLS       0x00000004
#define VM_EXIT_IA32E_MODE              0x00000200
#define VM_EXIT_LOAD_PERF_GLOBAL_CTRL   0x00001000
#define VM_EXIT_ACK_INTR_ON_EXIT        0x00008000
#define VM_EXIT_SAVE_GUEST_PAT          0x00040000
#define VM_EXIT_LOAD_HOST_PAT           0x00080000
#define VM_EXIT_SAVE_GUEST_EFER         0x00100000
#define VM_EXIT_LOAD_HOST_EFER          0x00200000
#define VM_EXIT_SAVE_PREEMPT_TIMER      0x00400000
#define VM_EXIT_CLEAR_BNDCFGS           0x00800000
extern u32 vmx_vmexit_control;

#define VM_ENTRY_IA32E_MODE             0x00000200
#define VM_ENTRY_SMM                    0x00000400
#define VM_ENTRY_DEACT_DUAL_MONITOR     0x00000800
#define VM_ENTRY_LOAD_PERF_GLOBAL_CTRL  0x00002000
#define VM_ENTRY_LOAD_GUEST_PAT         0x00004000
#define VM_ENTRY_LOAD_GUEST_EFER        0x00008000
#define VM_ENTRY_LOAD_BNDCFGS           0x00010000
extern u32 vmx_vmentry_control;

#define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
#define SECONDARY_EXEC_ENABLE_EPT               0x00000002
#define SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING 0x00000004
#define SECONDARY_EXEC_ENABLE_RDTSCP            0x00000008
#define SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE   0x00000010
#define SECONDARY_EXEC_ENABLE_VPID              0x00000020
#define SECONDARY_EXEC_WBINVD_EXITING           0x00000040
#define SECONDARY_EXEC_UNRESTRICTED_GUEST       0x00000080
#define SECONDARY_EXEC_APIC_REGISTER_VIRT       0x00000100
#define SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY    0x00000200
#define SECONDARY_EXEC_PAUSE_LOOP_EXITING       0x00000400
#define SECONDARY_EXEC_ENABLE_INVPCID           0x00001000
#define SECONDARY_EXEC_ENABLE_VM_FUNCTIONS      0x00002000
#define SECONDARY_EXEC_ENABLE_VMCS_SHADOWING    0x00004000
#define SECONDARY_EXEC_ENABLE_PML               0x00020000
#define SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS   0x00040000
#define SECONDARY_EXEC_XSAVES                   0x00100000
#define SECONDARY_EXEC_TSC_SCALING              0x02000000
extern u32 vmx_secondary_exec_control;

#define VMX_EPT_EXEC_ONLY_SUPPORTED                         0x00000001
#define VMX_EPT_WALK_LENGTH_4_SUPPORTED                     0x00000040
#define VMX_EPT_MEMORY_TYPE_UC                              0x00000100
#define VMX_EPT_MEMORY_TYPE_WB                              0x00004000
#define VMX_EPT_SUPERPAGE_2MB                               0x00010000
#define VMX_EPT_SUPERPAGE_1GB                               0x00020000
#define VMX_EPT_INVEPT_INSTRUCTION                          0x00100000
#define VMX_EPT_AD_BIT                                      0x00200000
#define VMX_EPT_INVEPT_SINGLE_CONTEXT                       0x02000000
#define VMX_EPT_INVEPT_ALL_CONTEXT                          0x04000000
#define VMX_VPID_INVVPID_INSTRUCTION                     0x00100000000ULL
#define VMX_VPID_INVVPID_INDIVIDUAL_ADDR                 0x10000000000ULL
#define VMX_VPID_INVVPID_SINGLE_CONTEXT                  0x20000000000ULL
#define VMX_VPID_INVVPID_ALL_CONTEXT                     0x40000000000ULL
#define VMX_VPID_INVVPID_SINGLE_CONTEXT_RETAINING_GLOBAL 0x80000000000ULL
extern u64 vmx_ept_vpid_cap;

#define VMX_MISC_CR3_TARGET                     0x01ff0000
#define VMX_MISC_VMWRITE_ALL                    0x20000000

#define VMX_TSC_MULTIPLIER_MAX                  0xffffffffffffffffULL

#define cpu_has_wbinvd_exiting \
    (vmx_secondary_exec_control & SECONDARY_EXEC_WBINVD_EXITING)
#define cpu_has_vmx_virtualize_apic_accesses \
    (vmx_secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
#define cpu_has_vmx_tpr_shadow \
    (vmx_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)
#define cpu_has_vmx_vnmi \
    (vmx_pin_based_exec_control & PIN_BASED_VIRTUAL_NMIS)
#define cpu_has_vmx_msr_bitmap \
    (vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_MSR_BITMAP)
#define cpu_has_vmx_secondary_exec_control \
    (vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
#define cpu_has_vmx_ept \
    (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT)
#define cpu_has_vmx_dt_exiting \
    (vmx_secondary_exec_control & SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING)
#define cpu_has_vmx_vpid \
    (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
#define cpu_has_monitor_trap_flag \
    (vmx_cpu_based_exec_control & CPU_BASED_MONITOR_TRAP_FLAG)
#define cpu_has_vmx_pat \
    (vmx_vmentry_control & VM_ENTRY_LOAD_GUEST_PAT)
#define cpu_has_vmx_efer \
    (vmx_vmentry_control & VM_ENTRY_LOAD_GUEST_EFER)
#define cpu_has_vmx_unrestricted_guest \
    (vmx_secondary_exec_control & SECONDARY_EXEC_UNRESTRICTED_GUEST)
#define vmx_unrestricted_guest(v)               \
    ((v)->arch.hvm.vmx.secondary_exec_control & \
     SECONDARY_EXEC_UNRESTRICTED_GUEST)
#define cpu_has_vmx_ple \
    (vmx_secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING)
#define cpu_has_vmx_apic_reg_virt \
    (vmx_secondary_exec_control & SECONDARY_EXEC_APIC_REGISTER_VIRT)
#define cpu_has_vmx_virtual_intr_delivery \
    (vmx_secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
#define cpu_has_vmx_virtualize_x2apic_mode \
    (vmx_secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)
#define cpu_has_vmx_posted_intr_processing \
    (vmx_pin_based_exec_control & PIN_BASED_POSTED_INTERRUPT)
#define cpu_has_vmx_vmcs_shadowing \
    (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_VMCS_SHADOWING)
#define cpu_has_vmx_vmfunc \
    (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_VM_FUNCTIONS)
#define cpu_has_vmx_virt_exceptions \
    (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS)
#define cpu_has_vmx_pml \
    (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_PML)
#define cpu_has_vmx_mpx \
    ((vmx_vmexit_control & VM_EXIT_CLEAR_BNDCFGS) && \
     (vmx_vmentry_control & VM_ENTRY_LOAD_BNDCFGS))
#define cpu_has_vmx_xsaves \
    (vmx_secondary_exec_control & SECONDARY_EXEC_XSAVES)
#define cpu_has_vmx_tsc_scaling \
    (vmx_secondary_exec_control & SECONDARY_EXEC_TSC_SCALING)

#define VMCS_RID_TYPE_MASK              0x80000000

/* GUEST_INTERRUPTIBILITY_INFO flags. */
#define VMX_INTR_SHADOW_STI             0x00000001
#define VMX_INTR_SHADOW_MOV_SS          0x00000002
#define VMX_INTR_SHADOW_SMI             0x00000004
#define VMX_INTR_SHADOW_NMI             0x00000008

#define VMX_BASIC_REVISION_MASK         0x7fffffff
#define VMX_BASIC_VMCS_SIZE_MASK        (0x1fffULL << 32)
#define VMX_BASIC_32BIT_ADDRESSES       (1ULL << 48)
#define VMX_BASIC_DUAL_MONITOR          (1ULL << 49)
#define VMX_BASIC_MEMORY_TYPE_MASK      (0xfULL << 50)
#define VMX_BASIC_INS_OUT_INFO          (1ULL << 54)
/*
 * bit 55 of IA32_VMX_BASIC MSR, indicating whether any VMX controls that
 * default to 1 may be cleared to 0.
 */
#define VMX_BASIC_DEFAULT1_ZERO         (1ULL << 55)

extern u64 vmx_basic_msr;
#define cpu_has_vmx_ins_outs_instr_info \
    (!!(vmx_basic_msr & VMX_BASIC_INS_OUT_INFO))

/* Guest interrupt status */
#define VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK  0x0FF
#define VMX_GUEST_INTR_STATUS_SVI_OFFSET        8

/* VMFUNC leaf definitions */
#define VMX_VMFUNC_EPTP_SWITCHING   (1ULL << 0)

/* VMCS field encodings. */
#define VMCS_HIGH(x) ((x) | 1)
enum vmcs_field {
    VIRTUAL_PROCESSOR_ID            = 0x00000000,
    POSTED_INTR_NOTIFICATION_VECTOR = 0x00000002,
    EPTP_INDEX                      = 0x00000004,
#define GUEST_SEG_SELECTOR(sel) (GUEST_ES_SELECTOR + (sel) * 2) /* ES ... GS */
    GUEST_ES_SELECTOR               = 0x00000800,
    GUEST_CS_SELECTOR               = 0x00000802,
    GUEST_SS_SELECTOR               = 0x00000804,
    GUEST_DS_SELECTOR               = 0x00000806,
    GUEST_FS_SELECTOR               = 0x00000808,
    GUEST_GS_SELECTOR               = 0x0000080a,
    GUEST_LDTR_SELECTOR             = 0x0000080c,
    GUEST_TR_SELECTOR               = 0x0000080e,
    GUEST_INTR_STATUS               = 0x00000810,
    GUEST_PML_INDEX                 = 0x00000812,
    HOST_ES_SELECTOR                = 0x00000c00,
    HOST_CS_SELECTOR                = 0x00000c02,
    HOST_SS_SELECTOR                = 0x00000c04,
    HOST_DS_SELECTOR                = 0x00000c06,
    HOST_FS_SELECTOR                = 0x00000c08,
    HOST_GS_SELECTOR                = 0x00000c0a,
    HOST_TR_SELECTOR                = 0x00000c0c,
    IO_BITMAP_A                     = 0x00002000,
    IO_BITMAP_B                     = 0x00002002,
    MSR_BITMAP                      = 0x00002004,
    VM_EXIT_MSR_STORE_ADDR          = 0x00002006,
    VM_EXIT_MSR_LOAD_ADDR           = 0x00002008,
    VM_ENTRY_MSR_LOAD_ADDR          = 0x0000200a,
    PML_ADDRESS                     = 0x0000200e,
    TSC_OFFSET                      = 0x00002010,
    VIRTUAL_APIC_PAGE_ADDR          = 0x00002012,
    APIC_ACCESS_ADDR                = 0x00002014,
    PI_DESC_ADDR                    = 0x00002016,
    VM_FUNCTION_CONTROL             = 0x00002018,
    EPT_POINTER                     = 0x0000201a,
    EOI_EXIT_BITMAP0                = 0x0000201c,
#define EOI_EXIT_BITMAP(n) (EOI_EXIT_BITMAP0 + (n) * 2) /* n = 0...3 */
    EPTP_LIST_ADDR                  = 0x00002024,
    VMREAD_BITMAP                   = 0x00002026,
    VMWRITE_BITMAP                  = 0x00002028,
    VIRT_EXCEPTION_INFO             = 0x0000202a,
    XSS_EXIT_BITMAP                 = 0x0000202c,
    TSC_MULTIPLIER                  = 0x00002032,
    GUEST_PHYSICAL_ADDRESS          = 0x00002400,
    VMCS_LINK_POINTER               = 0x00002800,
    GUEST_IA32_DEBUGCTL             = 0x00002802,
    GUEST_PAT                       = 0x00002804,
    GUEST_EFER                      = 0x00002806,
    GUEST_PERF_GLOBAL_CTRL          = 0x00002808,
    GUEST_PDPTE0                    = 0x0000280a,
#define GUEST_PDPTE(n) (GUEST_PDPTE0 + (n) * 2) /* n = 0...3 */
    GUEST_BNDCFGS                   = 0x00002812,
    HOST_PAT                        = 0x00002c00,
    HOST_EFER                       = 0x00002c02,
    HOST_PERF_GLOBAL_CTRL           = 0x00002c04,
    PIN_BASED_VM_EXEC_CONTROL       = 0x00004000,
    CPU_BASED_VM_EXEC_CONTROL       = 0x00004002,
    EXCEPTION_BITMAP                = 0x00004004,
    PAGE_FAULT_ERROR_CODE_MASK      = 0x00004006,
    PAGE_FAULT_ERROR_CODE_MATCH     = 0x00004008,
    CR3_TARGET_COUNT                = 0x0000400a,
    VM_EXIT_CONTROLS                = 0x0000400c,
    VM_EXIT_MSR_STORE_COUNT         = 0x0000400e,
    VM_EXIT_MSR_LOAD_COUNT          = 0x00004010,
    VM_ENTRY_CONTROLS               = 0x00004012,
    VM_ENTRY_MSR_LOAD_COUNT         = 0x00004014,
    VM_ENTRY_INTR_INFO              = 0x00004016,
    VM_ENTRY_EXCEPTION_ERROR_CODE   = 0x00004018,
    VM_ENTRY_INSTRUCTION_LEN        = 0x0000401a,
    TPR_THRESHOLD                   = 0x0000401c,
    SECONDARY_VM_EXEC_CONTROL       = 0x0000401e,
    PLE_GAP                         = 0x00004020,
    PLE_WINDOW                      = 0x00004022,
    VM_INSTRUCTION_ERROR            = 0x00004400,
    VM_EXIT_REASON                  = 0x00004402,
    VM_EXIT_INTR_INFO               = 0x00004404,
    VM_EXIT_INTR_ERROR_CODE         = 0x00004406,
    IDT_VECTORING_INFO              = 0x00004408,
    IDT_VECTORING_ERROR_CODE        = 0x0000440a,
    VM_EXIT_INSTRUCTION_LEN         = 0x0000440c,
    VMX_INSTRUCTION_INFO            = 0x0000440e,
#define GUEST_SEG_LIMIT(sel) (GUEST_ES_LIMIT + (sel) * 2) /* ES ... GS */
    GUEST_ES_LIMIT                  = 0x00004800,
    GUEST_CS_LIMIT                  = 0x00004802,
    GUEST_SS_LIMIT                  = 0x00004804,
    GUEST_DS_LIMIT                  = 0x00004806,
    GUEST_FS_LIMIT                  = 0x00004808,
    GUEST_GS_LIMIT                  = 0x0000480a,
    GUEST_LDTR_LIMIT                = 0x0000480c,
    GUEST_TR_LIMIT                  = 0x0000480e,
    GUEST_GDTR_LIMIT                = 0x00004810,
    GUEST_IDTR_LIMIT                = 0x00004812,
#define GUEST_SEG_AR_BYTES(sel) (GUEST_ES_AR_BYTES + (sel) * 2) /* ES ... GS */
    GUEST_ES_AR_BYTES               = 0x00004814,
    GUEST_CS_AR_BYTES               = 0x00004816,
    GUEST_SS_AR_BYTES               = 0x00004818,
    GUEST_DS_AR_BYTES               = 0x0000481a,
    GUEST_FS_AR_BYTES               = 0x0000481c,
    GUEST_GS_AR_BYTES               = 0x0000481e,
    GUEST_LDTR_AR_BYTES             = 0x00004820,
    GUEST_TR_AR_BYTES               = 0x00004822,
    GUEST_INTERRUPTIBILITY_INFO     = 0x00004824,
    GUEST_ACTIVITY_STATE            = 0x00004826,
    GUEST_SMBASE                    = 0x00004828,
    GUEST_SYSENTER_CS               = 0x0000482a,
    GUEST_PREEMPTION_TIMER          = 0x0000482e,
    HOST_SYSENTER_CS                = 0x00004c00,
    CR0_GUEST_HOST_MASK             = 0x00006000,
    CR4_GUEST_HOST_MASK             = 0x00006002,
    CR0_READ_SHADOW                 = 0x00006004,
    CR4_READ_SHADOW                 = 0x00006006,
    CR3_TARGET_VALUE0               = 0x00006008,
#define CR3_TARGET_VALUE(n) (CR3_TARGET_VALUE0 + (n) * 2) /* n < CR3_TARGET_COUNT */
    EXIT_QUALIFICATION              = 0x00006400,
    GUEST_LINEAR_ADDRESS            = 0x0000640a,
    GUEST_CR0                       = 0x00006800,
    GUEST_CR3                       = 0x00006802,
    GUEST_CR4                       = 0x00006804,
#define GUEST_SEG_BASE(sel) (GUEST_ES_BASE + (sel) * 2) /* ES ... GS */
    GUEST_ES_BASE                   = 0x00006806,
    GUEST_CS_BASE                   = 0x00006808,
    GUEST_SS_BASE                   = 0x0000680a,
    GUEST_DS_BASE                   = 0x0000680c,
    GUEST_FS_BASE                   = 0x0000680e,
    GUEST_GS_BASE                   = 0x00006810,
    GUEST_LDTR_BASE                 = 0x00006812,
    GUEST_TR_BASE                   = 0x00006814,
    GUEST_GDTR_BASE                 = 0x00006816,
    GUEST_IDTR_BASE                 = 0x00006818,
    GUEST_DR7                       = 0x0000681a,
    GUEST_RSP                       = 0x0000681c,
    GUEST_RIP                       = 0x0000681e,
    GUEST_RFLAGS                    = 0x00006820,
    GUEST_PENDING_DBG_EXCEPTIONS    = 0x00006822,
    GUEST_SYSENTER_ESP              = 0x00006824,
    GUEST_SYSENTER_EIP              = 0x00006826,
    HOST_CR0                        = 0x00006c00,
    HOST_CR3                        = 0x00006c02,
    HOST_CR4                        = 0x00006c04,
    HOST_FS_BASE                    = 0x00006c06,
    HOST_GS_BASE                    = 0x00006c08,
    HOST_TR_BASE                    = 0x00006c0a,
    HOST_GDTR_BASE                  = 0x00006c0c,
    HOST_IDTR_BASE                  = 0x00006c0e,
    HOST_SYSENTER_ESP               = 0x00006c10,
    HOST_SYSENTER_EIP               = 0x00006c12,
    HOST_RSP                        = 0x00006c14,
    HOST_RIP                        = 0x00006c16,
};
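
/*
 * Worked examples (illustrative) for the GUEST_SEG_*() helper macros above:
 * the ES, CS, SS, DS, FS and GS encodings are allocated contiguously in
 * steps of 2, in the same order as the start of enum x86_segment, so
 *
 *     GUEST_SEG_SELECTOR(x86_seg_cs) == GUEST_CS_SELECTOR  (0x00000802)
 *     GUEST_SEG_BASE(x86_seg_gs)     == GUEST_GS_BASE      (0x00006810)
 *
 * LDTR and TR fall outside the macros' range and use their named encodings.
 */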

#define VMCS_VPID_WIDTH 16

/* VM Instruction error numbers */
enum vmx_insn_errno
{
    VMX_INSN_SUCCEED                       = 0,
    VMX_INSN_VMCLEAR_INVALID_PHYADDR       = 2,
    VMX_INSN_VMCLEAR_WITH_VMXON_PTR        = 3,
    VMX_INSN_VMLAUNCH_NONCLEAR_VMCS        = 4,
    VMX_INSN_VMRESUME_NONLAUNCHED_VMCS     = 5,
    VMX_INSN_INVALID_CONTROL_STATE         = 7,
    VMX_INSN_INVALID_HOST_STATE            = 8,
    VMX_INSN_VMPTRLD_INVALID_PHYADDR       = 9,
    VMX_INSN_VMPTRLD_WITH_VMXON_PTR        = 10,
    VMX_INSN_VMPTRLD_INCORRECT_VMCS_ID     = 11,
    VMX_INSN_UNSUPPORTED_VMCS_COMPONENT    = 12,
    VMX_INSN_VMXON_IN_VMX_ROOT             = 15,
    VMX_INSN_VMENTRY_BLOCKED_BY_MOV_SS     = 26,
    VMX_INSN_INVEPT_INVVPID_INVALID_OP     = 28,
    VMX_INSN_FAIL_INVALID                  = ~0,
};

/* MSR load/save list infrastructure. */
enum vmx_msr_list_type {
    VMX_MSR_HOST,           /* MSRs loaded on VMExit.                   */
    VMX_MSR_GUEST,          /* MSRs saved on VMExit, loaded on VMEntry. */
    VMX_MSR_GUEST_LOADONLY, /* MSRs loaded on VMEntry only.             */
};

/**
 * Add an MSR to an MSR list (inserting space for the entry if necessary), and
 * set the MSR's value.
 *
 * It is undefined behaviour to try and insert the same MSR into both the
 * GUEST and GUEST_LOADONLY list.
 *
 * May fail if unable to allocate memory for the list, or the total number of
 * entries exceeds the memory allocated.
 */
int vmx_add_msr(struct vcpu *v, uint32_t msr, uint64_t val,
                enum vmx_msr_list_type type);

/**
 * Remove an MSR entry from an MSR list.  Returns -ESRCH if the MSR was not
 * found in the list.
 */
int vmx_del_msr(struct vcpu *v, uint32_t msr, enum vmx_msr_list_type type);

static inline int vmx_add_guest_msr(struct vcpu *v, uint32_t msr, uint64_t val)
{
    return vmx_add_msr(v, msr, val, VMX_MSR_GUEST);
}
static inline int vmx_add_host_load_msr(struct vcpu *v, uint32_t msr,
                                        uint64_t val)
{
    return vmx_add_msr(v, msr, val, VMX_MSR_HOST);
}

struct vmx_msr_entry *vmx_find_msr(const struct vcpu *v, uint32_t msr,
                                   enum vmx_msr_list_type type);

static inline int vmx_read_guest_msr(const struct vcpu *v, uint32_t msr,
                                     uint64_t *val)
{
    const struct vmx_msr_entry *ent = vmx_find_msr(v, msr, VMX_MSR_GUEST);

    if ( !ent )
    {
        *val = 0;
        return -ESRCH;
    }

    *val = ent->data;

    return 0;
}

static inline int vmx_read_guest_loadonly_msr(
    const struct vcpu *v, uint32_t msr, uint64_t *val)
{
    const struct vmx_msr_entry *ent =
        vmx_find_msr(v, msr, VMX_MSR_GUEST_LOADONLY);

    if ( !ent )
    {
        *val = 0;
        return -ESRCH;
    }

    *val = ent->data;

    return 0;
}

static inline int vmx_write_guest_msr(struct vcpu *v, uint32_t msr,
                                      uint64_t val)
{
    struct vmx_msr_entry *ent = vmx_find_msr(v, msr, VMX_MSR_GUEST);

    if ( !ent )
        return -ESRCH;

    ent->data = val;

    return 0;
}
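
/*
 * Illustrative use of the MSR list helpers above (the MSR chosen and the
 * error handling are only for demonstration):
 *
 *     int rc = vmx_add_guest_msr(v, MSR_IA32_LASTINTFROMIP, 0);
 *
 *     if ( !rc )
 *         rc = vmx_write_guest_msr(v, MSR_IA32_LASTINTFROMIP, val);
 *
 * A VMX_MSR_GUEST entry is saved on VM exit and reloaded on VM entry,
 * whereas a VMX_MSR_HOST entry added via vmx_add_host_load_msr() is loaded
 * on every VM exit.
 */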


/* MSR intercept bitmap infrastructure. */
enum vmx_msr_intercept_type {
    VMX_MSR_R  = 1,
    VMX_MSR_W  = 2,
    VMX_MSR_RW = VMX_MSR_R | VMX_MSR_W,
};

void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr,
                             enum vmx_msr_intercept_type type);
void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr,
                           enum vmx_msr_intercept_type type);
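
/*
 * Example (illustrative): with an MSR bitmap in use (cpu_has_vmx_msr_bitmap),
 * letting the guest read MSR_FS_BASE directly while still intercepting
 * writes would look like:
 *
 *     vmx_clear_msr_intercept(v, MSR_FS_BASE, VMX_MSR_R);
 *     vmx_set_msr_intercept(v, MSR_FS_BASE, VMX_MSR_W);
 */
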
void vmx_vmcs_switch(paddr_t from, paddr_t to);
void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector);
void vmx_clear_eoi_exit_bitmap(struct vcpu *v, u8 vector);
bool vmx_msr_is_intercepted(struct vmx_msr_bitmap *msr_bitmap,
                            unsigned int msr, bool is_write) __nonnull(1);
void virtual_vmcs_enter(const struct vcpu *);
void virtual_vmcs_exit(const struct vcpu *);
u64 virtual_vmcs_vmread(const struct vcpu *, u32 encoding);
enum vmx_insn_errno virtual_vmcs_vmread_safe(const struct vcpu *v,
                                             u32 vmcs_encoding, u64 *val);
void virtual_vmcs_vmwrite(const struct vcpu *, u32 encoding, u64 val);
enum vmx_insn_errno virtual_vmcs_vmwrite_safe(const struct vcpu *v,
                                              u32 vmcs_encoding, u64 val);

DECLARE_PER_CPU(bool_t, vmxon);

bool_t vmx_vcpu_pml_enabled(const struct vcpu *v);
int vmx_vcpu_enable_pml(struct vcpu *v);
void vmx_vcpu_disable_pml(struct vcpu *v);
void vmx_vcpu_flush_pml_buffer(struct vcpu *v);
bool_t vmx_domain_pml_enabled(const struct domain *d);
int vmx_domain_enable_pml(struct domain *d);
void vmx_domain_disable_pml(struct domain *d);
void vmx_domain_flush_pml_buffers(struct domain *d);

void vmx_domain_update_eptp(struct domain *d);

#endif /* ASM_X86_HVM_VMX_VMCS_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */