1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * tools/testing/selftests/kvm/include/x86_64/vmx.h
4  *
5  * Copyright (C) 2018, Google LLC.
6  */
7 
8 #ifndef SELFTEST_KVM_VMX_H
9 #define SELFTEST_KVM_VMX_H
10 
11 #include <stdint.h>
12 #include "processor.h"
13 #include "apic.h"
14 
/*
 * Definitions of Primary Processor-Based VM-Execution Controls
 * (SDM Vol. 3, "VM-Execution Controls").
 */
#define CPU_BASED_INTR_WINDOW_EXITING		0x00000004
#define CPU_BASED_USE_TSC_OFFSETTING		0x00000008
#define CPU_BASED_HLT_EXITING			0x00000080
#define CPU_BASED_INVLPG_EXITING		0x00000200
#define CPU_BASED_MWAIT_EXITING			0x00000400
#define CPU_BASED_RDPMC_EXITING			0x00000800
#define CPU_BASED_RDTSC_EXITING			0x00001000
#define CPU_BASED_CR3_LOAD_EXITING		0x00008000
#define CPU_BASED_CR3_STORE_EXITING		0x00010000
#define CPU_BASED_CR8_LOAD_EXITING		0x00080000
#define CPU_BASED_CR8_STORE_EXITING		0x00100000
#define CPU_BASED_TPR_SHADOW			0x00200000
#define CPU_BASED_NMI_WINDOW_EXITING		0x00400000
#define CPU_BASED_MOV_DR_EXITING		0x00800000
#define CPU_BASED_UNCOND_IO_EXITING		0x01000000
#define CPU_BASED_USE_IO_BITMAPS		0x02000000
#define CPU_BASED_MONITOR_TRAP			0x08000000
#define CPU_BASED_USE_MSR_BITMAPS		0x10000000
#define CPU_BASED_MONITOR_EXITING		0x20000000
#define CPU_BASED_PAUSE_EXITING			0x40000000
#define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS	0x80000000

/*
 * "Default1" bits: controls that must be set when the TRUE
 * IA32_VMX_TRUE_PROCBASED_CTLS MSR is not consulted.
 */
#define CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR	0x0401e172
41 
/*
 * Definitions of Secondary Processor-Based VM-Execution Controls.
 *
 * NOTE(review): SECONDARY_EPT_VE and SECONDARY_ENABLE_XSAV_RESTORE
 * deviate from the SECONDARY_EXEC_ prefix used by every other entry;
 * the names are kept as-is for existing callers.
 */
#define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
#define SECONDARY_EXEC_ENABLE_EPT		0x00000002
#define SECONDARY_EXEC_DESC			0x00000004
#define SECONDARY_EXEC_ENABLE_RDTSCP		0x00000008
#define SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE	0x00000010
#define SECONDARY_EXEC_ENABLE_VPID		0x00000020
#define SECONDARY_EXEC_WBINVD_EXITING		0x00000040
#define SECONDARY_EXEC_UNRESTRICTED_GUEST	0x00000080
#define SECONDARY_EXEC_APIC_REGISTER_VIRT	0x00000100
#define SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY	0x00000200
#define SECONDARY_EXEC_PAUSE_LOOP_EXITING	0x00000400
#define SECONDARY_EXEC_RDRAND_EXITING		0x00000800
#define SECONDARY_EXEC_ENABLE_INVPCID		0x00001000
#define SECONDARY_EXEC_ENABLE_VMFUNC		0x00002000
#define SECONDARY_EXEC_SHADOW_VMCS		0x00004000
#define SECONDARY_EXEC_RDSEED_EXITING		0x00010000
#define SECONDARY_EXEC_ENABLE_PML		0x00020000
#define SECONDARY_EPT_VE			0x00040000
#define SECONDARY_ENABLE_XSAV_RESTORE		0x00100000
#define SECONDARY_EXEC_TSC_SCALING		0x02000000
65 
/*
 * Definitions of Pin-Based VM-Execution Controls.
 */
#define PIN_BASED_EXT_INTR_MASK			0x00000001
#define PIN_BASED_NMI_EXITING			0x00000008
#define PIN_BASED_VIRTUAL_NMIS			0x00000020
#define PIN_BASED_VMX_PREEMPTION_TIMER		0x00000040
#define PIN_BASED_POSTED_INTR			0x00000080

/* "Default1" bits when the TRUE pin-based controls MSR is not used. */
#define PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR	0x00000016

/*
 * Definitions of VM-Exit Controls.
 */
#define VM_EXIT_SAVE_DEBUG_CONTROLS		0x00000004
#define VM_EXIT_HOST_ADDR_SPACE_SIZE		0x00000200
#define VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL	0x00001000
#define VM_EXIT_ACK_INTR_ON_EXIT		0x00008000
#define VM_EXIT_SAVE_IA32_PAT			0x00040000
#define VM_EXIT_LOAD_IA32_PAT			0x00080000
#define VM_EXIT_SAVE_IA32_EFER			0x00100000
#define VM_EXIT_LOAD_IA32_EFER			0x00200000
#define VM_EXIT_SAVE_VMX_PREEMPTION_TIMER	0x00400000

/* "Default1" bits when the TRUE exit controls MSR is not used. */
#define VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR	0x00036dff

/*
 * Definitions of VM-Entry Controls.
 */
#define VM_ENTRY_LOAD_DEBUG_CONTROLS		0x00000004
#define VM_ENTRY_IA32E_MODE			0x00000200
#define VM_ENTRY_SMM				0x00000400
#define VM_ENTRY_DEACT_DUAL_MONITOR		0x00000800
#define VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL	0x00002000
#define VM_ENTRY_LOAD_IA32_PAT			0x00004000
#define VM_ENTRY_LOAD_IA32_EFER			0x00008000

/* "Default1" bits when the TRUE entry controls MSR is not used. */
#define VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR	0x000011ff

/* Fields of the IA32_VMX_MISC capability MSR. */
#define VMX_MISC_PREEMPTION_TIMER_RATE_MASK	0x0000001f
#define VMX_MISC_SAVE_EFER_LMA			0x00000020
98 
/*
 * Basic VM-exit reasons, as reported in the low 16 bits of the
 * VM_EXIT_REASON VMCS field.  EXIT_REASON_FAILED_VMENTRY is bit 31
 * of the exit reason (the "VM-entry failure" flag), not a basic
 * exit reason.
 */
#define EXIT_REASON_FAILED_VMENTRY	0x80000000
#define EXIT_REASON_EXCEPTION_NMI	0
#define EXIT_REASON_EXTERNAL_INTERRUPT	1
#define EXIT_REASON_TRIPLE_FAULT	2
#define EXIT_REASON_INTERRUPT_WINDOW	7
#define EXIT_REASON_NMI_WINDOW		8
#define EXIT_REASON_TASK_SWITCH		9
#define EXIT_REASON_CPUID		10
#define EXIT_REASON_HLT			12
#define EXIT_REASON_INVD		13
#define EXIT_REASON_INVLPG		14
#define EXIT_REASON_RDPMC		15
#define EXIT_REASON_RDTSC		16
#define EXIT_REASON_VMCALL		18
#define EXIT_REASON_VMCLEAR		19
#define EXIT_REASON_VMLAUNCH		20
#define EXIT_REASON_VMPTRLD		21
#define EXIT_REASON_VMPTRST		22
#define EXIT_REASON_VMREAD		23
#define EXIT_REASON_VMRESUME		24
#define EXIT_REASON_VMWRITE		25
#define EXIT_REASON_VMOFF		26
#define EXIT_REASON_VMON		27
#define EXIT_REASON_CR_ACCESS		28
#define EXIT_REASON_DR_ACCESS		29
#define EXIT_REASON_IO_INSTRUCTION	30
#define EXIT_REASON_MSR_READ		31
#define EXIT_REASON_MSR_WRITE		32
#define EXIT_REASON_INVALID_STATE	33
#define EXIT_REASON_MWAIT_INSTRUCTION	36
#define EXIT_REASON_MONITOR_INSTRUCTION 39
#define EXIT_REASON_PAUSE_INSTRUCTION	40
#define EXIT_REASON_MCE_DURING_VMENTRY	41
#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
#define EXIT_REASON_APIC_ACCESS		44
#define EXIT_REASON_EOI_INDUCED		45
#define EXIT_REASON_EPT_VIOLATION	48
#define EXIT_REASON_EPT_MISCONFIG	49
#define EXIT_REASON_INVEPT		50
#define EXIT_REASON_RDTSCP		51
#define EXIT_REASON_PREEMPTION_TIMER	52
#define EXIT_REASON_INVVPID		53
#define EXIT_REASON_WBINVD		54
#define EXIT_REASON_XSETBV		55
#define EXIT_REASON_APIC_WRITE		56
#define EXIT_REASON_INVPCID		58
#define EXIT_REASON_PML_FULL		62
#define EXIT_REASON_XSAVES		63
#define EXIT_REASON_XRSTORS		64
/* Highest basic exit reason defined above. */
#define LAST_EXIT_REASON		64
149 
/*
 * VMCS field encodings (the "encoding" argument to vmread()/vmwrite()).
 * The encoding embeds the field width and class; "_HIGH" entries are
 * the high-dword aliases of the 64-bit fields.
 */
enum vmcs_field {
	/* 16-bit control fields */
	VIRTUAL_PROCESSOR_ID		= 0x00000000,
	POSTED_INTR_NV			= 0x00000002,
	/* 16-bit guest-state fields */
	GUEST_ES_SELECTOR		= 0x00000800,
	GUEST_CS_SELECTOR		= 0x00000802,
	GUEST_SS_SELECTOR		= 0x00000804,
	GUEST_DS_SELECTOR		= 0x00000806,
	GUEST_FS_SELECTOR		= 0x00000808,
	GUEST_GS_SELECTOR		= 0x0000080a,
	GUEST_LDTR_SELECTOR		= 0x0000080c,
	GUEST_TR_SELECTOR		= 0x0000080e,
	GUEST_INTR_STATUS		= 0x00000810,
	GUEST_PML_INDEX			= 0x00000812,
	/* 16-bit host-state fields */
	HOST_ES_SELECTOR		= 0x00000c00,
	HOST_CS_SELECTOR		= 0x00000c02,
	HOST_SS_SELECTOR		= 0x00000c04,
	HOST_DS_SELECTOR		= 0x00000c06,
	HOST_FS_SELECTOR		= 0x00000c08,
	HOST_GS_SELECTOR		= 0x00000c0a,
	HOST_TR_SELECTOR		= 0x00000c0c,
	/* 64-bit control fields */
	IO_BITMAP_A			= 0x00002000,
	IO_BITMAP_A_HIGH		= 0x00002001,
	IO_BITMAP_B			= 0x00002002,
	IO_BITMAP_B_HIGH		= 0x00002003,
	MSR_BITMAP			= 0x00002004,
	MSR_BITMAP_HIGH			= 0x00002005,
	VM_EXIT_MSR_STORE_ADDR		= 0x00002006,
	VM_EXIT_MSR_STORE_ADDR_HIGH	= 0x00002007,
	VM_EXIT_MSR_LOAD_ADDR		= 0x00002008,
	VM_EXIT_MSR_LOAD_ADDR_HIGH	= 0x00002009,
	VM_ENTRY_MSR_LOAD_ADDR		= 0x0000200a,
	VM_ENTRY_MSR_LOAD_ADDR_HIGH	= 0x0000200b,
	PML_ADDRESS			= 0x0000200e,
	PML_ADDRESS_HIGH		= 0x0000200f,
	TSC_OFFSET			= 0x00002010,
	TSC_OFFSET_HIGH			= 0x00002011,
	VIRTUAL_APIC_PAGE_ADDR		= 0x00002012,
	VIRTUAL_APIC_PAGE_ADDR_HIGH	= 0x00002013,
	APIC_ACCESS_ADDR		= 0x00002014,
	APIC_ACCESS_ADDR_HIGH		= 0x00002015,
	POSTED_INTR_DESC_ADDR		= 0x00002016,
	POSTED_INTR_DESC_ADDR_HIGH	= 0x00002017,
	EPT_POINTER			= 0x0000201a,
	EPT_POINTER_HIGH		= 0x0000201b,
	EOI_EXIT_BITMAP0		= 0x0000201c,
	EOI_EXIT_BITMAP0_HIGH		= 0x0000201d,
	EOI_EXIT_BITMAP1		= 0x0000201e,
	EOI_EXIT_BITMAP1_HIGH		= 0x0000201f,
	EOI_EXIT_BITMAP2		= 0x00002020,
	EOI_EXIT_BITMAP2_HIGH		= 0x00002021,
	EOI_EXIT_BITMAP3		= 0x00002022,
	EOI_EXIT_BITMAP3_HIGH		= 0x00002023,
	VMREAD_BITMAP			= 0x00002026,
	VMREAD_BITMAP_HIGH		= 0x00002027,
	VMWRITE_BITMAP			= 0x00002028,
	VMWRITE_BITMAP_HIGH		= 0x00002029,
	XSS_EXIT_BITMAP			= 0x0000202C,
	XSS_EXIT_BITMAP_HIGH		= 0x0000202D,
	TSC_MULTIPLIER			= 0x00002032,
	TSC_MULTIPLIER_HIGH		= 0x00002033,
	/* 64-bit read-only data field */
	GUEST_PHYSICAL_ADDRESS		= 0x00002400,
	GUEST_PHYSICAL_ADDRESS_HIGH	= 0x00002401,
	/* 64-bit guest-state fields */
	VMCS_LINK_POINTER		= 0x00002800,
	VMCS_LINK_POINTER_HIGH		= 0x00002801,
	GUEST_IA32_DEBUGCTL		= 0x00002802,
	GUEST_IA32_DEBUGCTL_HIGH	= 0x00002803,
	GUEST_IA32_PAT			= 0x00002804,
	GUEST_IA32_PAT_HIGH		= 0x00002805,
	GUEST_IA32_EFER			= 0x00002806,
	GUEST_IA32_EFER_HIGH		= 0x00002807,
	GUEST_IA32_PERF_GLOBAL_CTRL	= 0x00002808,
	GUEST_IA32_PERF_GLOBAL_CTRL_HIGH= 0x00002809,
	GUEST_PDPTR0			= 0x0000280a,
	GUEST_PDPTR0_HIGH		= 0x0000280b,
	GUEST_PDPTR1			= 0x0000280c,
	GUEST_PDPTR1_HIGH		= 0x0000280d,
	GUEST_PDPTR2			= 0x0000280e,
	GUEST_PDPTR2_HIGH		= 0x0000280f,
	GUEST_PDPTR3			= 0x00002810,
	GUEST_PDPTR3_HIGH		= 0x00002811,
	GUEST_BNDCFGS			= 0x00002812,
	GUEST_BNDCFGS_HIGH		= 0x00002813,
	/* 64-bit host-state fields */
	HOST_IA32_PAT			= 0x00002c00,
	HOST_IA32_PAT_HIGH		= 0x00002c01,
	HOST_IA32_EFER			= 0x00002c02,
	HOST_IA32_EFER_HIGH		= 0x00002c03,
	HOST_IA32_PERF_GLOBAL_CTRL	= 0x00002c04,
	HOST_IA32_PERF_GLOBAL_CTRL_HIGH	= 0x00002c05,
	/* 32-bit control fields */
	PIN_BASED_VM_EXEC_CONTROL	= 0x00004000,
	CPU_BASED_VM_EXEC_CONTROL	= 0x00004002,
	EXCEPTION_BITMAP		= 0x00004004,
	PAGE_FAULT_ERROR_CODE_MASK	= 0x00004006,
	PAGE_FAULT_ERROR_CODE_MATCH	= 0x00004008,
	CR3_TARGET_COUNT		= 0x0000400a,
	VM_EXIT_CONTROLS		= 0x0000400c,
	VM_EXIT_MSR_STORE_COUNT		= 0x0000400e,
	VM_EXIT_MSR_LOAD_COUNT		= 0x00004010,
	VM_ENTRY_CONTROLS		= 0x00004012,
	VM_ENTRY_MSR_LOAD_COUNT		= 0x00004014,
	VM_ENTRY_INTR_INFO_FIELD	= 0x00004016,
	VM_ENTRY_EXCEPTION_ERROR_CODE	= 0x00004018,
	VM_ENTRY_INSTRUCTION_LEN	= 0x0000401a,
	TPR_THRESHOLD			= 0x0000401c,
	SECONDARY_VM_EXEC_CONTROL	= 0x0000401e,
	PLE_GAP				= 0x00004020,
	PLE_WINDOW			= 0x00004022,
	/* 32-bit read-only data fields */
	VM_INSTRUCTION_ERROR		= 0x00004400,
	VM_EXIT_REASON			= 0x00004402,
	VM_EXIT_INTR_INFO		= 0x00004404,
	VM_EXIT_INTR_ERROR_CODE		= 0x00004406,
	IDT_VECTORING_INFO_FIELD	= 0x00004408,
	IDT_VECTORING_ERROR_CODE	= 0x0000440a,
	VM_EXIT_INSTRUCTION_LEN		= 0x0000440c,
	VMX_INSTRUCTION_INFO		= 0x0000440e,
	/* 32-bit guest-state fields */
	GUEST_ES_LIMIT			= 0x00004800,
	GUEST_CS_LIMIT			= 0x00004802,
	GUEST_SS_LIMIT			= 0x00004804,
	GUEST_DS_LIMIT			= 0x00004806,
	GUEST_FS_LIMIT			= 0x00004808,
	GUEST_GS_LIMIT			= 0x0000480a,
	GUEST_LDTR_LIMIT		= 0x0000480c,
	GUEST_TR_LIMIT			= 0x0000480e,
	GUEST_GDTR_LIMIT		= 0x00004810,
	GUEST_IDTR_LIMIT		= 0x00004812,
	GUEST_ES_AR_BYTES		= 0x00004814,
	GUEST_CS_AR_BYTES		= 0x00004816,
	GUEST_SS_AR_BYTES		= 0x00004818,
	GUEST_DS_AR_BYTES		= 0x0000481a,
	GUEST_FS_AR_BYTES		= 0x0000481c,
	GUEST_GS_AR_BYTES		= 0x0000481e,
	GUEST_LDTR_AR_BYTES		= 0x00004820,
	GUEST_TR_AR_BYTES		= 0x00004822,
	GUEST_INTERRUPTIBILITY_INFO	= 0x00004824,
	GUEST_ACTIVITY_STATE		= 0X00004826,
	GUEST_SYSENTER_CS		= 0x0000482A,
	VMX_PREEMPTION_TIMER_VALUE	= 0x0000482E,
	/* 32-bit host-state field */
	HOST_IA32_SYSENTER_CS		= 0x00004c00,
	/* natural-width control fields */
	CR0_GUEST_HOST_MASK		= 0x00006000,
	CR4_GUEST_HOST_MASK		= 0x00006002,
	CR0_READ_SHADOW			= 0x00006004,
	CR4_READ_SHADOW			= 0x00006006,
	CR3_TARGET_VALUE0		= 0x00006008,
	CR3_TARGET_VALUE1		= 0x0000600a,
	CR3_TARGET_VALUE2		= 0x0000600c,
	CR3_TARGET_VALUE3		= 0x0000600e,
	/* natural-width read-only data fields */
	EXIT_QUALIFICATION		= 0x00006400,
	GUEST_LINEAR_ADDRESS		= 0x0000640a,
	/* natural-width guest-state fields */
	GUEST_CR0			= 0x00006800,
	GUEST_CR3			= 0x00006802,
	GUEST_CR4			= 0x00006804,
	GUEST_ES_BASE			= 0x00006806,
	GUEST_CS_BASE			= 0x00006808,
	GUEST_SS_BASE			= 0x0000680a,
	GUEST_DS_BASE			= 0x0000680c,
	GUEST_FS_BASE			= 0x0000680e,
	GUEST_GS_BASE			= 0x00006810,
	GUEST_LDTR_BASE			= 0x00006812,
	GUEST_TR_BASE			= 0x00006814,
	GUEST_GDTR_BASE			= 0x00006816,
	GUEST_IDTR_BASE			= 0x00006818,
	GUEST_DR7			= 0x0000681a,
	GUEST_RSP			= 0x0000681c,
	GUEST_RIP			= 0x0000681e,
	GUEST_RFLAGS			= 0x00006820,
	GUEST_PENDING_DBG_EXCEPTIONS	= 0x00006822,
	GUEST_SYSENTER_ESP		= 0x00006824,
	GUEST_SYSENTER_EIP		= 0x00006826,
	/* natural-width host-state fields */
	HOST_CR0			= 0x00006c00,
	HOST_CR3			= 0x00006c02,
	HOST_CR4			= 0x00006c04,
	HOST_FS_BASE			= 0x00006c06,
	HOST_GS_BASE			= 0x00006c08,
	HOST_TR_BASE			= 0x00006c0a,
	HOST_GDTR_BASE			= 0x00006c0c,
	HOST_IDTR_BASE			= 0x00006c0e,
	HOST_IA32_SYSENTER_ESP		= 0x00006c10,
	HOST_IA32_SYSENTER_EIP		= 0x00006c12,
	HOST_RSP			= 0x00006c14,
	HOST_RIP			= 0x00006c16,
};
330 
/*
 * Format of an entry in the VM-entry/VM-exit MSR-load and MSR-store
 * areas.  Entries are 16 bytes and 16-byte aligned.
 */
struct vmx_msr_entry {
	uint32_t index;		/* MSR index */
	uint32_t reserved;	/* reserved, must be zero */
	uint64_t value;		/* MSR value to load or store */
} __attribute__ ((aligned(16)));
336 
337 #include "evmcs.h"
338 
/*
 * Enter VMX root operation using the VMXON region at physical address
 * @phys.  Returns 0 on success, 1 on failure (SETNA captures the
 * CF/ZF failure indication left by VMXON).
 */
static inline int vmxon(uint64_t phys)
{
	uint8_t err;

	__asm__ __volatile__ ("vmxon %[pa]; setna %[ret]"
		: [ret]"=rm"(err)
		: [pa]"m"(phys)
		: "cc", "memory");

	return err;
}
350 
/* Leave VMX operation (executes VMXOFF). */
static inline void vmxoff(void)
{
	__asm__ __volatile__("vmxoff");
}
355 
/*
 * VMCLEAR the VMCS at physical address @vmcs_pa, making it inactive
 * and not-current.  Returns 0 on success, 1 on failure (via SETNA on
 * the CF/ZF failure indication).
 */
static inline int vmclear(uint64_t vmcs_pa)
{
	uint8_t err;

	__asm__ __volatile__ ("vmclear %[pa]; setna %[ret]"
		: [ret]"=rm"(err)
		: [pa]"m"(vmcs_pa)
		: "cc", "memory");

	return err;
}
367 
vmptrld(uint64_t vmcs_pa)368 static inline int vmptrld(uint64_t vmcs_pa)
369 {
370 	uint8_t ret;
371 
372 	if (enable_evmcs)
373 		return -1;
374 
375 	__asm__ __volatile__ ("vmptrld %[pa]; setna %[ret]"
376 		: [ret]"=rm"(ret)
377 		: [pa]"m"(vmcs_pa)
378 		: "cc", "memory");
379 
380 	return ret;
381 }
382 
/*
 * Store the current-VMCS pointer into *value (VMPTRST), or defer to
 * the enlightened-VMCS helper when eVMCS is enabled.  Returns 0 on
 * success, non-zero on failure.
 */
static inline int vmptrst(uint64_t *value)
{
	uint64_t tmp;
	uint8_t ret;

	if (enable_evmcs)
		return evmcs_vmptrst(value);

	/* VMPTRST writes the pointer to the memory operand; SETNA
	 * converts the CF/ZF failure indication into 0/1. */
	__asm__ __volatile__("vmptrst %[value]; setna %[ret]"
		: [value]"=m"(tmp), [ret]"=rm"(ret)
		: : "cc", "memory");

	*value = tmp;
	return ret;
}
398 
/*
 * A wrapper around vmptrst that ignores errors and returns zero if the
 * vmptrst instruction fails.
 */
static inline uint64_t vmptrstz(void)
{
	uint64_t current = 0;

	(void)vmptrst(&current);
	return current;
}
409 
/*
 * Launch the current VMCS.  No guest state (e.g. GPRs) is established
 * by this vmlaunch; the guest starts with whatever the VMCS guest-state
 * area holds.
 *
 * Returns 0 when VMLAUNCH succeeded and a subsequent VM-exit returned
 * control to the "1:" label; non-zero when VMLAUNCH itself failed.
 */
static inline int vmlaunch(void)
{
	int ret;

	if (enable_evmcs)
		return evmcs_vmlaunch();

	/*
	 * Save registers the compiler may rely on (rbp, rcx, rdx, rsi,
	 * rdi), then push a 0 that doubles as the return value.
	 * HOST_RSP/HOST_RIP are vmwritten so that a VM-exit resumes at
	 * "1:" with this same stack, where the 0 is popped into rax
	 * (ret = 0).  If VMLAUNCH fails it falls through to the incq,
	 * bumping the pushed 0 to 1 before it is popped (ret = 1).
	 */
	__asm__ __volatile__("push %%rbp;"
			     "push %%rcx;"
			     "push %%rdx;"
			     "push %%rsi;"
			     "push %%rdi;"
			     "push $0;"
			     "vmwrite %%rsp, %[host_rsp];"
			     "lea 1f(%%rip), %%rax;"
			     "vmwrite %%rax, %[host_rip];"
			     "vmlaunch;"
			     "incq (%%rsp);"
			     "1: pop %%rax;"
			     "pop %%rdi;"
			     "pop %%rsi;"
			     "pop %%rdx;"
			     "pop %%rcx;"
			     "pop %%rbp;"
			     : [ret]"=&a"(ret)
			     : [host_rsp]"r"((uint64_t)HOST_RSP),
			       [host_rip]"r"((uint64_t)HOST_RIP)
			     : "memory", "cc", "rbx", "r8", "r9", "r10",
			       "r11", "r12", "r13", "r14", "r15");
	return ret;
}
444 
/*
 * Resume the current VMCS.  No guest state (e.g. GPRs) is established
 * by this vmresume; the guest continues with the VMCS guest-state area.
 *
 * Returns 0 when VMRESUME succeeded and a subsequent VM-exit returned
 * control to the "1:" label; non-zero when VMRESUME itself failed.
 * The stack/return-value trick is identical to vmlaunch(): a pushed 0
 * is popped into rax after the exit, and incq turns it into 1 on the
 * instruction-failure fallthrough path.
 */
static inline int vmresume(void)
{
	int ret;

	if (enable_evmcs)
		return evmcs_vmresume();

	__asm__ __volatile__("push %%rbp;"
			     "push %%rcx;"
			     "push %%rdx;"
			     "push %%rsi;"
			     "push %%rdi;"
			     "push $0;"
			     "vmwrite %%rsp, %[host_rsp];"
			     "lea 1f(%%rip), %%rax;"
			     "vmwrite %%rax, %[host_rip];"
			     "vmresume;"
			     "incq (%%rsp);"
			     "1: pop %%rax;"
			     "pop %%rdi;"
			     "pop %%rsi;"
			     "pop %%rdx;"
			     "pop %%rcx;"
			     "pop %%rbp;"
			     : [ret]"=&a"(ret)
			     : [host_rsp]"r"((uint64_t)HOST_RSP),
			       [host_rip]"r"((uint64_t)HOST_RIP)
			     : "memory", "cc", "rbx", "r8", "r9", "r10",
			       "r11", "r12", "r13", "r14", "r15");
	return ret;
}
479 
/*
 * Issue a VMCALL from guest code.  All GPRs except rbp/rsp are listed
 * as clobbers because the hypervisor side does not preserve them.
 */
static inline void vmcall(void)
{
	/* Currently, L1 destroys our GPRs during vmexits.  */
	__asm__ __volatile__("push %%rbp; vmcall; pop %%rbp" : : :
			     "rax", "rbx", "rcx", "rdx",
			     "rsi", "rdi", "r8", "r9", "r10", "r11", "r12",
			     "r13", "r14", "r15");
}
488 
vmread(uint64_t encoding,uint64_t * value)489 static inline int vmread(uint64_t encoding, uint64_t *value)
490 {
491 	uint64_t tmp;
492 	uint8_t ret;
493 
494 	if (enable_evmcs)
495 		return evmcs_vmread(encoding, value);
496 
497 	__asm__ __volatile__("vmread %[encoding], %[value]; setna %[ret]"
498 		: [value]"=rm"(tmp), [ret]"=rm"(ret)
499 		: [encoding]"r"(encoding)
500 		: "cc", "memory");
501 
502 	*value = tmp;
503 	return ret;
504 }
505 
/*
 * A wrapper around vmread that ignores errors and returns zero if the
 * vmread instruction fails.
 */
static inline uint64_t vmreadz(uint64_t encoding)
{
	uint64_t field = 0;

	(void)vmread(encoding, &field);
	return field;
}
516 
vmwrite(uint64_t encoding,uint64_t value)517 static inline int vmwrite(uint64_t encoding, uint64_t value)
518 {
519 	uint8_t ret;
520 
521 	if (enable_evmcs)
522 		return evmcs_vmwrite(encoding, value);
523 
524 	__asm__ __volatile__ ("vmwrite %[value], %[encoding]; setna %[ret]"
525 		: [ret]"=rm"(ret)
526 		: [value]"rm"(value), [encoding]"r"(encoding)
527 		: "cc", "memory");
528 
529 	return ret;
530 }
531 
vmcs_revision(void)532 static inline uint32_t vmcs_revision(void)
533 {
534 	return rdmsr(MSR_IA32_VMX_BASIC);
535 }
536 
/*
 * Per-VM collection of pages needed to run a nested VMX guest.  Each
 * resource is tracked three ways: the host-virtual mapping (*_hva),
 * the guest-physical address (*_gpa), and a pointer usable from guest
 * code (presumably the guest-virtual address set up by
 * vcpu_alloc_vmx() — confirm against that implementation).
 */
struct vmx_pages {
	/* VMXON region */
	void *vmxon_hva;
	uint64_t vmxon_gpa;
	void *vmxon;

	/* VMCS */
	void *vmcs_hva;
	uint64_t vmcs_gpa;
	void *vmcs;

	/* MSR bitmap */
	void *msr_hva;
	uint64_t msr_gpa;
	void *msr;

	/* shadow VMCS */
	void *shadow_vmcs_hva;
	uint64_t shadow_vmcs_gpa;
	void *shadow_vmcs;

	/* VMREAD bitmap */
	void *vmread_hva;
	uint64_t vmread_gpa;
	void *vmread;

	/* VMWRITE bitmap */
	void *vmwrite_hva;
	uint64_t vmwrite_gpa;
	void *vmwrite;

	/* Hyper-V VP assist page (enlightened VMCS support) */
	void *vp_assist_hva;
	uint64_t vp_assist_gpa;
	void *vp_assist;

	/* enlightened VMCS */
	void *enlightened_vmcs_hva;
	uint64_t enlightened_vmcs_gpa;
	void *enlightened_vmcs;

	/* EPT page tables root */
	void *eptp_hva;
	uint64_t eptp_gpa;
	void *eptp;

	/* APIC-access page */
	void *apic_access_hva;
	uint64_t apic_access_gpa;
	void *apic_access;
};
578 
/*
 * Decoded layout of the IA32_VMX_BASIC capability MSR.
 * Field comments reflect the bit positions implied by the layout;
 * confirm against the SDM's IA32_VMX_BASIC description.
 */
union vmx_basic {
	u64 val;
	struct {
		u32 revision;			/* bits 31:0: VMCS revision id */
		u32	size:13,		/* bits 44:32: VMXON/VMCS region size */
			reserved1:3,
			width:1,		/* bit 48: 32-bit phys addr limit */
			dual:1,			/* bit 49: dual-monitor SMM support */
			type:4,			/* bits 53:50: VMCS memory type */
			insouts:1,		/* bit 54: INS/OUTS exit info */
			ctrl:1,			/* bit 55: TRUE ctls MSRs available */
			vm_entry_exception_ctrl:1,
			reserved2:7;
	};
};
594 
/*
 * Layout of a VMX control capability MSR (IA32_VMX_*_CTLS): per the
 * usual convention, the low dword reports controls that must be set
 * and the high dword controls that are allowed to be set — confirm
 * against the SDM before relying on the field names.
 */
union vmx_ctrl_msr {
	u64 val;
	struct {
		u32 set, clr;
	};
};
601 
/* Allocate the vmx_pages tracking structure; stores its guest vaddr in *p_vmx_gva. */
struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva);
/* Enter VMX operation (VMXON etc.); returns success/failure. */
bool prepare_for_vmx_operation(struct vmx_pages *vmx);
/* Initialize the VMCS for a guest starting at guest_rip with stack guest_rsp. */
void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
/* Clear and load the VMCS; returns success/failure. */
bool load_vmcs(struct vmx_pages *vmx);

/* Query/require nested VMX support on the host. */
bool nested_vmx_supported(void);
void nested_vmx_check_supported(void);

/* EPT mapping helpers: map nested (L2) physical addresses onto L1 physical. */
void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
		   uint64_t nested_paddr, uint64_t paddr);
void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
		 uint64_t nested_paddr, uint64_t paddr, uint64_t size);
/* Map an entire memslot 1:1 into the nested page tables. */
void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
			uint32_t memslot);
/* Allocate and set up the EPT pointer / APIC-access page. */
void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
		  uint32_t eptp_memslot);
void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm);
619 
620 #endif /* SELFTEST_KVM_VMX_H */
621