1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2021, Red Hat, Inc.
4 *
5 * Tests for Hyper-V features enablement
6 */
7 #include <asm/kvm_para.h>
8 #include <linux/kvm_para.h>
9 #include <stdint.h>
10
11 #include "test_util.h"
12 #include "kvm_util.h"
13 #include "processor.h"
14 #include "hyperv.h"
15
#define VCPU_ID 0
/* "Linux guest" OS identity (vendor 0x8100) written to HV_X64_MSR_GUEST_OS_ID */
#define LINUX_OS_ID ((u64)0x8100 << 48)

/* Labels bracketing the instrumented RDMSR; the #GP handler uses them to
 * recognize an expected fault and skip past the instruction.
 */
extern unsigned char rdmsr_start;
extern unsigned char rdmsr_end;
21
do_rdmsr(u32 idx)22 static u64 do_rdmsr(u32 idx)
23 {
24 u32 lo, hi;
25
26 asm volatile("rdmsr_start: rdmsr;"
27 "rdmsr_end:"
28 : "=a"(lo), "=c"(hi)
29 : "c"(idx));
30
31 return (((u64) hi) << 32) | lo;
32 }
33
/* Labels bracketing the instrumented WRMSR, mirroring rdmsr_start/rdmsr_end */
extern unsigned char wrmsr_start;
extern unsigned char wrmsr_end;
36
/*
 * Write 'val' to MSR 'idx' with a bare WRMSR; the wrmsr_start/wrmsr_end
 * labels let guest_gp_handler() identify and skip the instruction when
 * the access is expected to fault.
 */
static void do_wrmsr(u32 idx, u64 val)
{
	/* WRMSR takes the value in EDX:EAX and the index in ECX. */
	u32 eax = val;
	u32 edx = val >> 32;

	asm volatile("wrmsr_start: wrmsr;"
		     "wrmsr_end:"
		     : : "a"(eax), "c"(idx), "d"(edx));
}
48
/* Guest-side fault counters: #GP hits around rdmsr/wrmsr, #UD hits on vmcall */
static int nr_gp;
static int nr_ud;
51
/*
 * Issue a Hyper-V hypercall via VMCALL: control code in RCX, input GPA in
 * RDX, output GPA in R8; the hypercall status is returned in RAX.
 */
static inline u64 hypercall(u64 control, vm_vaddr_t input_address,
			    vm_vaddr_t output_address)
{
	u64 hv_status;

	asm volatile("mov %3, %%r8\n"
		     "vmcall"
		     : "=a" (hv_status),
		       "+c" (control), "+d" (input_address)
		     : "r" (output_address)
		     : "cc", "memory", "r8", "r9", "r10", "r11");

	return hv_status;
}
66
guest_gp_handler(struct ex_regs * regs)67 static void guest_gp_handler(struct ex_regs *regs)
68 {
69 unsigned char *rip = (unsigned char *)regs->rip;
70 bool r, w;
71
72 r = rip == &rdmsr_start;
73 w = rip == &wrmsr_start;
74 GUEST_ASSERT(r || w);
75
76 nr_gp++;
77
78 if (r)
79 regs->rip = (uint64_t)&rdmsr_end;
80 else
81 regs->rip = (uint64_t)&wrmsr_end;
82 }
83
guest_ud_handler(struct ex_regs * regs)84 static void guest_ud_handler(struct ex_regs *regs)
85 {
86 nr_ud++;
87 regs->rip += 3;
88 }
89
/* Per-stage MSR test description, shared between host and guest. */
struct msr_data {
	uint32_t idx;		/* MSR index to access; 0 terminates the test */
	bool available;		/* access should succeed (no #GP expected) */
	bool write;		/* true: wrmsr write_val; false: rdmsr */
	u64 write_val;		/* value to write when 'write' is set */
};
96
/* Per-stage hypercall test description, shared between host and guest. */
struct hcall_data {
	uint64_t control;	/* hypercall control code; 0 terminates */
	uint64_t expect;	/* expected hypercall status */
	bool ud_expected;	/* true if the VMCALL should raise #UD */
};
102
guest_msr(struct msr_data * msr)103 static void guest_msr(struct msr_data *msr)
104 {
105 int i = 0;
106
107 while (msr->idx) {
108 WRITE_ONCE(nr_gp, 0);
109 if (!msr->write)
110 do_rdmsr(msr->idx);
111 else
112 do_wrmsr(msr->idx, msr->write_val);
113
114 if (msr->available)
115 GUEST_ASSERT(READ_ONCE(nr_gp) == 0);
116 else
117 GUEST_ASSERT(READ_ONCE(nr_gp) == 1);
118
119 GUEST_SYNC(i++);
120 }
121
122 GUEST_DONE();
123 }
124
/*
 * Guest entry point for the hypercall tests: enables the hypercall page,
 * issues the hypercall described by *hcall and checks either the returned
 * status or that the VMCALL raised #UD, then syncs with the host.
 * control == 0 ends the test.
 */
static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)
{
	u64 status, in_gpa, out_gpa;
	int stage = 0;

	/* GUEST_OS_ID must be set before the hypercall page is enabled. */
	wrmsr(HV_X64_MSR_GUEST_OS_ID, LINUX_OS_ID);
	wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);

	while (hcall->control) {
		nr_ud = 0;

		if (hcall->control & HV_HYPERCALL_FAST_BIT) {
			/* Fast hypercalls don't use memory-based parameters. */
			in_gpa = 0;
			out_gpa = 0;
		} else {
			in_gpa = pgs_gpa;
			out_gpa = pgs_gpa + 4096;
		}

		status = hypercall(hcall->control, in_gpa, out_gpa);

		if (hcall->ud_expected)
			GUEST_ASSERT(nr_ud == 1);
		else
			GUEST_ASSERT(status == hcall->expect);

		GUEST_SYNC(stage++);
	}

	GUEST_DONE();
}
153
hv_set_cpuid(struct kvm_vm * vm,struct kvm_cpuid2 * cpuid,struct kvm_cpuid_entry2 * feat,struct kvm_cpuid_entry2 * recomm,struct kvm_cpuid_entry2 * dbg)154 static void hv_set_cpuid(struct kvm_vm *vm, struct kvm_cpuid2 *cpuid,
155 struct kvm_cpuid_entry2 *feat,
156 struct kvm_cpuid_entry2 *recomm,
157 struct kvm_cpuid_entry2 *dbg)
158 {
159 TEST_ASSERT(set_cpuid(cpuid, feat),
160 "failed to set KVM_CPUID_FEATURES leaf");
161 TEST_ASSERT(set_cpuid(cpuid, recomm),
162 "failed to set HYPERV_CPUID_ENLIGHTMENT_INFO leaf");
163 TEST_ASSERT(set_cpuid(cpuid, dbg),
164 "failed to set HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES leaf");
165 vcpu_set_cpuid(vm, VCPU_ID, cpuid);
166 }
167
/*
 * Host-side driver for the MSR access tests. For each stage a fresh VM is
 * created, the Hyper-V CPUID leaves (whose feature bits accumulate across
 * stages) and a per-stage msr_data descriptor are installed, and guest_msr()
 * is run once. The typical pattern per MSR: first verify it #GPs without
 * the feature bit, then set the bit and verify access succeeds.
 */
static void guest_test_msrs_access(void)
{
	struct kvm_run *run;
	struct kvm_vm *vm;
	struct ucall uc;
	int stage = 0, r;
	/* Test-controlled Hyper-V CPUID leaves; bits carry over between stages */
	struct kvm_cpuid_entry2 feat = {
		.function = HYPERV_CPUID_FEATURES
	};
	struct kvm_cpuid_entry2 recomm = {
		.function = HYPERV_CPUID_ENLIGHTMENT_INFO
	};
	struct kvm_cpuid_entry2 dbg = {
		.function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES
	};
	struct kvm_cpuid2 *best;
	vm_vaddr_t msr_gva;
	/* Make KVM filter Hyper-V MSR access by guest-visible CPUID */
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_HYPERV_ENFORCE_CPUID,
		.args = {1}
	};
	struct msr_data *msr;

	while (true) {
		vm = vm_create_default(VCPU_ID, 0, guest_msr);

		/* Shared page the guest reads its per-stage test data from */
		msr_gva = vm_vaddr_alloc_page(vm);
		memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize());
		msr = addr_gva2hva(vm, msr_gva);

		vcpu_args_set(vm, VCPU_ID, 1, msr_gva);
		vcpu_enable_cap(vm, VCPU_ID, &cap);

		vcpu_set_hv_cpuid(vm, VCPU_ID);

		best = kvm_get_supported_hv_cpuid();

		vm_init_descriptor_tables(vm);
		vcpu_init_descriptor_tables(vm, VCPU_ID);
		vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);

		run = vcpu_state(vm, VCPU_ID);

		/*
		 * NOTE: msr->idx/write/write_val persist across stages
		 * (re-written into the fresh VM's page each iteration), so
		 * stages that omit a field deliberately reuse the previous
		 * stage's value.
		 */
		switch (stage) {
		case 0:
			/*
			 * Only available when Hyper-V identification is set
			 */
			msr->idx = HV_X64_MSR_GUEST_OS_ID;
			msr->write = 0;
			msr->available = 0;
			break;
		case 1:
			msr->idx = HV_X64_MSR_HYPERCALL;
			msr->write = 0;
			msr->available = 0;
			break;
		case 2:
			feat.eax |= HV_MSR_HYPERCALL_AVAILABLE;
			/*
			 * HV_X64_MSR_GUEST_OS_ID has to be written first to make
			 * HV_X64_MSR_HYPERCALL available.
			 */
			msr->idx = HV_X64_MSR_GUEST_OS_ID;
			msr->write = 1;
			msr->write_val = LINUX_OS_ID;
			msr->available = 1;
			break;
		case 3:
			msr->idx = HV_X64_MSR_GUEST_OS_ID;
			msr->write = 0;
			msr->available = 1;
			break;
		case 4:
			msr->idx = HV_X64_MSR_HYPERCALL;
			msr->write = 0;
			msr->available = 1;
			break;

		case 5:
			msr->idx = HV_X64_MSR_VP_RUNTIME;
			msr->write = 0;
			msr->available = 0;
			break;
		case 6:
			feat.eax |= HV_MSR_VP_RUNTIME_AVAILABLE;
			msr->write = 0;
			msr->available = 1;
			break;
		case 7:
			/* Read only */
			msr->write = 1;
			msr->write_val = 1;
			msr->available = 0;
			break;

		case 8:
			msr->idx = HV_X64_MSR_TIME_REF_COUNT;
			msr->write = 0;
			msr->available = 0;
			break;
		case 9:
			feat.eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE;
			msr->write = 0;
			msr->available = 1;
			break;
		case 10:
			/* Read only */
			msr->write = 1;
			msr->write_val = 1;
			msr->available = 0;
			break;

		case 11:
			msr->idx = HV_X64_MSR_VP_INDEX;
			msr->write = 0;
			msr->available = 0;
			break;
		case 12:
			feat.eax |= HV_MSR_VP_INDEX_AVAILABLE;
			msr->write = 0;
			msr->available = 1;
			break;
		case 13:
			/* Read only */
			msr->write = 1;
			msr->write_val = 1;
			msr->available = 0;
			break;

		case 14:
			msr->idx = HV_X64_MSR_RESET;
			msr->write = 0;
			msr->available = 0;
			break;
		case 15:
			feat.eax |= HV_MSR_RESET_AVAILABLE;
			msr->write = 0;
			msr->available = 1;
			break;
		case 16:
			/* Writing '0' is a no-op reset request and must succeed */
			msr->write = 1;
			msr->write_val = 0;
			msr->available = 1;
			break;

		case 17:
			msr->idx = HV_X64_MSR_REFERENCE_TSC;
			msr->write = 0;
			msr->available = 0;
			break;
		case 18:
			feat.eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
			msr->write = 0;
			msr->available = 1;
			break;
		case 19:
			msr->write = 1;
			msr->write_val = 0;
			msr->available = 1;
			break;

		case 20:
			msr->idx = HV_X64_MSR_EOM;
			msr->write = 0;
			msr->available = 0;
			break;
		case 21:
			/*
			 * Remains unavailable even with KVM_CAP_HYPERV_SYNIC2
			 * capability enabled and guest visible CPUID bit unset.
			 */
			cap.cap = KVM_CAP_HYPERV_SYNIC2;
			cap.args[0] = 0;
			vcpu_enable_cap(vm, VCPU_ID, &cap);
			break;
		case 22:
			feat.eax |= HV_MSR_SYNIC_AVAILABLE;
			msr->write = 0;
			msr->available = 1;
			break;
		case 23:
			msr->write = 1;
			msr->write_val = 0;
			msr->available = 1;
			break;

		case 24:
			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
			msr->write = 0;
			msr->available = 0;
			break;
		case 25:
			feat.eax |= HV_MSR_SYNTIMER_AVAILABLE;
			msr->write = 0;
			msr->available = 1;
			break;
		case 26:
			msr->write = 1;
			msr->write_val = 0;
			msr->available = 1;
			break;
		case 27:
			/* Direct mode test */
			msr->write = 1;
			msr->write_val = 1 << 12;
			msr->available = 0;
			break;
		case 28:
			feat.edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;
			msr->available = 1;
			break;

		case 29:
			msr->idx = HV_X64_MSR_EOI;
			msr->write = 0;
			msr->available = 0;
			break;
		case 30:
			feat.eax |= HV_MSR_APIC_ACCESS_AVAILABLE;
			msr->write = 1;
			msr->write_val = 1;
			msr->available = 1;
			break;

		case 31:
			msr->idx = HV_X64_MSR_TSC_FREQUENCY;
			msr->write = 0;
			msr->available = 0;
			break;
		case 32:
			feat.eax |= HV_ACCESS_FREQUENCY_MSRS;
			msr->write = 0;
			msr->available = 1;
			break;
		case 33:
			/* Read only */
			msr->write = 1;
			msr->write_val = 1;
			msr->available = 0;
			break;

		case 34:
			msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
			msr->write = 0;
			msr->available = 0;
			break;
		case 35:
			feat.eax |= HV_ACCESS_REENLIGHTENMENT;
			msr->write = 0;
			msr->available = 1;
			break;
		case 36:
			msr->write = 1;
			msr->write_val = 1;
			msr->available = 1;
			break;
		case 37:
			/* Can only write '0' */
			msr->idx = HV_X64_MSR_TSC_EMULATION_STATUS;
			msr->write = 1;
			msr->write_val = 1;
			msr->available = 0;
			break;

		case 38:
			msr->idx = HV_X64_MSR_CRASH_P0;
			msr->write = 0;
			msr->available = 0;
			break;
		case 39:
			feat.edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
			msr->write = 0;
			msr->available = 1;
			break;
		case 40:
			msr->write = 1;
			msr->write_val = 1;
			msr->available = 1;
			break;

		case 41:
			msr->idx = HV_X64_MSR_SYNDBG_STATUS;
			msr->write = 0;
			msr->available = 0;
			break;
		case 42:
			feat.edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;
			dbg.eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
			msr->write = 0;
			msr->available = 1;
			break;
		case 43:
			msr->write = 1;
			msr->write_val = 0;
			msr->available = 1;
			break;

		case 44:
			/* END: idx == 0 makes guest_msr() exit via GUEST_DONE */
			msr->idx = 0;
			break;
		}

		hv_set_cpuid(vm, best, &feat, &recomm, &dbg);

		if (msr->idx)
			pr_debug("Stage %d: testing msr: 0x%x for %s\n", stage,
				 msr->idx, msr->write ? "write" : "read");
		else
			pr_debug("Stage %d: finish\n", stage);

		r = _vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(!r, "vcpu_run failed: %d\n", r);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "unexpected exit reason: %u (%s)",
			    run->exit_reason, exit_reason_str(run->exit_reason));

		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_SYNC:
			/*
			 * The guest restarts from scratch each iteration, so
			 * its internal sync counter is always 0.
			 */
			TEST_ASSERT(uc.args[1] == 0,
				    "Unexpected stage: %ld (0 expected)\n",
				    uc.args[1]);
			break;
		case UCALL_ABORT:
			TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
				  __FILE__, uc.args[1]);
			return;
		case UCALL_DONE:
			/* NOTE(review): the final VM is not freed here — leaked
			 * at process exit; harmless for a selftest. */
			return;
		}

		stage++;
		kvm_vm_free(vm);
	}
}
504
/*
 * Host-side driver for the hypercall access tests. Mirrors
 * guest_test_msrs_access(): one fresh VM per stage, accumulating Hyper-V
 * CPUID feature/recommendation bits, with guest_hcall() issuing the
 * hypercall described by the shared hcall_data page. The usual pattern:
 * first verify ACCESS_DENIED without the privilege bit, then set the bit
 * and verify the expected (non-denied) status.
 */
static void guest_test_hcalls_access(void)
{
	struct kvm_run *run;
	struct kvm_vm *vm;
	struct ucall uc;
	int stage = 0, r;
	/* Hypercall interface itself is always exposed to the guest */
	struct kvm_cpuid_entry2 feat = {
		.function = HYPERV_CPUID_FEATURES,
		.eax = HV_MSR_HYPERCALL_AVAILABLE
	};
	struct kvm_cpuid_entry2 recomm = {
		.function = HYPERV_CPUID_ENLIGHTMENT_INFO
	};
	struct kvm_cpuid_entry2 dbg = {
		.function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES
	};
	/* Make KVM filter hypercalls by guest-visible CPUID */
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_HYPERV_ENFORCE_CPUID,
		.args = {1}
	};
	vm_vaddr_t hcall_page, hcall_params;
	struct hcall_data *hcall;
	struct kvm_cpuid2 *best;

	while (true) {
		vm = vm_create_default(VCPU_ID, 0, guest_hcall);

		vm_init_descriptor_tables(vm);
		vcpu_init_descriptor_tables(vm, VCPU_ID);
		vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);

		/* Hypercall input/output */
		hcall_page = vm_vaddr_alloc_pages(vm, 2);
		hcall = addr_gva2hva(vm, hcall_page);
		memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());

		/* Shared page the guest reads its per-stage test data from */
		hcall_params = vm_vaddr_alloc_page(vm);
		memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize());

		vcpu_args_set(vm, VCPU_ID, 2, addr_gva2gpa(vm, hcall_page), hcall_params);
		vcpu_enable_cap(vm, VCPU_ID, &cap);

		vcpu_set_hv_cpuid(vm, VCPU_ID);

		best = kvm_get_supported_hv_cpuid();

		run = vcpu_state(vm, VCPU_ID);

		/* 'control' carries over between stages unless re-assigned */
		switch (stage) {
		case 0:
			hcall->control = 0xdeadbeef;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE;
			break;

		case 1:
			hcall->control = HVCALL_POST_MESSAGE;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 2:
			feat.ebx |= HV_POST_MESSAGES;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;

		case 3:
			hcall->control = HVCALL_SIGNAL_EVENT;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 4:
			feat.ebx |= HV_SIGNAL_EVENTS;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;

		case 5:
			hcall->control = HVCALL_RESET_DEBUG_SESSION;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE;
			break;
		case 6:
			dbg.eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 7:
			feat.ebx |= HV_DEBUGGING;
			hcall->expect = HV_STATUS_OPERATION_DENIED;
			break;

		case 8:
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 9:
			recomm.eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
			hcall->expect = HV_STATUS_SUCCESS;
			break;
		case 10:
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 11:
			recomm.eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
			hcall->expect = HV_STATUS_SUCCESS;
			break;

		case 12:
			hcall->control = HVCALL_SEND_IPI;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 13:
			recomm.eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		case 14:
			/* Nothing in 'sparse banks' -> success */
			hcall->control = HVCALL_SEND_IPI_EX;
			hcall->expect = HV_STATUS_SUCCESS;
			break;

		case 15:
			hcall->control = HVCALL_NOTIFY_LONG_SPIN_WAIT;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 16:
			recomm.ebx = 0xfff;
			hcall->expect = HV_STATUS_SUCCESS;
			break;
		case 17:
			/* XMM fast hypercall */
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT;
			hcall->ud_expected = true;
			break;
		case 18:
			feat.edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE;
			hcall->ud_expected = false;
			hcall->expect = HV_STATUS_SUCCESS;
			break;

		case 19:
			/* END: control == 0 makes guest_hcall() exit via GUEST_DONE */
			hcall->control = 0;
			break;
		}

		hv_set_cpuid(vm, best, &feat, &recomm, &dbg);

		if (hcall->control)
			pr_debug("Stage %d: testing hcall: 0x%lx\n", stage,
				 hcall->control);
		else
			pr_debug("Stage %d: finish\n", stage);

		r = _vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(!r, "vcpu_run failed: %d\n", r);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "unexpected exit reason: %u (%s)",
			    run->exit_reason, exit_reason_str(run->exit_reason));

		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_SYNC:
			/*
			 * The guest restarts from scratch each iteration, so
			 * its internal sync counter is always 0.
			 */
			TEST_ASSERT(uc.args[1] == 0,
				    "Unexpected stage: %ld (0 expected)\n",
				    uc.args[1]);
			break;
		case UCALL_ABORT:
			TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
				  __FILE__, uc.args[1]);
			return;
		case UCALL_DONE:
			/* NOTE(review): the final VM is not freed here — leaked
			 * at process exit; harmless for a selftest. */
			return;
		}

		stage++;
		kvm_vm_free(vm);
	}
}
678
/* Run both test suites: Hyper-V MSR filtering, then hypercall filtering. */
int main(void)
{
	pr_info("Testing access to Hyper-V specific MSRs\n");
	guest_test_msrs_access();

	pr_info("Testing access to Hyper-V hypercalls\n");
	guest_test_hcalls_access();

	return 0;
}
687