/**************************************************************************
 * viridian.c
 *
 * An implementation of some Viridian enlightenments. See Microsoft's
 * Hypervisor Top Level Functional Specification for more information.
 */

#include <xen/sched.h>
#include <xen/version.h>
#include <xen/hypercall.h>
#include <xen/domain_page.h>
#include <xen/param.h>
#include <asm/guest_access.h>
#include <asm/guest/hyperv-tlfs.h>
#include <asm/paging.h>
#include <asm/p2m.h>
#include <asm/apic.h>
#include <asm/hvm/support.h>
#include <public/sched.h>
#include <public/hvm/hvm_op.h>

#include "private.h"

/* Viridian Partition Privilege Flags */
typedef struct {
    /* Access to virtual MSRs */
    uint64_t AccessVpRunTimeReg:1;
    uint64_t AccessPartitionReferenceCounter:1;
    uint64_t AccessSynicRegs:1;
    uint64_t AccessSyntheticTimerRegs:1;
    uint64_t AccessIntrCtrlRegs:1;
    uint64_t AccessHypercallMsrs:1;
    uint64_t AccessVpIndex:1;
    uint64_t AccessResetReg:1;
    uint64_t AccessStatsReg:1;
    uint64_t AccessPartitionReferenceTsc:1;
    uint64_t AccessGuestIdleReg:1;
    uint64_t AccessFrequencyRegs:1;
    uint64_t AccessDebugRegs:1;
    uint64_t Reserved1:19;

    /* Access to hypercalls */
    uint64_t CreatePartitions:1;
    uint64_t AccessPartitionId:1;
    uint64_t AccessMemoryPool:1;
    uint64_t AdjustMessageBuffers:1;
    uint64_t PostMessages:1;
    uint64_t SignalEvents:1;
    uint64_t CreatePort:1;
    uint64_t ConnectPort:1;
    uint64_t AccessStats:1;
    uint64_t Reserved2:2;
    uint64_t Debugging:1;
    uint64_t CpuManagement:1;
    uint64_t Reserved3:1;
    uint64_t Reserved4:1;
    uint64_t Reserved5:1;
    uint64_t AccessVSM:1;
    uint64_t AccessVpRegisters:1;
    uint64_t Reserved6:1;
    uint64_t Reserved7:1;
    uint64_t EnableExtendedHypercalls:1;
    uint64_t StartVirtualProcessor:1;
    uint64_t Reserved8:10;
} HV_PARTITION_PRIVILEGE_MASK;

typedef union _HV_CRASH_CTL_REG_CONTENTS
{
    uint64_t AsUINT64;
    struct
    {
        uint64_t Reserved:63;
        uint64_t CrashNotify:1;
    } u;
} HV_CRASH_CTL_REG_CONTENTS;

/* Viridian CPUID leaf 3, Hypervisor Feature Indication */
#define CPUID3D_CRASH_MSRS   (1 << 10)
#define CPUID3D_SINT_POLLING (1 << 17)

/* Viridian CPUID leaf 4: Implementation Recommendations. */
#define CPUID4A_HCALL_REMOTE_TLB_FLUSH (1 << 2)
#define CPUID4A_MSR_BASED_APIC         (1 << 3)
#define CPUID4A_RELAX_TIMER_INT        (1 << 5)
#define CPUID4A_SYNTHETIC_CLUSTER_IPI  (1 << 10)

/* Viridian CPUID leaf 6: Implementation HW features detected and in use */
#define CPUID6A_APIC_OVERLAY  (1 << 0)
#define CPUID6A_MSR_BITMAPS   (1 << 1)
#define CPUID6A_NESTED_PAGING (1 << 3)

/*
 * Version and build number reported by CPUID leaf 2
 *
 * These numbers are chosen to match the version numbers reported by
 * Windows Server 2008.
 */
static uint16_t __read_mostly viridian_major = 6;
static uint16_t __read_mostly viridian_minor = 0;
static uint32_t __read_mostly viridian_build = 0x1772;
/*
 * Maximum number of retries before the guest notifies the hypervisor
 * of its failure to acquire a spinlock.
 */
static uint32_t __read_mostly viridian_spinlock_retry_count = 2047;
integer_param("viridian-spinlock-retry-count",
              viridian_spinlock_retry_count);
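
/*
 * For example, booting Xen with "viridian-spinlock-retry-count=4095"
 * (an illustrative value, not a recommendation) roughly doubles the
 * retry count reported to the guest in CPUID leaf 4 EBX below.
 */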

void cpuid_viridian_leaves(const struct vcpu *v, uint32_t leaf,
                           uint32_t subleaf, struct cpuid_leaf *res)
{
    const struct domain *d = v->domain;
    const struct viridian_domain *vd = d->arch.hvm.viridian;

    ASSERT(is_viridian_domain(d));
    ASSERT(leaf >= 0x40000000 && leaf < 0x40000100);

    leaf -= 0x40000000;

    switch ( leaf )
    {
    case 0:
        res->a = 0x40000006; /* Maximum leaf */
        memcpy(&res->b, "Micr", 4);
        memcpy(&res->c, "osof", 4);
        memcpy(&res->d, "t Hv", 4);
        break;

    case 1:
        memcpy(&res->a, "Hv#1", 4);
        break;

    case 2:
        /*
         * Hypervisor information, but only if the guest has set its
         * own version number.
         */
        if ( vd->guest_os_id.raw == 0 )
            break;
        res->a = viridian_build;
        res->b = ((uint32_t)viridian_major << 16) | viridian_minor;
        res->c = 0; /* SP */
        res->d = 0; /* Service branch and number */
        break;

    case 3:
    {
        /*
         * The specification states that EAX and EBX are defined to be
         * the low and high parts of the partition privilege mask
         * respectively.
         */
        HV_PARTITION_PRIVILEGE_MASK mask = {
            .AccessIntrCtrlRegs = 1,
            .AccessHypercallMsrs = 1,
            .AccessVpIndex = 1,
        };
        union {
            HV_PARTITION_PRIVILEGE_MASK mask;
            struct { uint32_t lo, hi; };
        } u;

        if ( !(viridian_feature_mask(d) & HVMPV_no_freq) )
            mask.AccessFrequencyRegs = 1;
        if ( viridian_feature_mask(d) & HVMPV_time_ref_count )
            mask.AccessPartitionReferenceCounter = 1;
        if ( viridian_feature_mask(d) & HVMPV_reference_tsc )
            mask.AccessPartitionReferenceTsc = 1;
        if ( viridian_feature_mask(d) & HVMPV_synic )
            mask.AccessSynicRegs = 1;
        if ( viridian_feature_mask(d) & HVMPV_stimer )
            mask.AccessSyntheticTimerRegs = 1;

        u.mask = mask;

        res->a = u.lo;
        res->b = u.hi;

        if ( viridian_feature_mask(d) & HVMPV_crash_ctl )
            res->d = CPUID3D_CRASH_MSRS;
        if ( viridian_feature_mask(d) & HVMPV_synic )
            res->d |= CPUID3D_SINT_POLLING;

        break;
    }

    case 4:
        /* Recommended hypercall usage. */
        if ( vd->guest_os_id.raw == 0 || vd->guest_os_id.os < 4 )
            break;
        res->a = CPUID4A_RELAX_TIMER_INT;
        if ( viridian_feature_mask(d) & HVMPV_hcall_remote_tlb_flush )
            res->a |= CPUID4A_HCALL_REMOTE_TLB_FLUSH;
        if ( !cpu_has_vmx_apic_reg_virt )
            res->a |= CPUID4A_MSR_BASED_APIC;
        if ( viridian_feature_mask(d) & HVMPV_hcall_ipi )
            res->a |= CPUID4A_SYNTHETIC_CLUSTER_IPI;

        /*
         * This value is the recommended number of attempts to try to
         * acquire a spinlock before notifying the hypervisor via the
         * HVCALL_NOTIFY_LONG_SPIN_WAIT hypercall.
         */
        res->b = viridian_spinlock_retry_count;
        break;

    case 6:
        /* Detected and in use hardware features. */
        if ( cpu_has_vmx_virtualize_apic_accesses )
            res->a |= CPUID6A_APIC_OVERLAY;
        if ( cpu_has_vmx_msr_bitmap || (read_efer() & EFER_SVME) )
            res->a |= CPUID6A_MSR_BITMAPS;
        if ( hap_enabled(d) )
            res->a |= CPUID6A_NESTED_PAGING;
        break;
    }
}

static void dump_guest_os_id(const struct domain *d)
{
    const union hv_guest_os_id *goi;

    goi = &d->arch.hvm.viridian->guest_os_id;

    printk(XENLOG_G_INFO
           "d%d: VIRIDIAN GUEST_OS_ID: vendor: %x os: %x major: %x minor: %x sp: %x build: %x\n",
           d->domain_id, goi->vendor, goi->os, goi->major, goi->minor,
           goi->service_pack, goi->build_number);
}

static void dump_hypercall(const struct domain *d)
{
    const union hv_vp_assist_page_msr *hg;

    hg = &d->arch.hvm.viridian->hypercall_gpa;

    printk(XENLOG_G_INFO "d%d: VIRIDIAN HYPERCALL: enabled: %x pfn: %lx\n",
           d->domain_id,
           hg->enabled, (unsigned long)hg->pfn);
}

static void enable_hypercall_page(struct domain *d)
{
    unsigned long gmfn = d->arch.hvm.viridian->hypercall_gpa.pfn;
    struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
    uint8_t *p;

    if ( !page || !get_page_type(page, PGT_writable_page) )
    {
        if ( page )
            put_page(page);
        gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
                 gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
        return;
    }

    p = __map_domain_page(page);

    /*
     * We set bit 31 in %eax (a reserved field in the Viridian hypercall
     * calling convention) to differentiate Xen and Viridian hypercalls.
     */
    *(u8 *)(p + 0) = 0x0d; /* orl $0x80000000, %eax */
    *(u32 *)(p + 1) = 0x80000000;
    *(u8 *)(p + 5) = 0x0f; /* vmcall/vmmcall */
    *(u8 *)(p + 6) = 0x01;
    *(u8 *)(p + 7) = (cpu_has_vmx ? 0xc1 : 0xd9);
    *(u8 *)(p + 8) = 0xc3; /* ret */
    memset(p + 9, 0xcc, PAGE_SIZE - 9); /* int3, int3, ... */
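
    /*
     * The stub assembled above is therefore (on VMX; SVM differs only
     * in using vmmcall, opcode 0f 01 d9):
     *
     *   0d 00 00 00 80    orl    $0x80000000, %eax
     *   0f 01 c1          vmcall
     *   c3                ret
     *
     * with the rest of the page filled with int3 to trap any stray
     * execution.
     */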

    unmap_domain_page(p);

    put_page_and_type(page);
}

int guest_wrmsr_viridian(struct vcpu *v, uint32_t idx, uint64_t val)
{
    struct viridian_vcpu *vv = v->arch.hvm.viridian;
    struct domain *d = v->domain;
    struct viridian_domain *vd = d->arch.hvm.viridian;

    ASSERT(is_viridian_domain(d));

    switch ( idx )
    {
    case HV_X64_MSR_GUEST_OS_ID:
        vd->guest_os_id.raw = val;
        dump_guest_os_id(d);
        break;

    case HV_X64_MSR_HYPERCALL:
        vd->hypercall_gpa.raw = val;
        dump_hypercall(d);
        if ( vd->hypercall_gpa.enabled )
            enable_hypercall_page(d);
        break;

    case HV_X64_MSR_VP_INDEX:
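        /* Writes to the VP index are silently ignored. */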
        break;

    case HV_X64_MSR_EOI:
    case HV_X64_MSR_ICR:
    case HV_X64_MSR_TPR:
    case HV_X64_MSR_VP_ASSIST_PAGE:
    case HV_X64_MSR_SCONTROL:
    case HV_X64_MSR_SVERSION:
    case HV_X64_MSR_SIEFP:
    case HV_X64_MSR_SIMP:
    case HV_X64_MSR_EOM:
    case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
        return viridian_synic_wrmsr(v, idx, val);

    case HV_X64_MSR_TSC_FREQUENCY:
    case HV_X64_MSR_APIC_FREQUENCY:
    case HV_X64_MSR_REFERENCE_TSC:
    case HV_X64_MSR_TIME_REF_COUNT:
    case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
        return viridian_time_wrmsr(v, idx, val);

    case HV_X64_MSR_CRASH_P0:
    case HV_X64_MSR_CRASH_P1:
    case HV_X64_MSR_CRASH_P2:
    case HV_X64_MSR_CRASH_P3:
    case HV_X64_MSR_CRASH_P4:
        BUILD_BUG_ON(HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0 >=
                     ARRAY_SIZE(vv->crash_param));

        idx -= HV_X64_MSR_CRASH_P0;
        vv->crash_param[idx] = val;
        break;

    case HV_X64_MSR_CRASH_CTL:
    {
        HV_CRASH_CTL_REG_CONTENTS ctl;

        ctl.AsUINT64 = val;

        if ( !ctl.u.CrashNotify )
            break;

        spin_lock(&d->shutdown_lock);
        d->shutdown_code = SHUTDOWN_crash;
        spin_unlock(&d->shutdown_lock);

        gprintk(XENLOG_WARNING, "VIRIDIAN CRASH: %lx %lx %lx %lx %lx\n",
                vv->crash_param[0], vv->crash_param[1], vv->crash_param[2],
                vv->crash_param[3], vv->crash_param[4]);
        break;
    }

    default:
        gdprintk(XENLOG_INFO,
                 "Write %016"PRIx64" to unimplemented MSR %#x\n", val,
                 idx);
        return X86EMUL_EXCEPTION;
    }

    return X86EMUL_OKAY;
}

int guest_rdmsr_viridian(const struct vcpu *v, uint32_t idx, uint64_t *val)
{
    const struct viridian_vcpu *vv = v->arch.hvm.viridian;
    const struct domain *d = v->domain;
    const struct viridian_domain *vd = d->arch.hvm.viridian;

    ASSERT(is_viridian_domain(d));

    switch ( idx )
    {
    case HV_X64_MSR_GUEST_OS_ID:
        *val = vd->guest_os_id.raw;
        break;

    case HV_X64_MSR_HYPERCALL:
        *val = vd->hypercall_gpa.raw;
        break;

    case HV_X64_MSR_VP_INDEX:
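        /* The VP index maps directly onto Xen's vcpu_id. */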
        *val = v->vcpu_id;
        break;

    case HV_X64_MSR_EOI:
    case HV_X64_MSR_ICR:
    case HV_X64_MSR_TPR:
    case HV_X64_MSR_VP_ASSIST_PAGE:
    case HV_X64_MSR_SCONTROL:
    case HV_X64_MSR_SVERSION:
    case HV_X64_MSR_SIEFP:
    case HV_X64_MSR_SIMP:
    case HV_X64_MSR_EOM:
    case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
        return viridian_synic_rdmsr(v, idx, val);

    case HV_X64_MSR_TSC_FREQUENCY:
    case HV_X64_MSR_APIC_FREQUENCY:
    case HV_X64_MSR_REFERENCE_TSC:
    case HV_X64_MSR_TIME_REF_COUNT:
    case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
        return viridian_time_rdmsr(v, idx, val);

    case HV_X64_MSR_CRASH_P0:
    case HV_X64_MSR_CRASH_P1:
    case HV_X64_MSR_CRASH_P2:
    case HV_X64_MSR_CRASH_P3:
    case HV_X64_MSR_CRASH_P4:
        BUILD_BUG_ON(HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0 >=
                     ARRAY_SIZE(vv->crash_param));

        idx -= HV_X64_MSR_CRASH_P0;
        *val = vv->crash_param[idx];
        break;

    case HV_X64_MSR_CRASH_CTL:
    {
        HV_CRASH_CTL_REG_CONTENTS ctl = {
            .u.CrashNotify = 1,
        };

        *val = ctl.AsUINT64;
        break;
    }

    default:
        gdprintk(XENLOG_INFO, "Read from unimplemented MSR %#x\n", idx);
        return X86EMUL_EXCEPTION;
    }

    return X86EMUL_OKAY;
}

int viridian_vcpu_init(struct vcpu *v)
{
    int rc;

    ASSERT(!v->arch.hvm.viridian);
    v->arch.hvm.viridian = xzalloc(struct viridian_vcpu);
    if ( !v->arch.hvm.viridian )
        return -ENOMEM;

    rc = viridian_synic_vcpu_init(v);
    if ( rc )
        goto fail;

    rc = viridian_time_vcpu_init(v);
    if ( rc )
        goto fail;

    return 0;

 fail:
    viridian_vcpu_deinit(v);

    return rc;
}

int viridian_domain_init(struct domain *d)
{
    int rc;

    ASSERT(!d->arch.hvm.viridian);
    d->arch.hvm.viridian = xzalloc(struct viridian_domain);
    if ( !d->arch.hvm.viridian )
        return -ENOMEM;

    rc = viridian_synic_domain_init(d);
    if ( rc )
        goto fail;

    rc = viridian_time_domain_init(d);
    if ( rc )
        goto fail;

    return 0;

 fail:
    viridian_domain_deinit(d);

    return rc;
}

void viridian_vcpu_deinit(struct vcpu *v)
{
    if ( !v->arch.hvm.viridian )
        return;

    viridian_time_vcpu_deinit(v);
    viridian_synic_vcpu_deinit(v);

    XFREE(v->arch.hvm.viridian);
}

void viridian_domain_deinit(struct domain *d)
{
    struct vcpu *v;

    for_each_vcpu ( d, v )
        viridian_vcpu_deinit(v);

    if ( !d->arch.hvm.viridian )
        return;

    viridian_time_domain_deinit(d);
    viridian_synic_domain_deinit(d);

    XFREE(d->arch.hvm.viridian);
}

/*
 * Windows should not issue the hypercalls requiring this callback in the
 * case where vcpu_id would exceed the size of the mask.
 */
static bool need_flush(void *ctxt, struct vcpu *v)
{
    uint64_t vcpu_mask = *(uint64_t *)ctxt;

    return vcpu_mask & (1ul << v->vcpu_id);
}

int viridian_hypercall(struct cpu_user_regs *regs)
{
    struct vcpu *curr = current;
    struct domain *currd = curr->domain;
    int mode = hvm_guest_x86_mode(curr);
    unsigned long input_params_gpa, output_params_gpa;
    uint16_t status = HV_STATUS_SUCCESS;

    union hypercall_input {
        uint64_t raw;
        struct {
            uint16_t call_code;
            uint16_t fast:1;
            uint16_t rsvd1:15;
            uint16_t rep_count:12;
            uint16_t rsvd2:4;
            uint16_t rep_start:12;
            uint16_t rsvd3:4;
        };
    } input;

    union hypercall_output {
        uint64_t raw;
        struct {
            uint16_t result;
            uint16_t rsvd1;
            uint32_t rep_complete:12;
            uint32_t rsvd2:20;
        };
    } output = { 0 };

    ASSERT(is_viridian_domain(currd));

    switch ( mode )
    {
    case 8:
        input.raw = regs->rcx;
        input_params_gpa = regs->rdx;
        output_params_gpa = regs->r8;
        break;
    case 4:
        input.raw = (regs->rdx << 32) | regs->eax;
        input_params_gpa = (regs->rbx << 32) | regs->ecx;
        output_params_gpa = (regs->rdi << 32) | regs->esi;
        break;
    default:
        goto out;
    }
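
    /*
     * In the 64-bit convention above the call code arrives in RCX with
     * the parameter-area GPAs in RDX and R8; in the 32-bit convention
     * each 64-bit value is split across EDX:EAX, EBX:ECX and EDI:ESI
     * respectively.
     */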

    switch ( input.call_code )
    {
    case HVCALL_NOTIFY_LONG_SPIN_WAIT:
        /*
         * See section 14.5.1 of the specification.
         */
        do_sched_op(SCHEDOP_yield, guest_handle_from_ptr(NULL, void));
        status = HV_STATUS_SUCCESS;
        break;

    case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
    case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
    {
        struct {
            uint64_t address_space;
            uint64_t flags;
            uint64_t vcpu_mask;
        } input_params;

        /* These hypercalls should never use the fast-call convention. */
        status = HV_STATUS_INVALID_PARAMETER;
        if ( input.fast )
            break;

        /* Get input parameters. */
        if ( hvm_copy_from_guest_phys(&input_params, input_params_gpa,
                                      sizeof(input_params)) !=
             HVMTRANS_okay )
            break;

        /*
         * It is not clear from the spec whether we are supposed to
         * include the current virtual CPU in the set or not in this
         * case, so err on the safe side.
         */
        if ( input_params.flags & HV_FLUSH_ALL_PROCESSORS )
            input_params.vcpu_mask = ~0ul;

        /*
         * A false return means that another vcpu is currently trying
         * a similar operation, so back off.
         */
        if ( !paging_flush_tlb(need_flush, &input_params.vcpu_mask) )
            return HVM_HCALL_preempted;

        output.rep_complete = input.rep_count;

        status = HV_STATUS_SUCCESS;
        break;
    }

    case HVCALL_SEND_IPI:
    {
        struct vcpu *v;
        uint32_t vector;
        uint64_t vcpu_mask;

        status = HV_STATUS_INVALID_PARAMETER;

        /* Get input parameters. */
        if ( input.fast )
        {
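            /*
             * For a fast call the parameter registers carry the values
             * directly rather than GPAs: the vector in the low 32 bits
             * of the input register and the vcpu mask in the output
             * register.
             */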
            if ( input_params_gpa >> 32 )
                break;

            vector = input_params_gpa;
            vcpu_mask = output_params_gpa;
        }
        else
        {
            struct {
                uint32_t vector;
                uint8_t target_vtl;
                uint8_t reserved_zero[3];
                uint64_t vcpu_mask;
            } input_params;

            if ( hvm_copy_from_guest_phys(&input_params, input_params_gpa,
                                          sizeof(input_params)) !=
                 HVMTRANS_okay )
                break;

            if ( input_params.target_vtl ||
                 input_params.reserved_zero[0] ||
                 input_params.reserved_zero[1] ||
                 input_params.reserved_zero[2] )
                break;

            vector = input_params.vector;
            vcpu_mask = input_params.vcpu_mask;
        }

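        /* Vectors below 0x10 are architecturally reserved. */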
        if ( vector < 0x10 || vector > 0xff )
            break;

        for_each_vcpu ( currd, v )
        {
            if ( v->vcpu_id >= (sizeof(vcpu_mask) * 8) )
                break;

            if ( !(vcpu_mask & (1ul << v->vcpu_id)) )
                continue;

            vlapic_set_irq(vcpu_vlapic(v), vector, 0);
        }

        status = HV_STATUS_SUCCESS;
        break;
    }

    default:
        gprintk(XENLOG_WARNING, "unimplemented hypercall %04x\n",
                input.call_code);
        /* Fallthrough. */
    case HVCALL_EXT_CALL_QUERY_CAPABILITIES:
        /*
         * This hypercall seems to be erroneously issued by Windows
         * despite EnableExtendedHypercalls not being set in CPUID leaf 3.
         * Given that returning a status of 'invalid code' has not so far
         * caused any problems, it's not worth logging.
         */
        status = HV_STATUS_INVALID_HYPERCALL_CODE;
        break;
    }

 out:
    output.result = status;
    switch ( mode )
    {
    case 8:
        regs->rax = output.raw;
        break;
    default:
        regs->rdx = output.raw >> 32;
        regs->rax = (uint32_t)output.raw;
        break;
    }

    return HVM_HCALL_completed;
}

void viridian_dump_guest_page(const struct vcpu *v, const char *name,
                              const struct viridian_page *vp)
{
    if ( !vp->msr.enabled )
        return;

    printk(XENLOG_G_INFO "%pv: VIRIDIAN %s: pfn: %lx\n",
           v, name, (unsigned long)vp->msr.pfn);
}

void viridian_map_guest_page(struct domain *d, struct viridian_page *vp)
{
    unsigned long gmfn = vp->msr.pfn;
    struct page_info *page;

    if ( vp->ptr )
        return;

    page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
    if ( !page )
        goto fail;

    if ( !get_page_type(page, PGT_writable_page) )
    {
        put_page(page);
        goto fail;
    }

    vp->ptr = __map_domain_page_global(page);
    if ( !vp->ptr )
    {
        put_page_and_type(page);
        goto fail;
    }

    clear_page(vp->ptr);
    return;

 fail:
    gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
             gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
}

void viridian_unmap_guest_page(struct viridian_page *vp)
{
    struct page_info *page;

    if ( !vp->ptr )
        return;

    page = mfn_to_page(domain_page_map_to_mfn(vp->ptr));

    unmap_domain_page_global(vp->ptr);
    vp->ptr = NULL;

    put_page_and_type(page);
}

static int viridian_save_domain_ctxt(struct vcpu *v,
                                     hvm_domain_context_t *h)
{
    const struct domain *d = v->domain;
    const struct viridian_domain *vd = d->arch.hvm.viridian;
    struct hvm_viridian_domain_context ctxt = {
        .hypercall_gpa = vd->hypercall_gpa.raw,
        .guest_os_id = vd->guest_os_id.raw,
    };

    if ( !is_viridian_domain(d) )
        return 0;

    viridian_time_save_domain_ctxt(d, &ctxt);
    viridian_synic_save_domain_ctxt(d, &ctxt);

    return (hvm_save_entry(VIRIDIAN_DOMAIN, 0, h, &ctxt) != 0);
}

static int viridian_load_domain_ctxt(struct domain *d,
                                     hvm_domain_context_t *h)
{
    struct viridian_domain *vd = d->arch.hvm.viridian;
    struct hvm_viridian_domain_context ctxt;

    if ( hvm_load_entry_zeroextend(VIRIDIAN_DOMAIN, h, &ctxt) != 0 )
        return -EINVAL;

    vd->hypercall_gpa.raw = ctxt.hypercall_gpa;
    vd->guest_os_id.raw = ctxt.guest_os_id;

    viridian_synic_load_domain_ctxt(d, &ctxt);
    viridian_time_load_domain_ctxt(d, &ctxt);

    return 0;
}

HVM_REGISTER_SAVE_RESTORE(VIRIDIAN_DOMAIN, viridian_save_domain_ctxt,
                          viridian_load_domain_ctxt, 1, HVMSR_PER_DOM);

static int viridian_save_vcpu_ctxt(struct vcpu *v, hvm_domain_context_t *h)
{
    struct hvm_viridian_vcpu_context ctxt = {};

    if ( !is_viridian_vcpu(v) )
        return 0;

    viridian_time_save_vcpu_ctxt(v, &ctxt);
    viridian_synic_save_vcpu_ctxt(v, &ctxt);

    return hvm_save_entry(VIRIDIAN_VCPU, v->vcpu_id, h, &ctxt);
}

static int viridian_load_vcpu_ctxt(struct domain *d,
                                   hvm_domain_context_t *h)
{
    unsigned int vcpuid = hvm_load_instance(h);
    struct vcpu *v;
    struct hvm_viridian_vcpu_context ctxt;

    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
    {
        dprintk(XENLOG_G_ERR, "HVM restore: dom%d has no vcpu%u\n",
                d->domain_id, vcpuid);
        return -EINVAL;
    }

    if ( hvm_load_entry_zeroextend(VIRIDIAN_VCPU, h, &ctxt) != 0 )
        return -EINVAL;

    if ( memcmp(&ctxt._pad, zero_page, sizeof(ctxt._pad)) )
        return -EINVAL;

    viridian_synic_load_vcpu_ctxt(v, &ctxt);
    viridian_time_load_vcpu_ctxt(v, &ctxt);

    return 0;
}

HVM_REGISTER_SAVE_RESTORE(VIRIDIAN_VCPU, viridian_save_vcpu_ctxt,
                          viridian_load_vcpu_ctxt, 1, HVMSR_PER_VCPU);

static int __init parse_viridian_version(const char *arg)
{
    const char *t;
    unsigned int n[3];
    unsigned int i = 0;

    n[0] = viridian_major;
    n[1] = viridian_minor;
    n[2] = viridian_build;

    do {
        const char *e;

        t = strchr(arg, ',');
        if ( !t )
            t = strchr(arg, '\0');

        if ( *arg && *arg != ',' && i < 3 )
        {
            n[i] = simple_strtoul(arg, &e, 0);
            if ( e != t )
                break;
        }

        i++;
        arg = t + 1;
    } while ( *t );

    if ( i != 3 )
        return -EINVAL;

    if ( ((typeof(viridian_major))n[0] != n[0]) ||
         ((typeof(viridian_minor))n[1] != n[1]) ||
         ((typeof(viridian_build))n[2] != n[2]) )
        return -EINVAL;

    viridian_major = n[0];
    viridian_minor = n[1];
    viridian_build = n[2];

    printk("viridian-version = %#x,%#x,%#x\n",
           viridian_major, viridian_minor, viridian_build);
    return 0;
}
custom_param("viridian-version", parse_viridian_version);
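
/*
 * For example, "viridian-version=6,1,0x1db1" overrides all three fields
 * (the values here are purely illustrative); a field left empty, as in
 * "viridian-version=,,0x2000", keeps its default.
 */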

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */