/***************************************************************************
 * synic.c
 *
 * An implementation of some interrupt-related Viridian enlightenments.
 * See Microsoft's Hypervisor Top Level Functional Specification
 * for more information.
 */

#include <xen/domain_page.h>
#include <xen/hypercall.h>
#include <xen/sched.h>
#include <xen/version.h>

#include <asm/apic.h>
#include <asm/guest/hyperv-tlfs.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vlapic.h>

#include "private.h"

void __init __maybe_unused build_assertions(void)
{
    BUILD_BUG_ON(sizeof(struct hv_message) != HV_MESSAGE_SIZE);
}

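/*
 * APIC assist ("lazy EOI") support, using the VP assist page. Setting the
 * assist flag tells the guest that it may clear the flag itself rather
 * than writing the EOI register, so the EOI trap can be avoided.
 */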
void viridian_apic_assist_set(const struct vcpu *v)
{
    struct viridian_vcpu *vv = v->arch.hvm.viridian;
    struct hv_vp_assist_page *ptr = vv->vp_assist.ptr;

    if ( !ptr )
        return;

    /*
     * If there is already an assist pending then something has gone
     * wrong and the VM will most likely hang so force a crash now
     * to make the problem clear.
     */
    if ( vv->apic_assist_pending )
        domain_crash(v->domain);

    vv->apic_assist_pending = true;
    ptr->apic_assist = 1;
}

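/*
 * Check whether the guest has cleared the assist flag itself, i.e. an
 * explicit EOI (and hence a trap) has been avoided.
 */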
bool viridian_apic_assist_completed(const struct vcpu *v)
{
    struct viridian_vcpu *vv = v->arch.hvm.viridian;
    struct hv_vp_assist_page *ptr = vv->vp_assist.ptr;

    if ( !ptr )
        return false;

    if ( vv->apic_assist_pending && !ptr->apic_assist )
    {
        /* An EOI has been avoided */
        vv->apic_assist_pending = false;
        return true;
    }

    return false;
}

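/* Cancel any outstanding assist and clear the guest-visible flag. */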
void viridian_apic_assist_clear(const struct vcpu *v)
{
    struct viridian_vcpu *vv = v->arch.hvm.viridian;
    struct hv_vp_assist_page *ptr = vv->vp_assist.ptr;

    if ( !ptr )
        return;

    ptr->apic_assist = 0;
    vv->apic_assist_pending = false;
}

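/*
 * Handle writes to the Viridian APIC and SynIC MSRs. Returns X86EMUL_OKAY
 * on success, or X86EMUL_EXCEPTION for unsupported or disallowed accesses
 * (which the caller turns into #GP in the guest).
 */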
int viridian_synic_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
{
    struct viridian_vcpu *vv = v->arch.hvm.viridian;
    struct domain *d = v->domain;

    ASSERT(v == current || !v->is_running);

    switch ( idx )
    {
    case HV_X64_MSR_EOI:
        vlapic_EOI_set(vcpu_vlapic(v));
        break;

    case HV_X64_MSR_ICR:
        vlapic_reg_write(v, APIC_ICR2, val >> 32);
        vlapic_reg_write(v, APIC_ICR, val);
        break;

    case HV_X64_MSR_TPR:
        vlapic_reg_write(v, APIC_TASKPRI, val);
        break;

    case HV_X64_MSR_VP_ASSIST_PAGE:
        /* release any previous mapping */
        viridian_unmap_guest_page(&vv->vp_assist);
        vv->vp_assist.msr.raw = val;
        viridian_dump_guest_page(v, "VP_ASSIST", &vv->vp_assist);
        if ( vv->vp_assist.msr.enabled )
            viridian_map_guest_page(d, &vv->vp_assist);
        break;

    case HV_X64_MSR_SCONTROL:
        if ( !(viridian_feature_mask(d) & HVMPV_synic) )
            return X86EMUL_EXCEPTION;

        vv->scontrol = val;
        break;

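    /* The SynIC version MSR is read-only; writes fault. */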
    case HV_X64_MSR_SVERSION:
        return X86EMUL_EXCEPTION;

    case HV_X64_MSR_SIEFP:
        if ( !(viridian_feature_mask(d) & HVMPV_synic) )
            return X86EMUL_EXCEPTION;

        vv->siefp = val;
        break;

    case HV_X64_MSR_SIMP:
        if ( !(viridian_feature_mask(d) & HVMPV_synic) )
            return X86EMUL_EXCEPTION;

        viridian_unmap_guest_page(&vv->simp);
        vv->simp.msr.raw = val;
        viridian_dump_guest_page(v, "SIMP", &vv->simp);
        if ( vv->simp.msr.enabled )
            viridian_map_guest_page(d, &vv->simp);
        break;

    case HV_X64_MSR_EOM:
        if ( !(viridian_feature_mask(d) & HVMPV_synic) )
            return X86EMUL_EXCEPTION;

        vv->msg_pending = 0;
        break;

    case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
    {
        unsigned int sintx = idx - HV_X64_MSR_SINT0;
        union hv_synic_sint new, *vs =
            &array_access_nospec(vv->sint, sintx);
        uint8_t vector;

        if ( !(viridian_feature_mask(d) & HVMPV_synic) )
            return X86EMUL_EXCEPTION;

        /* Vectors must be in the range 0x10-0xff inclusive */
        new.as_uint64 = val;
        if ( new.vector < 0x10 )
            return X86EMUL_EXCEPTION;

        /*
         * Invalidate any previous mapping by setting an out-of-range
         * index before setting the new mapping.
         */
        vector = vs->vector;
        vv->vector_to_sintx[vector] = ARRAY_SIZE(vv->sint);

        vector = new.vector;
        vv->vector_to_sintx[vector] = sintx;

        printk(XENLOG_G_INFO "%pv: VIRIDIAN SINT%u: vector: %x\n", v, sintx,
               vector);

        if ( new.polling )
            __clear_bit(sintx, &vv->msg_pending);

        *vs = new;
        break;
    }

    default:
        gdprintk(XENLOG_INFO, "%s: unimplemented MSR %#x (%016"PRIx64")\n",
                 __func__, idx, val);
        return X86EMUL_EXCEPTION;
    }

    return X86EMUL_OKAY;
}

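/*
 * Handle reads of the Viridian APIC and SynIC MSRs. *val is only written
 * when X86EMUL_OKAY is returned.
 */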
int viridian_synic_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val)
{
    const struct viridian_vcpu *vv = v->arch.hvm.viridian;
    const struct domain *d = v->domain;

    switch ( idx )
    {
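    /* The EOI MSR is write-only; reads fault. */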
    case HV_X64_MSR_EOI:
        return X86EMUL_EXCEPTION;

    case HV_X64_MSR_ICR:
    {
        uint32_t icr2 = vlapic_get_reg(vcpu_vlapic(v), APIC_ICR2);
        uint32_t icr = vlapic_get_reg(vcpu_vlapic(v), APIC_ICR);

        *val = ((uint64_t)icr2 << 32) | icr;
        break;
    }

    case HV_X64_MSR_TPR:
        *val = vlapic_get_reg(vcpu_vlapic(v), APIC_TASKPRI);
        break;

    case HV_X64_MSR_VP_ASSIST_PAGE:
        *val = vv->vp_assist.msr.raw;
        break;

    case HV_X64_MSR_SCONTROL:
        if ( !(viridian_feature_mask(d) & HVMPV_synic) )
            return X86EMUL_EXCEPTION;

        *val = vv->scontrol;
        break;

    case HV_X64_MSR_SVERSION:
        if ( !(viridian_feature_mask(d) & HVMPV_synic) )
            return X86EMUL_EXCEPTION;

        /*
         * The specification says that the version number is 0x00000001
         * and should be in the lower 32-bits of the MSR, while the
         * upper 32-bits are reserved... but it doesn't say what they
         * should be set to. Assume everything but the bottom bit
         * should be zero.
         */
        *val = 1ul;
        break;

    case HV_X64_MSR_SIEFP:
        if ( !(viridian_feature_mask(d) & HVMPV_synic) )
            return X86EMUL_EXCEPTION;

        *val = vv->siefp;
        break;

    case HV_X64_MSR_SIMP:
        if ( !(viridian_feature_mask(d) & HVMPV_synic) )
            return X86EMUL_EXCEPTION;

        *val = vv->simp.msr.raw;
        break;

    case HV_X64_MSR_EOM:
        if ( !(viridian_feature_mask(d) & HVMPV_synic) )
            return X86EMUL_EXCEPTION;

        *val = 0;
        break;

    case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
    {
        unsigned int sintx = idx - HV_X64_MSR_SINT0;
        const union hv_synic_sint *vs =
            &array_access_nospec(vv->sint, sintx);

        if ( !(viridian_feature_mask(d) & HVMPV_synic) )
            return X86EMUL_EXCEPTION;

        *val = vs->as_uint64;
        break;
    }

    default:
        gdprintk(XENLOG_INFO, "%s: unimplemented MSR %#x\n", __func__, idx);
        return X86EMUL_EXCEPTION;
    }

    return X86EMUL_OKAY;
}

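/*
 * Reset per-vCPU SynIC state: all SINTs start off masked and no vector
 * has a SINT mapping.
 */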
int viridian_synic_vcpu_init(const struct vcpu *v)
{
    struct viridian_vcpu *vv = v->arch.hvm.viridian;
    unsigned int i;

    /*
     * The specification says that all synthetic interrupts must be
     * initially masked.
     */
    for ( i = 0; i < ARRAY_SIZE(vv->sint); i++ )
        vv->sint[i].masked = 1;

    /* Initialize the mapping array with invalid values */
    for ( i = 0; i < ARRAY_SIZE(vv->vector_to_sintx); i++ )
        vv->vector_to_sintx[i] = ARRAY_SIZE(vv->sint);

    return 0;
}

int viridian_synic_domain_init(const struct domain *d)
{
    return 0;
}

void viridian_synic_vcpu_deinit(const struct vcpu *v)
{
    struct viridian_vcpu *vv = v->arch.hvm.viridian;

    viridian_unmap_guest_page(&vv->vp_assist);
    viridian_unmap_guest_page(&vv->simp);
}

void viridian_synic_domain_deinit(const struct domain *d)
{
}

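/* Poll for outstanding work; currently only synthetic timer expiry. */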
void viridian_synic_poll(struct vcpu *v)
{
    viridian_time_poll_timers(v);
}

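/*
 * Post a timer expiry message into the SIMP slot for the given SINT and,
 * if the SINT is unmasked, inject its vector. Returns false if a previous
 * message has not yet been consumed, in which case the pending flags are
 * set so delivery can be retried later.
 */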
bool viridian_synic_deliver_timer_msg(struct vcpu *v, unsigned int sintx,
                                      unsigned int index,
                                      uint64_t expiration,
                                      uint64_t delivery)
{
    struct viridian_vcpu *vv = v->arch.hvm.viridian;
    const union hv_synic_sint *vs = &vv->sint[sintx];
    struct hv_message *msg = vv->simp.ptr;
    struct {
        uint32_t TimerIndex;
        uint32_t Reserved;
        uint64_t ExpirationTime;
        uint64_t DeliveryTime;
    } payload = {
        .TimerIndex = index,
        .ExpirationTime = expiration,
        .DeliveryTime = delivery,
    };

    if ( test_bit(sintx, &vv->msg_pending) )
        return false;

    /*
     * To avoid using an atomic test-and-set, and a barrier before calling
     * vlapic_set_irq(), this function must be called in the context of the
     * vcpu receiving the message.
     */
    ASSERT(v == current);

    msg += sintx;

    if ( msg->header.message_type != HVMSG_NONE )
    {
        msg->header.message_flags.msg_pending = 1;
        __set_bit(sintx, &vv->msg_pending);
        return false;
    }

    msg->header.message_type = HVMSG_TIMER_EXPIRED;
    msg->header.message_flags.msg_pending = 0;
    msg->header.payload_size = sizeof(payload);

    BUILD_BUG_ON(sizeof(payload) > sizeof(msg->u.payload));
    memcpy(msg->u.payload, &payload, sizeof(payload));

    if ( !vs->masked )
        vlapic_set_irq(vcpu_vlapic(v), vs->vector, 0);

    return true;
}

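/*
 * Return true if the vector is currently mapped to a SINT with auto-EOI
 * semantics (i.e. no explicit EOI is expected from the guest).
 */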
bool viridian_synic_is_auto_eoi_sint(const struct vcpu *v,
                                     unsigned int vector)
{
    const struct viridian_vcpu *vv = v->arch.hvm.viridian;
    unsigned int sintx = vv->vector_to_sintx[vector];
    const union hv_synic_sint *vs =
        &array_access_nospec(vv->sint, sintx);

    if ( sintx >= ARRAY_SIZE(vv->sint) )
        return false;

    return vs->auto_eoi;
}

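/*
 * A vector mapped to a SINT has been acked: clear the corresponding
 * msg_pending bit so that further messages can be delivered.
 */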
void viridian_synic_ack_sint(const struct vcpu *v, unsigned int vector)
{
    struct viridian_vcpu *vv = v->arch.hvm.viridian;
    unsigned int sintx = vv->vector_to_sintx[vector];

    ASSERT(v == current);

    if ( sintx < ARRAY_SIZE(vv->sint) )
        __clear_bit(array_index_nospec(sintx, ARRAY_SIZE(vv->sint)),
                    &vv->msg_pending);
}

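/* Save per-vCPU SynIC and assist-page state into the HVM context record. */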
void viridian_synic_save_vcpu_ctxt(const struct vcpu *v,
                                   struct hvm_viridian_vcpu_context *ctxt)
{
    const struct viridian_vcpu *vv = v->arch.hvm.viridian;
    unsigned int i;

    BUILD_BUG_ON(ARRAY_SIZE(vv->sint) != ARRAY_SIZE(ctxt->sint_msr));

    for ( i = 0; i < ARRAY_SIZE(vv->sint); i++ )
        ctxt->sint_msr[i] = vv->sint[i].as_uint64;

    ctxt->simp_msr = vv->simp.msr.raw;

    ctxt->apic_assist_pending = vv->apic_assist_pending;
    ctxt->vp_assist_msr = vv->vp_assist.msr.raw;
}

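/*
 * Restore per-vCPU SynIC state: re-map any enabled guest pages and
 * rebuild the vector-to-SINT mapping from the restored SINT MSRs.
 */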
void viridian_synic_load_vcpu_ctxt(
    struct vcpu *v, const struct hvm_viridian_vcpu_context *ctxt)
{
    struct viridian_vcpu *vv = v->arch.hvm.viridian;
    struct domain *d = v->domain;
    unsigned int i;

    vv->vp_assist.msr.raw = ctxt->vp_assist_msr;
    if ( vv->vp_assist.msr.enabled )
        viridian_map_guest_page(d, &vv->vp_assist);

    vv->apic_assist_pending = ctxt->apic_assist_pending;

    vv->simp.msr.raw = ctxt->simp_msr;
    if ( vv->simp.msr.enabled )
        viridian_map_guest_page(d, &vv->simp);

    for ( i = 0; i < ARRAY_SIZE(vv->sint); i++ )
    {
        uint8_t vector;

        vv->sint[i].as_uint64 = ctxt->sint_msr[i];

        vector = vv->sint[i].vector;
        if ( vector < 0x10 )
            continue;

        vv->vector_to_sintx[vector] = i;
    }
}

void viridian_synic_save_domain_ctxt(
    const struct domain *d, struct hvm_viridian_domain_context *ctxt)
{
}

void viridian_synic_load_domain_ctxt(
    struct domain *d, const struct hvm_viridian_domain_context *ctxt)
{
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */