/*
 * intr.c: Interrupt handling for SVM.
 * Copyright (c) 2005, AMD Inc.
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/init.h>
#include <xen/mm.h>
#include <xen/lib.h>
#include <xen/trace.h>
#include <xen/errno.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/paging.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/io.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vlapic.h>
#include <asm/hvm/svm/svm.h>
#include <asm/hvm/svm/intr.h>
#include <asm/hvm/nestedhvm.h> /* for nestedhvm_vcpu_in_guestmode */
#include <asm/vm_event.h>
#include <xen/event.h>
#include <xen/kernel.h>
#include <public/hvm/ioreq.h>
#include <xen/domain_page.h>
#include <asm/hvm/trace.h>

static void svm_inject_nmi(struct vcpu *v)
{
    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
    u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb);
    intinfo_t event;

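    /*
     * Build an NMI event in the VMCB EVENTINJ format (vector 2, type NMI,
     * valid bit set); the processor delivers it on the next VMRUN.
     */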
    event.raw = 0;
    event.v = true;
    event.type = X86_EVENTTYPE_NMI;
    event.vector = TRAP_nmi;

    ASSERT(!vmcb->event_inj.v);
    vmcb->event_inj = event;

    /*
     * SVM does not virtualise the NMI mask, so we emulate it by intercepting
     * the next IRET and blocking NMI injection until the intercept triggers.
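     * The IRET intercept is expected to be removed again by the VMEXIT_IRET
     * handling in svm.c once the guest has executed the IRET.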
     */
    vmcb_set_general1_intercepts(
        vmcb, general1_intercepts | GENERAL1_INTERCEPT_IRET);
}

static void svm_inject_extint(struct vcpu *v, int vector)
{
    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
    intinfo_t event;

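    /*
     * Build an external-interrupt event in EVENTINJ format.  Only one event
     * can be pending for injection at a time, hence the ASSERT below.
     */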
    event.raw = 0;
    event.v = true;
    event.type = X86_EVENTTYPE_EXT_INTR;
    event.vector = vector;

    ASSERT(!vmcb->event_inj.v);
    vmcb->event_inj = event;
}

static void svm_enable_intr_window(struct vcpu *v, struct hvm_intack intack)
{
    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
    uint32_t general1_intercepts = vmcb_get_general1_intercepts(vmcb);
    vintr_t intr;

    ASSERT(intack.source != hvm_intsrc_none);

    if ( nestedhvm_enabled(v->domain) ) {
        struct nestedvcpu *nv = &vcpu_nestedhvm(v);
        if ( nv->nv_vmentry_pending ) {
            struct vmcb_struct *gvmcb = nv->nv_vvmcx;

            /*
             * Check whether the l1 guest is injecting an interrupt into
             * the l2 guest via VINTR.  If so, return here; otherwise the
             * l2 guest would lose interrupts.
             */
            ASSERT(gvmcb != NULL);
            intr = vmcb_get_vintr(gvmcb);
            if ( intr.fields.irq )
                return;
        }
    }

    HVMTRACE_3D(INTR_WINDOW, intack.vector, intack.source,
                vmcb->event_inj.v ? vmcb->event_inj.vector : -1);

    /*
     * Create a dummy virtual interrupt to intercept as soon as the
     * guest can accept the real interrupt.
     *
     * TODO: Better NMI handling.  We need a way to skip a MOV SS interrupt
     * shadow.  This is hard to do without hardware support.  Also we should
     * not be waiting for EFLAGS.IF to become 1.
     */

    /*
     * NMI-blocking window is handled by IRET interception.  We should not
     * inject a VINTR in this case as VINTR is unaware of NMI-blocking and
     * hence we can enter an endless loop (VINTR intercept fires, yet
     * hvm_interrupt_blocked() still indicates NMI-blocking is active, so
     * we inject a VINTR, ...).
     */
    if ( (intack.source == hvm_intsrc_nmi) &&
         (general1_intercepts & GENERAL1_INTERCEPT_IRET) )
        return;

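    /*
     * Program a fake pending virtual interrupt: its priority comes from the
     * high nibble of the pending vector, and the vTPR check is skipped for
     * sources other than the local APIC (e.g. the PIC), which are not
     * subject to TPR masking.
     */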
    intr = vmcb_get_vintr(vmcb);
    intr.fields.irq = 1;
    intr.fields.vector = 0;
    intr.fields.prio = intack.vector >> 4;
    intr.fields.ign_tpr = (intack.source != hvm_intsrc_lapic);
    vmcb_set_vintr(vmcb, intr);
    vmcb_set_general1_intercepts(
        vmcb, general1_intercepts | GENERAL1_INTERCEPT_VINTR);
}

void svm_intr_assist(void)
{
    struct vcpu *v = current;
    struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
    struct hvm_intack intack;
    enum hvm_intblk intblk;

    /* Block event injection while handling a sync vm_event. */
    if ( unlikely(v->arch.vm_event) && v->arch.vm_event->sync_event )
        return;

    /* Crank the handle on interrupt state. */
    pt_update_irq(v);

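    /*
     * Loop until a pending interrupt is successfully acked or none remains:
     * acking a source can fail (hvm_vcpu_ack_pending_irq() then reports
     * hvm_intsrc_none), in which case another pending source is tried.
     */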
    do {
        intack = hvm_vcpu_has_pending_irq(v);
        if ( likely(intack.source == hvm_intsrc_none) )
            return;

        intblk = hvm_interrupt_blocked(v, intack);
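        /*
         * With GIF clear, all interrupt sources are blocked until the
         * (nested) guest sets GIF again with STGI; nothing to do here.
         */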
        if ( intblk == hvm_intblk_svm_gif ) {
            ASSERT(nestedhvm_enabled(v->domain));
            return;
        }

        /* Interrupts for the nested guest are already
         * in the vmcb.
         */
        if ( nestedhvm_enabled(v->domain) && nestedhvm_vcpu_in_guestmode(v) )
        {
            int rc;

            /* l2 guest was running when an interrupt for
             * the l1 guest occurred.
             */
            rc = nestedsvm_vcpu_interrupt(v, intack);
            switch (rc) {
            case NSVM_INTR_NOTINTERCEPTED:
                /* Inject interrupt into 2nd level guest directly. */
                break;
            case NSVM_INTR_NOTHANDLED:
            case NSVM_INTR_FORCEVMEXIT:
                return;
            case NSVM_INTR_MASKED:
                /* Guest already enabled an interrupt window. */
                return;
            default:
                panic("%s: nestedsvm_vcpu_interrupt can't handle value %#x\n",
                      __func__, rc);
            }
        }

        /*
         * Pending IRQs must be delayed if:
         * 1. An event is already pending. This is despite the fact that SVM
         *    provides a VINTR delivery method quite separate from the EVENTINJ
         *    mechanism. The event delivery can arbitrarily delay the injection
         *    of the vintr (for example, if the exception is handled via an
         *    interrupt gate, hence zeroing RFLAGS.IF). In the meantime:
         *    - the vTPR could be modified upwards, so we need to wait until
         *      the exception is delivered before we can safely decide that an
         *      interrupt is deliverable; and
         *    - the guest might look at the APIC/PIC state, so we ought not to
         *      have cleared the interrupt out of the IRR.
         * 2. The IRQ is masked.
         */
        if ( unlikely(vmcb->event_inj.v) || intblk )
        {
            svm_enable_intr_window(v, intack);
            return;
        }

        intack = hvm_vcpu_ack_pending_irq(v, intack);
    } while ( intack.source == hvm_intsrc_none );

    if ( intack.source == hvm_intsrc_nmi )
    {
        svm_inject_nmi(v);
    }
    else
    {
        HVMTRACE_2D(INJ_VIRQ, intack.vector, /*fake=*/ 0);
        svm_inject_extint(v, intack.vector);
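        /* Let the periodic timer code account for the vector just injected. */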
        pt_intr_post(v, intack);
    }

    /* Is there another IRQ to queue up behind this one? */
    intack = hvm_vcpu_has_pending_irq(v);
    if ( unlikely(intack.source != hvm_intsrc_none) )
        svm_enable_intr_window(v, intack);
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */