/*
 * i8259 interrupt controller emulation
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 * Copyright (c) 2005 Intel Corporation
 * Copyright (c) 2006 Keir Fraser, XenSource Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <xen/types.h>
#include <xen/event.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/trace.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/io.h>
#include <asm/hvm/support.h>

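/*
 * d->arch.hvm.vpic[] is a two-element array: the master PIC at index 0 and
 * the slave at index 1, in contiguous memory.  The pointer arithmetic in
 * this file ((--vpic) to reach the master, vpic++ to reach the slave)
 * relies on that layout, as do the container_of() constructions below.
 */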
#define vpic_domain(v) (container_of((v), struct domain, \
                        arch.hvm.vpic[!(v)->is_master]))
#define __vpic_lock(v) &container_of((v), struct hvm_domain, \
                                     vpic[!(v)->is_master])->irq_lock
#define vpic_lock(v)   spin_lock(__vpic_lock(v))
#define vpic_unlock(v) spin_unlock(__vpic_lock(v))
#define vpic_is_locked(v) spin_is_locked(__vpic_lock(v))
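/*
 * ELCR write masks: on the master, IR0-IR2 (PIT, keyboard, cascade) are
 * always edge triggered (0xf8); on the slave, IR8 (RTC) and IR13 (FPU)
 * are always edge triggered (0xde).  Only the remaining lines may be
 * switched to level triggering via ports 0x4d0/0x4d1.
 */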
#define vpic_elcr_mask(v) ((v)->is_master ? (uint8_t)0xf8 : (uint8_t)0xde)

/* Return the highest priority found in mask. Return 8 if none. */
#define VPIC_PRIO_NONE 8
static int vpic_get_priority(struct hvm_hw_vpic *vpic, uint8_t mask)
{
    int prio;

    ASSERT(vpic_is_locked(vpic));

    if ( mask == 0 )
        return VPIC_PRIO_NONE;

    /* prio = ffs(mask ROR vpic->priority_add); */
    asm ( "ror %%cl,%b1 ; rep; bsf %1,%0"
          : "=r" (prio) : "q" ((uint32_t)mask), "c" (vpic->priority_add) );
    return prio;
}
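
/*
 * Worked example of the rotate-and-scan above: with priority_add = 3 (i.e.
 * IR3 is currently highest priority) and mask = 0x81 (IR0 and IR7 pending),
 * 0x81 rotated right by 3 is 0x30, and bsf finds bit 4.  The caller maps
 * priority 4 back to a line with (4 + 3) & 7 = IR7, which indeed outranks
 * IR0 in the rotated order IR3 > IR4 > ... > IR7 > IR0 > IR1 > IR2.
 */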

/* Return the PIC's highest priority pending interrupt. Return -1 if none. */
static int vpic_get_highest_priority_irq(struct hvm_hw_vpic *vpic)
{
    int cur_priority, priority, irq;
    uint8_t mask;

    ASSERT(vpic_is_locked(vpic));

    mask = vpic->irr & ~vpic->imr;
    priority = vpic_get_priority(vpic, mask);
    if ( priority == VPIC_PRIO_NONE )
        return -1;

    irq = (priority + vpic->priority_add) & 7;

    /*
     * Compute current priority. If special fully nested mode on the master,
     * the IRQ coming from the slave is not taken into account for the
     * priority computation. In special mask mode, masked interrupts do not
     * block lower-priority interrupts even if their IS bit is set.
     */
    mask = vpic->isr;
    if ( vpic->special_fully_nested_mode && vpic->is_master && (irq == 2) )
        mask &= ~(1 << 2);
    if ( vpic->special_mask_mode )
        mask &= ~vpic->imr;
    cur_priority = vpic_get_priority(vpic, mask);

    /* Deliver the pending IRQ only if it strictly outranks all in-service IRQs. */
    return (priority < cur_priority) ? irq : -1;
}

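/*
 * Recompute the PIC's INT output.  When a slave's output changes it is
 * propagated to the master by asserting or deasserting IR2 (the cascade
 * line), hence the recursive call on the adjacent array element below.
 */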
static void vpic_update_int_output(struct hvm_hw_vpic *vpic)
{
    int irq;

    ASSERT(vpic_is_locked(vpic));

    irq = vpic_get_highest_priority_irq(vpic);
    TRACE_3D(TRC_HVM_EMUL_PIC_INT_OUTPUT, vpic->int_output, vpic->is_master,
             irq);
    if ( vpic->int_output == (irq >= 0) )
        return;

    /* INT line transition L->H or H->L. */
    vpic->int_output = !vpic->int_output;

    if ( vpic->int_output )
    {
        if ( vpic->is_master )
        {
            /* Master INT line is connected in Virtual Wire Mode. */
            struct vcpu *v = vpic_domain(vpic)->arch.hvm.i8259_target;

            if ( v != NULL )
            {
                TRACE_1D(TRC_HVM_EMUL_PIC_KICK, irq);
                vcpu_kick(v);
            }
        }
        else
        {
            /* Assert slave line in master PIC. */
            (--vpic)->irr |= 1 << 2;
            vpic_update_int_output(vpic);
        }
    }
    else if ( !vpic->is_master )
    {
        /* Clear slave line in master PIC. */
        (--vpic)->irr &= ~(1 << 2);
        vpic_update_int_output(vpic);
    }
}

static void __vpic_intack(struct hvm_hw_vpic *vpic, int irq)
{
    uint8_t mask = 1 << irq;

    ASSERT(vpic_is_locked(vpic));

    TRACE_2D(TRC_HVM_EMUL_PIC_INTACK, vpic->is_master, irq);
    /* Edge-triggered: clear the IRR (forget the edge). */
    if ( !(vpic->elcr & mask) )
        vpic->irr &= ~mask;

    if ( !vpic->auto_eoi )
        vpic->isr |= mask;
    else if ( vpic->rotate_on_auto_eoi )
        vpic->priority_add = (irq + 1) & 7;

    vpic_update_int_output(vpic);
}

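/*
 * Emulate an INTA cycle: return the line (0-15) being acknowledged, or -1
 * if nothing is pending.  An acknowledge of IR2 on the master is really a
 * cascade from the slave, so the slave is then interrogated for the true
 * source; its line number is returned biased by 8.
 */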
static int vpic_intack(struct hvm_hw_vpic *vpic)
{
    int irq = -1;

    vpic_lock(vpic);

    if ( !vpic->int_output )
        goto out;

    irq = vpic_get_highest_priority_irq(vpic);
    BUG_ON(irq < 0);
    __vpic_intack(vpic, irq);

    if ( (irq == 2) && vpic->is_master )
    {
        vpic++; /* Slave PIC */
        irq = vpic_get_highest_priority_irq(vpic);
        BUG_ON(irq < 0);
        __vpic_intack(vpic, irq);
        irq += 8;
    }

 out:
    vpic_unlock(vpic);
    return irq;
}

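/*
 * A write to the even port (0x20/0xa0) is ICW1, OCW2 or OCW3, distinguished
 * by bits 4 and 3 of the value.  A write to the odd port (0x21/0xa1) is
 * OCW1 (the interrupt mask) in normal operation, or ICW2..ICW4 while an
 * initialisation sequence started by ICW1 is in progress.
 */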
static void vpic_ioport_write(
    struct hvm_hw_vpic *vpic, uint32_t addr, uint32_t val)
{
    int priority, cmd, irq;
    uint8_t mask, unmasked = 0;

    vpic_lock(vpic);

    if ( (addr & 1) == 0 )
    {
        if ( val & 0x10 )
        {
            /* ICW1 */
            /* Clear edge-sensing logic. */
            vpic->irr &= vpic->elcr;

            unmasked = vpic->imr;
            /* No interrupts masked or in service. */
            vpic->imr = vpic->isr = 0;

            /* IR7 is lowest priority. */
            vpic->priority_add = 0;
            vpic->rotate_on_auto_eoi = 0;

            vpic->special_mask_mode = 0;
            vpic->readsel_isr = 0;
            vpic->poll = 0;

            if ( !(val & 1) )
            {
                /* No ICW4: ICW4 features are cleared. */
                vpic->auto_eoi = 0;
                vpic->special_fully_nested_mode = 0;
            }

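            /*
             * Encode the init sequence state: bit 2 = ICW4 expected (ICW1
             * bit 0), bit 3 = single mode, i.e. no ICW3 (ICW1 bit 1).  The
             * low two bits count up through the ICW2/ICW3/ICW4 writes
             * handled on the odd port below; zero means operational.
             */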
            vpic->init_state = ((val & 3) << 2) | 1;
        }
        else if ( val & 0x08 )
        {
            /* OCW3 */
            if ( val & 0x04 )
                vpic->poll = 1;
            if ( val & 0x02 )
                vpic->readsel_isr = val & 1;
            if ( val & 0x40 )
                vpic->special_mask_mode = (val >> 5) & 1;
        }
        else
        {
            /* OCW2 */
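            /*
             * OCW2 bits 7-5 are R (rotate), SL (specific level) and EOI,
             * so cmd = val >> 5 decodes as: 0/4 clear/set rotate-on-AEOI,
             * 1 non-specific EOI, 5 non-specific EOI & rotate, 3 specific
             * EOI, 7 specific EOI & rotate, 6 set priority, 2 is a no-op.
             */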
            cmd = val >> 5;
            switch ( cmd )
            {
            case 0: /* Rotate in AEOI Mode (Clear) */
            case 4: /* Rotate in AEOI Mode (Set) */
                vpic->rotate_on_auto_eoi = cmd >> 2;
                break;
            case 1: /* Non-Specific EOI */
            case 5: /* Non-Specific EOI & Rotate */
                mask = vpic->isr;
                if ( vpic->special_mask_mode )
                    mask &= ~vpic->imr; /* SMM: ignore masked IRs. */
                priority = vpic_get_priority(vpic, mask);
                if ( priority == VPIC_PRIO_NONE )
                    break;
                irq = (priority + vpic->priority_add) & 7;
                vpic->isr &= ~(1 << irq);
                if ( cmd == 5 )
                    vpic->priority_add = (irq + 1) & 7;
                break;
            case 3: /* Specific EOI */
            case 7: /* Specific EOI & Rotate */
                irq = val & 7;
                vpic->isr &= ~(1 << irq);
                if ( cmd == 7 )
                    vpic->priority_add = (irq + 1) & 7;
                /* Release lock and EOI the physical interrupt (if any). */
                vpic_update_int_output(vpic);
                vpic_unlock(vpic);
                hvm_dpci_eoi(current->domain,
                             hvm_isa_irq_to_gsi((addr >> 7) ? (irq|8) : irq),
                             NULL);
                return; /* bail immediately */
            case 6: /* Set Priority */
                vpic->priority_add = (val + 1) & 7;
                break;
            }
        }
    }
    else
    {
        switch ( vpic->init_state & 3 )
        {
        case 0:
            /* OCW1 */
            unmasked = vpic->imr & (~val);
            vpic->imr = val;
            break;
        case 1:
            /* ICW2 */
            vpic->irq_base = val & 0xf8;
            vpic->init_state++;
            if ( !(vpic->init_state & 8) )
                break; /* CASCADE mode: wait for write to ICW3. */
            /* SNGL mode: fall through (no ICW3). */
        case 2:
            /* ICW3 */
            vpic->init_state++;
            if ( !(vpic->init_state & 4) )
                vpic->init_state = 0; /* No ICW4: init done. */
            break;
        case 3:
            /* ICW4 */
            vpic->special_fully_nested_mode = (val >> 4) & 1;
            vpic->auto_eoi = (val >> 1) & 1;
            vpic->init_state = 0;
            break;
        }
    }

    vpic_update_int_output(vpic);

    vpic_unlock(vpic);

    if ( unmasked )
        pt_may_unmask_irq(vpic_domain(vpic), NULL);
}

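/*
 * A read in poll mode acts as an interrupt acknowledge: it returns, and
 * acks, the highest priority pending line.  (This is a simplification of
 * real hardware, where a poll read also sets bit 7 to flag a pending
 * request and reports the level in bits 2-0.)  With nothing pending, the
 * -1 from vpic_intack() reaches the guest as 0xff after truncation.
 */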
static uint32_t vpic_ioport_read(struct hvm_hw_vpic *vpic, uint32_t addr)
{
    if ( vpic->poll )
    {
        vpic->poll = 0;
        return vpic_intack(vpic);
    }

    if ( (addr & 1) == 0 )
        return (vpic->readsel_isr ? vpic->isr : vpic->irr);

    return vpic->imr;
}

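/*
 * The master PIC lives at ports 0x20/0x21 and the slave at 0xa0/0xa1, so
 * bit 7 of the port number selects the array element.
 */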
static int vpic_intercept_pic_io(
    int dir, unsigned int port, unsigned int bytes, uint32_t *val)
{
    struct hvm_hw_vpic *vpic;

    if ( bytes != 1 )
    {
        gdprintk(XENLOG_WARNING, "PIC_IO bad access size %u\n", bytes);
        *val = ~0;
        return X86EMUL_OKAY;
    }

    vpic = &current->domain->arch.hvm.vpic[!!(port & 0x80)];

    if ( dir == IOREQ_WRITE )
        vpic_ioport_write(vpic, port, (uint8_t)*val);
    else
        *val = (uint8_t)vpic_ioport_read(vpic, port);

    return X86EMUL_OKAY;
}

static int vpic_intercept_elcr_io(
    int dir, unsigned int port, unsigned int bytes, uint32_t *val)
{
    struct hvm_hw_vpic *vpic;
    uint32_t data;

    BUG_ON(bytes != 1);

    vpic = &current->domain->arch.hvm.vpic[port & 1];

    if ( dir == IOREQ_WRITE )
    {
        /*
         * Some IRs are always edge triggered. The slave cascade line
         * (master IR2) is always level triggered.
         */
        data = *val & vpic_elcr_mask(vpic);
        if ( vpic->is_master )
            data |= 1 << 2;
        vpic->elcr = data;
    }
    else
    {
        /* Reader should not see hardcoded level-triggered slave IR. */
        *val = vpic->elcr & vpic_elcr_mask(vpic);
    }

    return X86EMUL_OKAY;
}

static int vpic_save(struct vcpu *v, hvm_domain_context_t *h)
{
    struct domain *d = v->domain;
    struct hvm_hw_vpic *s;
    int i;

    if ( !has_vpic(d) )
        return 0;

    /* Save the state of both PICs. */
    for ( i = 0; i < 2; i++ )
    {
        s = &d->arch.hvm.vpic[i];
        if ( hvm_save_entry(PIC, i, h, s) )
            return 1;
    }

    return 0;
}

static int vpic_load(struct domain *d, hvm_domain_context_t *h)
{
    struct hvm_hw_vpic *s;
    unsigned int inst = hvm_load_instance(h);

    if ( !has_vpic(d) )
        return -ENODEV;

    /* Which PIC is this? */
    if ( inst > 1 )
        return -EINVAL;
    s = &d->arch.hvm.vpic[inst];

    /* Load the state. */
    if ( hvm_load_entry(PIC, h, s) != 0 )
        return -EINVAL;

    return 0;
}

HVM_REGISTER_SAVE_RESTORE(PIC, vpic_save, vpic_load, 2, HVMSR_PER_DOM);

void vpic_reset(struct domain *d)
{
    struct hvm_hw_vpic *vpic;

    if ( !has_vpic(d) )
        return;

    /* Master PIC. */
    vpic = &d->arch.hvm.vpic[0];
    memset(vpic, 0, sizeof(*vpic));
    vpic->is_master = 1;
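    /* The cascade input (IR2) is hard-wired level triggered. */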
    vpic->elcr = 1 << 2;

    /* Slave PIC. */
    vpic++;
    memset(vpic, 0, sizeof(*vpic));
}

void vpic_init(struct domain *d)
{
    if ( !has_vpic(d) )
        return;

    vpic_reset(d);

    register_portio_handler(d, 0x20, 2, vpic_intercept_pic_io);
    register_portio_handler(d, 0xa0, 2, vpic_intercept_pic_io);

    register_portio_handler(d, 0x4d0, 1, vpic_intercept_elcr_io);
    register_portio_handler(d, 0x4d1, 1, vpic_intercept_elcr_io);
}

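/*
 * Inject an external edge on an IRQ line.  Callers must hold the domain's
 * irq_lock.  IR2 on the master is the cascade input, driven only by the
 * slave's INT output (see vpic_update_int_output), so edges injected on
 * line 2 are ignored.
 */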
void vpic_irq_positive_edge(struct domain *d, int irq)
{
    struct hvm_hw_vpic *vpic = &d->arch.hvm.vpic[!!(irq & 8)];
    uint8_t mask = 1 << (irq & 7);

    ASSERT(has_vpic(d));
    ASSERT(irq <= 15);
    ASSERT(vpic_is_locked(vpic));

    TRACE_1D(TRC_HVM_EMUL_PIC_POSEDGE, irq);
    if ( irq == 2 )
        return;

    vpic->irr |= mask;
    if ( !(vpic->imr & mask) )
        vpic_update_int_output(vpic);
}

void vpic_irq_negative_edge(struct domain *d, int irq)
{
    struct hvm_hw_vpic *vpic = &d->arch.hvm.vpic[!!(irq & 8)];
    uint8_t mask = 1 << (irq & 7);

    ASSERT(has_vpic(d));
    ASSERT(irq <= 15);
    ASSERT(vpic_is_locked(vpic));

    TRACE_1D(TRC_HVM_EMUL_PIC_NEGEDGE, irq);
    if ( irq == 2 )
        return;

    vpic->irr &= ~mask;
    if ( !(vpic->imr & mask) )
        vpic_update_int_output(vpic);
}

int vpic_ack_pending_irq(struct vcpu *v)
{
    int irq;
    struct hvm_hw_vpic *vpic = &v->domain->arch.hvm.vpic[0];

    ASSERT(has_vpic(v->domain));

    TRACE_2D(TRC_HVM_EMUL_PIC_PEND_IRQ_CALL, vlapic_accept_pic_intr(v),
             vpic->int_output);
    if ( !vlapic_accept_pic_intr(v) || !vpic->int_output )
        return -1;

    irq = vpic_intack(vpic);
    if ( irq == -1 )
        return -1;

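    /* Translate to a vector: lines 0-7 use the master's irq_base, 8-15 the slave's. */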
    return vpic[irq >> 3].irq_base + (irq & 7);
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */