/*
 * Copyright (c) 2003-2007, Virtual Iron Software, Inc.
 *
 * Portions have been modified by Virtual Iron Software, Inc.
 * (c) 2007. This file and the modifications can be redistributed and/or
 * modified under the terms and conditions of the GNU General Public
 * License, version 2.1 and not any later version of the GPL, as published
 * by the Free Software Foundation.
 *
 * This improves the performance of Standard VGA,
 * the mode used during Windows boot and by the Linux
 * splash screen.
 *
 * It does so by buffering all the stdvga programmed output ops
 * and memory mapped ops (both reads and writes) that are sent to QEMU.
 *
 * We maintain locally essential VGA state so we can respond
 * immediately to input and read ops without waiting for
 * QEMU. We snoop output and write ops to keep our state
 * up-to-date.
 *
 * PIO input ops are satisfied from cached state without
 * bothering QEMU.
 *
 * PIO output and mmio ops are passed through to QEMU, including
 * mmio read ops. This is necessary because mmio reads
 * can have side effects.
 */

#include <xen/types.h>
#include <xen/sched.h>
#include <xen/domain_page.h>
#include <asm/hvm/ioreq.h>
#include <asm/hvm/support.h>
#include <xen/numa.h>
#include <xen/paging.h>

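/* Legacy VGA memory window: 0xa0000-0xbffff (128KiB). */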
#define VGA_MEM_BASE 0xa0000
#define VGA_MEM_SIZE 0x20000

#define PAT(x) (x)
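/*
 * mask16[] expands a 4-bit plane-select value into a 32-bit mask holding
 * 0xff for each selected plane, matching the latch layout in which each
 * byte of a 32-bit VRAM word carries one plane's data.
 */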
static const uint32_t mask16[16] = {
    PAT(0x00000000),
    PAT(0x000000ff),
    PAT(0x0000ff00),
    PAT(0x0000ffff),
    PAT(0x00ff0000),
    PAT(0x00ff00ff),
    PAT(0x00ffff00),
    PAT(0x00ffffff),
    PAT(0xff000000),
    PAT(0xff0000ff),
    PAT(0xff00ff00),
    PAT(0xff00ffff),
    PAT(0xffff0000),
    PAT(0xffff00ff),
    PAT(0xffffff00),
    PAT(0xffffffff),
};

/* force some bits to zero */
static const uint8_t sr_mask[8] = {
    (uint8_t)~0xfc,
    (uint8_t)~0xc2,
    (uint8_t)~0xf0,
    (uint8_t)~0xc0,
    (uint8_t)~0xf1,
    (uint8_t)~0xff,
    (uint8_t)~0xff,
    (uint8_t)~0x00,
};

static const uint8_t gr_mask[9] = {
    (uint8_t)~0xf0, /* 0x00 */
    (uint8_t)~0xf0, /* 0x01 */
    (uint8_t)~0xf0, /* 0x02 */
    (uint8_t)~0xe0, /* 0x03 */
    (uint8_t)~0xfc, /* 0x04 */
    (uint8_t)~0x84, /* 0x05 */
    (uint8_t)~0xf0, /* 0x06 */
    (uint8_t)~0xf0, /* 0x07 */
    (uint8_t)~0x00, /* 0x08 */
};

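/*
 * The emulated VRAM is kept in 64 individually allocated domheap pages
 * (256KiB: 4 planes of 64KiB). vram_getb() maps the page containing byte
 * offset 'a', vram_getl() the page containing 32-bit word 'a' (one byte
 * per plane); every mapping must be released again with vram_put().
 */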
static uint8_t *vram_getb(struct hvm_hw_stdvga *s, unsigned int a)
{
    struct page_info *pg = s->vram_page[(a >> 12) & 0x3f];
    uint8_t *p = __map_domain_page(pg);
    return &p[a & 0xfff];
}

static uint32_t *vram_getl(struct hvm_hw_stdvga *s, unsigned int a)
{
    struct page_info *pg = s->vram_page[(a >> 10) & 0x3f];
    uint32_t *p = __map_domain_page(pg);
    return &p[a & 0x3ff];
}

static void vram_put(struct hvm_hw_stdvga *s, void *p)
{
    unmap_domain_page(p);
}

static void stdvga_try_cache_enable(struct hvm_hw_stdvga *s)
{
    /*
     * Caching mode can only be enabled if the cache has
     * never been used before. As soon as it is disabled, it will
     * become out-of-sync with the VGA device model and since no
     * mechanism exists to acquire current VRAM state from the
     * device model, re-enabling it would lead to stale data being
     * seen by the guest.
     */
    if ( s->cache != STDVGA_CACHE_UNINITIALIZED )
        return;

    gdprintk(XENLOG_INFO, "entering caching mode\n");
    s->cache = STDVGA_CACHE_ENABLED;
}

static void stdvga_cache_disable(struct hvm_hw_stdvga *s)
{
    if ( s->cache != STDVGA_CACHE_ENABLED )
        return;

    gdprintk(XENLOG_INFO, "leaving caching mode\n");
    s->cache = STDVGA_CACHE_DISABLED;
}

static bool_t stdvga_cache_is_enabled(const struct hvm_hw_stdvga *s)
{
    return s->cache == STDVGA_CACHE_ENABLED;
}

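/*
 * Snoop a byte written to one of the intercepted VGA register ports so the
 * cached sequencer/graphics state stays in sync with the device model.
 * Returns non-zero if the write was captured in the cached state.
 */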
static int stdvga_outb(uint64_t addr, uint8_t val)
{
    struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;
    int rc = 1, prev_stdvga = s->stdvga;

    switch ( addr )
    {
    case 0x3c4: /* sequencer address register */
        s->sr_index = val;
        break;

    case 0x3c5: /* sequencer data register */
        rc = (s->sr_index < sizeof(s->sr));
        if ( rc )
            s->sr[s->sr_index] = val & sr_mask[s->sr_index];
        break;

    case 0x3ce: /* graphics address register */
        s->gr_index = val;
        break;

    case 0x3cf: /* graphics data register */
        rc = (s->gr_index < sizeof(s->gr));
        if ( rc )
            s->gr[s->gr_index] = val & gr_mask[s->gr_index];
        break;

    default:
        rc = 0;
        break;
    }

    /* When in standard vga mode, emulate here all writes to the vram buffer
     * so we can immediately satisfy reads without waiting for qemu. */
    s->stdvga = (s->sr[7] == 0x00);

    if ( !prev_stdvga && s->stdvga )
    {
        gdprintk(XENLOG_INFO, "entering stdvga mode\n");
        stdvga_try_cache_enable(s);
    }
    else if ( prev_stdvga && !s->stdvga )
    {
        gdprintk(XENLOG_INFO, "leaving stdvga mode\n");
    }

    return rc;
}

static void stdvga_out(uint32_t port, uint32_t bytes, uint32_t val)
{
    switch ( bytes )
    {
    case 1:
        stdvga_outb(port, val);
        break;

    case 2:
        stdvga_outb(port + 0, val >> 0);
        stdvga_outb(port + 1, val >> 8);
        break;

    default:
        break;
    }
}

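/*
 * All port I/O is forwarded to the device model; writes are additionally
 * snooped here (under the lock) so that the cached register state stays
 * consistent with what the device model will see.
 */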
static int stdvga_intercept_pio(
    int dir, unsigned int port, unsigned int bytes, uint32_t *val)
{
    struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;

    if ( dir == IOREQ_WRITE )
    {
        spin_lock(&s->lock);
        stdvga_out(port, bytes, *val);
        spin_unlock(&s->lock);
    }

    return X86EMUL_UNHANDLEABLE; /* propagate to external ioemu */
}

static unsigned int stdvga_mem_offset(
    struct hvm_hw_stdvga *s, unsigned int mmio_addr)
{
    unsigned int memory_map_mode = (s->gr[6] >> 2) & 3;
    unsigned int offset = mmio_addr & 0x1ffff;

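    /*
     * GR06 bits 3:2 select the CPU window within the legacy VGA range:
     *   0: A0000h-BFFFFh (128K), 1: A0000h-AFFFFh (64K),
     *   2: B0000h-B7FFFh (32K),  3: B8000h-BFFFFh (32K).
     * Accesses outside the selected window yield ~0u.
     */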
    switch ( memory_map_mode )
    {
    case 0:
        break;
    case 1:
        if ( offset >= 0x10000 )
            goto fail;
        offset += 0; /* assume bank_offset == 0; */
        break;
    case 2:
        offset -= 0x10000;
        if ( offset >= 0x8000 )
            goto fail;
        break;
    default:
    case 3:
        offset -= 0x18000;
        if ( offset >= 0x8000 )
            goto fail;
        break;
    }

    return offset;

 fail:
    return ~0u;
}

#define GET_PLANE(data, p) (((data) >> ((p) * 8)) & 0xff)

static uint8_t stdvga_mem_readb(uint64_t addr)
{
    struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;
    int plane;
    uint32_t ret, *vram_l;
    uint8_t *vram_b;

    addr = stdvga_mem_offset(s, addr);
    if ( addr == ~0u )
        return 0xff;

    if ( s->sr[4] & 0x08 )
    {
        /* chain 4 mode : simplest access */
        vram_b = vram_getb(s, addr);
        ret = *vram_b;
        vram_put(s, vram_b);
    }
    else if ( s->gr[5] & 0x10 )
    {
        /* odd/even mode (aka text mode mapping) */
        plane = (s->gr[4] & 2) | (addr & 1);
        vram_b = vram_getb(s, ((addr & ~1) << 1) | plane);
        ret = *vram_b;
        vram_put(s, vram_b);
    }
    else
    {
        /* standard VGA latched access */
        vram_l = vram_getl(s, addr);
        s->latch = *vram_l;
        vram_put(s, vram_l);

        if ( !(s->gr[5] & 0x08) )
        {
            /* read mode 0 */
            plane = s->gr[4];
            ret = GET_PLANE(s->latch, plane);
        }
        else
        {
            /* read mode 1 */
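            /*
             * Colour Compare: a result bit is set when the corresponding
             * pixel matches GR02 in every plane not excluded by the
             * Colour Don't Care register (GR07).
             */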
            ret = (s->latch ^ mask16[s->gr[2]]) & mask16[s->gr[7]];
            ret |= ret >> 16;
            ret |= ret >> 8;
            ret = (~ret) & 0xff;
        }
    }

    return ret;
}

static int stdvga_mem_read(const struct hvm_io_handler *handler,
                           uint64_t addr, uint32_t size, uint64_t *p_data)
{
    uint64_t data = ~0ul;

    switch ( size )
    {
    case 1:
        data = stdvga_mem_readb(addr);
        break;

    case 2:
        data = stdvga_mem_readb(addr);
        data |= stdvga_mem_readb(addr + 1) << 8;
        break;

    case 4:
        data = stdvga_mem_readb(addr);
        data |= stdvga_mem_readb(addr + 1) << 8;
        data |= stdvga_mem_readb(addr + 2) << 16;
        data |= (uint32_t)stdvga_mem_readb(addr + 3) << 24;
        break;

    case 8:
        data = (uint64_t)(stdvga_mem_readb(addr));
        data |= (uint64_t)(stdvga_mem_readb(addr + 1)) << 8;
        data |= (uint64_t)(stdvga_mem_readb(addr + 2)) << 16;
        data |= (uint64_t)(stdvga_mem_readb(addr + 3)) << 24;
        data |= (uint64_t)(stdvga_mem_readb(addr + 4)) << 32;
        data |= (uint64_t)(stdvga_mem_readb(addr + 5)) << 40;
        data |= (uint64_t)(stdvga_mem_readb(addr + 6)) << 48;
        data |= (uint64_t)(stdvga_mem_readb(addr + 7)) << 56;
        break;

    default:
        gdprintk(XENLOG_WARNING, "invalid io size: %u\n", size);
        break;
    }

    *p_data = data;
    return X86EMUL_OKAY;
}

static void stdvga_mem_writeb(uint64_t addr, uint32_t val)
{
    struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;
    int plane, write_mode, b, func_select, mask;
    uint32_t write_mask, bit_mask, set_mask, *vram_l;
    uint8_t *vram_b;

    addr = stdvga_mem_offset(s, addr);
    if ( addr == ~0u )
        return;

    if ( s->sr[4] & 0x08 )
    {
        /* chain 4 mode : simplest access */
        plane = addr & 3;
        mask = (1 << plane);
        if ( s->sr[2] & mask )
        {
            vram_b = vram_getb(s, addr);
            *vram_b = val;
            vram_put(s, vram_b);
        }
    }
    else if ( s->gr[5] & 0x10 )
    {
        /* odd/even mode (aka text mode mapping) */
        plane = (s->gr[4] & 2) | (addr & 1);
        mask = (1 << plane);
        if ( s->sr[2] & mask )
        {
            addr = ((addr & ~1) << 1) | plane;
            vram_b = vram_getb(s, addr);
            *vram_b = val;
            vram_put(s, vram_b);
        }
    }
    else
    {
        write_mode = s->gr[5] & 3;
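        /*
         * Planar write path: GR05 bits 1:0 select the write mode, GR03
         * supplies the rotate count and ALU function, GR08 is the bit
         * mask and GR00/GR01 provide the set/reset data and enables.
         */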
        switch ( write_mode )
        {
        default:
        case 0:
            /* rotate */
            b = s->gr[3] & 7;
            val = ((val >> b) | (val << (8 - b))) & 0xff;
            val |= val << 8;
            val |= val << 16;

            /* apply set/reset mask */
            set_mask = mask16[s->gr[1]];
            val = (val & ~set_mask) | (mask16[s->gr[0]] & set_mask);
            bit_mask = s->gr[8];
            break;
        case 1:
            val = s->latch;
            goto do_write;
        case 2:
            val = mask16[val & 0x0f];
            bit_mask = s->gr[8];
            break;
        case 3:
            /* rotate */
            b = s->gr[3] & 7;
            val = (val >> b) | (val << (8 - b));

            bit_mask = s->gr[8] & val;
            val = mask16[s->gr[0]];
            break;
        }

        /* apply logical operation */
        func_select = s->gr[3] >> 3;
        switch ( func_select )
        {
        case 0:
        default:
            /* nothing to do */
            break;
        case 1:
            /* and */
            val &= s->latch;
            break;
        case 2:
            /* or */
            val |= s->latch;
            break;
        case 3:
            /* xor */
            val ^= s->latch;
            break;
        }

        /* apply bit mask */
        bit_mask |= bit_mask << 8;
        bit_mask |= bit_mask << 16;
        val = (val & bit_mask) | (s->latch & ~bit_mask);

    do_write:
        /* mask data according to sr[2] */
        mask = s->sr[2];
        write_mask = mask16[mask];
        vram_l = vram_getl(s, addr);
        *vram_l = (*vram_l & ~write_mask) | (val & write_mask);
        vram_put(s, vram_l);
    }
}

static int stdvga_mem_write(const struct hvm_io_handler *handler,
                            uint64_t addr, uint32_t size,
                            uint64_t data)
{
    struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;
    ioreq_t p = {
        .type = IOREQ_TYPE_COPY,
        .addr = addr,
        .size = size,
        .count = 1,
        .dir = IOREQ_WRITE,
        .data = data,
    };
    struct hvm_ioreq_server *srv;

    if ( !stdvga_cache_is_enabled(s) || !s->stdvga )
        goto done;

    /* Intercept mmio write */
    switch ( size )
    {
    case 1:
        stdvga_mem_writeb(addr, (data >> 0) & 0xff);
        break;

    case 2:
        stdvga_mem_writeb(addr+0, (data >> 0) & 0xff);
        stdvga_mem_writeb(addr+1, (data >> 8) & 0xff);
        break;

    case 4:
        stdvga_mem_writeb(addr+0, (data >> 0) & 0xff);
        stdvga_mem_writeb(addr+1, (data >> 8) & 0xff);
        stdvga_mem_writeb(addr+2, (data >> 16) & 0xff);
        stdvga_mem_writeb(addr+3, (data >> 24) & 0xff);
        break;

    case 8:
        stdvga_mem_writeb(addr+0, (data >> 0) & 0xff);
        stdvga_mem_writeb(addr+1, (data >> 8) & 0xff);
        stdvga_mem_writeb(addr+2, (data >> 16) & 0xff);
        stdvga_mem_writeb(addr+3, (data >> 24) & 0xff);
        stdvga_mem_writeb(addr+4, (data >> 32) & 0xff);
        stdvga_mem_writeb(addr+5, (data >> 40) & 0xff);
        stdvga_mem_writeb(addr+6, (data >> 48) & 0xff);
        stdvga_mem_writeb(addr+7, (data >> 56) & 0xff);
        break;

    default:
        gdprintk(XENLOG_WARNING, "invalid io size: %u\n", size);
        break;
    }

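    /*
     * Regardless of whether the write could be applied to the local cache
     * above, forward it to the device model so that its copy of VRAM
     * remains authoritative.
     */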
 done:
    srv = hvm_select_ioreq_server(current->domain, &p);
    if ( !srv )
        return X86EMUL_UNHANDLEABLE;

    return hvm_send_ioreq(srv, &p, 1);
}

static bool_t stdvga_mem_accept(const struct hvm_io_handler *handler,
                                const ioreq_t *p)
{
    struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;

    /*
     * The range check must be done without taking the lock, to avoid
     * deadlock when hvm_mmio_internal() is called from
     * hvm_copy_to/from_guest_phys() in hvm_process_io_intercept().
     */
    if ( (hvm_mmio_first_byte(p) < VGA_MEM_BASE) ||
         (hvm_mmio_last_byte(p) >= (VGA_MEM_BASE + VGA_MEM_SIZE)) )
        return 0;

    spin_lock(&s->lock);

    if ( p->dir == IOREQ_WRITE && p->count > 1 )
    {
        /*
         * We cannot return X86EMUL_UNHANDLEABLE on anything other than the
         * first cycle of an I/O. So, since we cannot guarantee to always be
         * able to send buffered writes, we have to reject any multi-cycle
         * I/O and, since we are rejecting an I/O, we must invalidate the
         * cache.
         * Single-cycle write transactions are accepted even if the cache is
         * not active since we can assert, when in stdvga mode, that writes
         * to VRAM have no side effect and thus we can try to buffer them.
         */
        stdvga_cache_disable(s);

        goto reject;
    }
    else if ( p->dir == IOREQ_READ &&
              (!stdvga_cache_is_enabled(s) || !s->stdvga) )
        goto reject;

    /* s->lock intentionally held */
    return 1;

 reject:
    spin_unlock(&s->lock);
    return 0;
}

static void stdvga_mem_complete(const struct hvm_io_handler *handler)
{
    struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;

    spin_unlock(&s->lock);
}

static const struct hvm_io_ops stdvga_mem_ops = {
    .accept = stdvga_mem_accept,
    .read = stdvga_mem_read,
    .write = stdvga_mem_write,
    .complete = stdvga_mem_complete
};

void stdvga_init(struct domain *d)
{
    struct hvm_hw_stdvga *s = &d->arch.hvm.stdvga;
    struct page_info *pg;
    unsigned int i;

    if ( !has_vvga(d) )
        return;

    memset(s, 0, sizeof(*s));
    spin_lock_init(&s->lock);

    for ( i = 0; i != ARRAY_SIZE(s->vram_page); i++ )
    {
        pg = alloc_domheap_page(d, MEMF_no_owner);
        if ( pg == NULL )
            break;
        s->vram_page[i] = pg;
        clear_domain_page(page_to_mfn(pg));
    }

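    /*
     * Only register the intercepts if the full VRAM buffer could be
     * allocated; otherwise all VGA accesses simply go straight to the
     * device model.
     */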
    if ( i == ARRAY_SIZE(s->vram_page) )
    {
        struct hvm_io_handler *handler;

        /* Sequencer registers. */
        register_portio_handler(d, 0x3c4, 2, stdvga_intercept_pio);
        /* Graphics registers. */
        register_portio_handler(d, 0x3ce, 2, stdvga_intercept_pio);

        /* VGA memory */
        handler = hvm_next_io_handler(d);

        if ( handler == NULL )
            return;

        handler->type = IOREQ_TYPE_COPY;
        handler->ops = &stdvga_mem_ops;
    }
}

void stdvga_deinit(struct domain *d)
{
    struct hvm_hw_stdvga *s = &d->arch.hvm.stdvga;
    int i;

    if ( !has_vvga(d) )
        return;

    for ( i = 0; i != ARRAY_SIZE(s->vram_page); i++ )
    {
        if ( s->vram_page[i] == NULL )
            continue;
        free_domheap_page(s->vram_page[i]);
        s->vram_page[i] = NULL;
    }
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */